@@ -1129,9 +1129,9 @@ def forward(self, input):
print(acc_train)

#%%
- # now we are ready to see how finte-tuning works in Pytorch
- # there are several models in github models repository that we can use
- # lets choose one but before lets see what we have at our disposal
+ # OK, we are ready to see how fine-tuning works in PyTorch
+ # there are several pretrained models in the torchvision models repository that we can use
+ # let's choose one, but before that let's see what we have at our disposal

from torchvision import models
import torch.nn as nn
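# (editor's sketch, not part of the original commit) one way to "see what
# we have at our disposal" is to list the model constructors that
# torchvision ships. the lines elided between these hunks presumably
# construct the model; something like models.resnet18(pretrained=True)
# is assumed here:
print([name for name in dir(models) if not name.startswith('_')])
resnet18 = models.resnet18(pretrained=True)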
@@ -1142,19 +1142,23 @@ def forward(self, input):
# let's print the model
print(f'\nORIGINAL MODEL: \n{resnet18}\n')

- # by looking at the architecure, we notice
+ # by looking at the architecture, we notice:
# (fc): Linear(in_features=512, out_features=1000, bias=True)
- # this means in order to make retrain this network for our usecase
- # lets train this for cifar10 which has 10 classes.
+ # in order to retrain this network for our use case,
+ # we need to alter this layer. it was trained on ImageNet,
+ # which has 1000 classes; let's retrain it for CIFAR-10, which
+ # has 10 classes. all we need to do is define a new
+ # fully connected (fc) layer and assign it back to the
+ # resnet18.fc attribute!

resnet18.fc = nn.Linear(512, 10)
- # instead of hardcoding the 512 which we saw from the printed version of
- # our model. we can simply use the in_features attribute of the fc layer!
- # and write :
+ # instead of hardcoding the 512, which we saw by looking at the
+ # printed version of our model, we can simply use the
+ # 'in_features' attribute of the fc layer and write:

# resnet18.fc = nn.Linear(resnet18.fc.in_features, 10)

print(f'\nNEW MODEL (after adding the new fc layer): \n{resnet18}')
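# (editor's sketch, not part of the original commit) a quick sanity check
# that the new head really produces 10 logits; the ImageNet-style
# 224x224 dummy input size is an assumption:
import torch
resnet18.eval()
with torch.no_grad():
    out = resnet18(torch.randn(1, 3, 224, 224))
print(out.shape)  # torch.Size([1, 10])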
- # now before we dive in to train our net we should frst
- # freeze all layers but this new one, and train for several epochs,
+ # now, before we dive in to train our network, we should first
+ # freeze all layers except this new one and train for several epochs,
# so that it converges to a reasonable set of weights.
# then we unfreeze all previous layers and train the whole net
# altogether again.
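# (editor's sketch, not part of the original commit) a minimal sketch of
# the freeze/unfreeze recipe described above; the learning rates are
# assumptions, and the training loops themselves are elided:
for param in resnet18.parameters():
    param.requires_grad = False        # freeze everything ...
for param in resnet18.fc.parameters():
    param.requires_grad = True         # ... except the new fc layer
head_optimizer = torch.optim.SGD(
    (p for p in resnet18.parameters() if p.requires_grad), lr=1e-2)
# ... train the new head for several epochs, then unfreeze:
for param in resnet18.parameters():
    param.requires_grad = True
full_optimizer = torch.optim.SGD(resnet18.parameters(), lr=1e-3)
# ... and train the whole network altogether again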