ws2021:zusaetzliche_klasse_resnet

Differences

This page shows the differences between two versions of the page.

Previous revision: ws2021:zusaetzliche_klasse_resnet [2021/04/05 21:16] elena_kirschner (page created)
Current revision: ws2021:zusaetzliche_klasse_resnet [2021/04/06 16:40] annika_cibis (current)

Previous revision:

<code>
import torch.nn as nn

class block(nn.Module):
    # Residual block with three 1d convolutions; the channel count is
    # doubled inside the block (out_channels -> out_channels*2).
    def __init__(self, in_channels, out_channels, identity_downsample=None, stride=1):
        super(block, self).__init__()
        self.out_channels, self.in_channels = out_channels, in_channels
        self.expansion = 1
        self.conv1 = nn.Conv1d(self.in_channels, self.out_channels, kernel_size=15, stride=stride, padding=7)
        self.bn1 = nn.BatchNorm1d(self.out_channels)
        self.conv2 = nn.Conv1d(self.out_channels, self.out_channels*2, kernel_size=9, stride=1, padding=4)
        self.bn2 = nn.BatchNorm1d(self.out_channels*2)
        self.conv3 = nn.Conv1d(self.out_channels*2, self.out_channels*2, kernel_size=5, stride=1, padding=2)
        self.bn3 = nn.BatchNorm1d(self.out_channels*2)
        self.relu = nn.ReLU()
        self.identity_downsample = identity_downsample
        # linear layers are defined here but not used in forward()
        self.lin1 = nn.Linear(out_channels*2*250, 800)
        self.lin2 = nn.Linear(800, 40)
        self.lin3 = nn.Linear(40, 10)
        self.lin4 = nn.Linear(10, 2)

    def forward(self, x):
        identity = x
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.conv3(x)
        x = self.bn3(x)
        # project the identity if the shape changed inside the block
        if self.identity_downsample is not None:
            identity = self.identity_downsample(identity)
        x += identity
        return x
</code>

Current revision:

<code>
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim


class ResNet(nn.Module):
    def __init__(self, block, layers, image_channels):
        super(ResNet, self).__init__()

        # bookkeeping for the training process
        self.history_loss = []
        self.history_eval = []
        self.classific_accuracy_training = []
        self.current_epoch = 0

        # stem: initial convolution on the 12 input channels
        self.in_channels = 12
        self.conv1 = nn.Conv1d(12, 12, kernel_size=7, stride=2, padding=3)
        self.bn1 = nn.BatchNorm1d(12)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)

        # four stages of residual blocks
        self.layer1 = self._make_layer(block, layers[0], out_channels=12, stride=1)
        self.layer2 = self._make_layer(block, layers[1], out_channels=24, stride=2)
        self.layer3 = self._make_layer(block, layers[2], out_channels=24*2, stride=2)
        self.layer4 = self._make_layer(block, layers[3], out_channels=24*4, stride=2)

        # classifier head: layer4 outputs 24*4*2 = 192 channels,
        # pooled to 25 positions -> 192*25 = 4800 features
        self.avgpool = nn.AdaptiveAvgPool1d(25)
        self.lin1 = nn.Linear(4800, 800)
        self.lin2 = nn.Linear(800, 40)
        self.lin3 = nn.Linear(40, 10)
        self.lin4 = nn.Linear(10, 2)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = x.reshape(x.shape[0], -1)   # possibly switch to the x.view function

        x = F.relu(self.lin1(x))
        x = F.relu(self.lin2(x))
        x = F.relu(self.lin3(x))
        x = self.lin4(x)
        return x

    def _make_layer(self, block, num_residual_blocks, out_channels, stride):
        identity_downsample = None
        layers = []

        # a 1x1 convolution projects the identity whenever the first block
        # of a stage changes the resolution or the channel count
        if stride != 1 or self.in_channels != out_channels*2:
            identity_downsample = nn.Sequential(nn.Conv1d(self.in_channels, out_channels*2, kernel_size=1,
                                                          stride=stride),
                                                nn.BatchNorm1d(out_channels*2))

        layers.append(block(self.in_channels, out_channels, identity_downsample, stride=stride))

        self.in_channels = out_channels*2

        for i in range(num_residual_blocks - 1):
            layers.append(block(self.in_channels, out_channels))

        return nn.Sequential(*layers)


def ResNet50(img_channels=12):
    # 'block' is the residual block class shown in the previous revision
    return ResNet(block, [3, 4, 6, 3], img_channels)

def test():
    net = ResNet50()
    x = torch.randn(30, 12, 1000)
    y = net(x)
    return y

net = ResNet50()
learning_rate = 0.15
batch_size = 50
optimizer = optim.SGD(net.parameters(), lr=learning_rate)
</code>
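
Since the current revision already constructs net, learning_rate, batch_size and the SGD optimizer, a single training step could look like the following minimal sketch. It is not part of the original page: the nn.CrossEntropyLoss criterion, the random dummy batch, and the integer targets are assumptions for illustration only.

<code>
# Sketch of one training step (assumed, not from the original page):
# cross-entropy on the two output logits, with a dummy batch shaped
# like the input used in test() above (12 channels, length 1000).
criterion = nn.CrossEntropyLoss()

inputs = torch.randn(batch_size, 12, 1000)    # dummy batch of 12-channel signals
targets = torch.randint(0, 2, (batch_size,))  # dummy labels for the 2 classes

optimizer.zero_grad()               # clear gradients from the previous step
outputs = net(inputs)               # forward pass -> (batch_size, 2) logits
loss = criterion(outputs, targets)
loss.backward()                     # backpropagation through the ResNet
optimizer.step()                    # SGD update with lr=0.15

net.history_loss.append(loss.item())  # reuse the bookkeeping list from __init__
</code>

In actual training, inputs and targets would come from a data loader rather than torch.randn, and the step would be repeated over all batches per epoch.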