Benutzer-Werkzeuge

Webseiten-Werkzeuge


Seitenleiste

ws2021:hier_geht_es_zu_unserem_code_mit_convolutional_layern

Dies ist eine alte Version des Dokuments!


<code>

 class Netzwerk(nn.Module):
      """1-D convolutional classifier: 4 Conv1d+MaxPool1d stages followed by
      3 fully-connected layers, producing 2 output logits per sample.

      Expects input of shape (batch, 12, L) with L such that the conv/pool
      chain reduces the length to 61 (e.g. L = 1000), matching the
      hard-coded 12*61 flatten size below.
      """
      def __init__(self):
          super(Netzwerk, self).__init__()
          # Convolution/pooling feature extractor: channels 12 -> 24 -> 32 -> 24 -> 12.
          self.conv1 = nn.Conv1d(12, 24, kernel_size=15, stride=1, padding=1)
          self.pool1 = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)
          self.conv2 = nn.Conv1d(24, 32, kernel_size=10, padding=1)
          self.pool2 = nn.MaxPool1d(2, stride=2, padding=1)
          self.conv3 = nn.Conv1d(32, 24, kernel_size=5, padding=1)
          self.pool3 = nn.MaxPool1d(3, stride=2, padding=1)
          self.conv4 = nn.Conv1d(24, 12, kernel_size=3, padding=1)
          self.pool4 = nn.MaxPool1d(3, stride=2, padding=1)
          # Classifier head: 12*61 flattened features -> 40 -> 10 -> 2 classes.
          # NOTE: layer names lin1/lin3/lin4 are kept as in the original code
          # so existing checkpoints/state_dicts keep loading.
          self.lin1 = nn.Linear(12*61, 40)
          self.lin3 = nn.Linear(40, 10)
          self.lin4 = nn.Linear(10, 2)
          # Training bookkeeping (filled in elsewhere during training).
          self.history_loss = []
          self.history_eval = []
          self.classific_accuracy_training = []
          self.current_epoch = 0
      def forward(self, x):
          """Run the network; x has shape (batch, 12, L), returns (batch, 2) logits."""
          x = self.pool1(F.relu(self.conv1(x)))
          x = self.pool2(F.relu(self.conv2(x)))
          x = self.pool3(F.relu(self.conv3(x)))
          x = self.pool4(F.relu(self.conv4(x)))
          # Flatten to the fixed feature size expected by lin1; -1 keeps the batch dim.
          x = x.view(-1, 12*61)
          x = F.relu(self.lin1(x))
          # BUG FIX: the original called self.lin2, which was never defined
          # (AttributeError), and never reached the final 10->2 layer.
          # Route through the layers that actually exist: lin1 -> lin3 -> lin4.
          x = F.relu(self.lin3(x))
          x = self.lin4(x)
          return x
# Instantiate the network and move it to the GPU (requires CUDA to be available).
net = Netzwerk()
net = net.cuda()
# Hyperparameters for SGD training.
# NOTE(review): lr=0.3 is unusually high for SGD; presumably tuned for this
# task — confirm against the training results.
learning_rate = 0.3
batch_size = 50
# Plain SGD with L2 regularization via weight_decay.
optimizer = optim.SGD(net.parameters(), lr=learning_rate, weight_decay=0.003)
# Accumulators for visualization of evaluation and loss curves (filled elsewhere).
vis_ev = []
vis_loss = []

</code>

ws2021/hier_geht_es_zu_unserem_code_mit_convolutional_layern.1617647511.txt.gz · Zuletzt geändert: 2021/04/05 20:31 von elena_kirschner