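Our code with convolutional layers: the network Netzwerk below is a 1D CNN for 12-lead ECG signals. Four Conv1d/MaxPool1d stages reduce the signal, and three fully connected layers map the result to two output classes; the train() and evaluate() functions further down handle training and accuracy measurement.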


  # Imports required by the code below; X_train, y_train, X_evaluation and
  # y_evaluation are assumed to be prepared elsewhere (the data loading is
  # not shown on this page).
  import numpy as np
  import torch
  import torch.nn as nn
  import torch.nn.functional as F
  import torch.optim as optim
  from torch.autograd import Variable
  
  class Netzwerk(nn.Module):
        def __init__(self):
            super(Netzwerk, self).__init__()
            # four Conv1d/MaxPool1d stages; 12 input channels = 12 ECG leads
            self.conv1 = nn.Conv1d(12, 24, kernel_size=15, stride=1, padding=1)
            self.pool1 = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)
            self.conv2 = nn.Conv1d(24, 32, kernel_size=10, padding=1)
            self.pool2 = nn.MaxPool1d(2, stride=2, padding=1)
            self.conv3 = nn.Conv1d(32, 24, kernel_size=5, padding=1)
            self.pool3 = nn.MaxPool1d(3, stride=2, padding=1)
            self.conv4 = nn.Conv1d(24, 12, kernel_size=3, padding=1)
            self.pool4 = nn.MaxPool1d(3, stride=2, padding=1)
            # fully connected classifier; 12*61 = 12 channels * 61 samples
            # remaining after the four pooling stages
            self.lin1 = nn.Linear(12*61, 40)
            self.lin3 = nn.Linear(40, 10)
            self.lin4 = nn.Linear(10, 2)
            self.history_loss = []
            self.history_eval = []
            self.classific_accuracy_training = []
            self.current_epoch = 0
            
        def forward(self, x):
            x = self.pool1(F.relu(self.conv1(x)))
            x = self.pool2(F.relu(self.conv2(x)))
            x = self.pool3(F.relu(self.conv3(x)))
            x = self.pool4(F.relu(self.conv4(x)))
            x = x.view(-1, 12*61)       
            x = F.relu(self.lin1(x))
            x = F.relu(self.lin3(x))
            x = self.lin4(x)
            return x
                   
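A quick way to check that the flattened size 12*61 fits the data is a dummy forward pass. The sketch below assumes signals of 1000 samples per lead; this length is not stated in the code, but it is consistent with the kernel, stride and padding values above (the four pooling stages reduce 1000 samples to 61).

  # Minimal shape check (sketch; the length of 1000 samples per lead is an assumption)
  dummy = torch.zeros(1, 12, 1000)   # (batch, leads, samples)
  with torch.no_grad():
      out = Netzwerk()(dummy)
  print(out.shape)                   # expected: torch.Size([1, 2])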
  net = Netzwerk()
  net = net.cuda()
  learning_rate = 0.3
  batch_size = 50
  optimizer = optim.SGD(net.parameters(), lr=learning_rate, weight_decay=0.003)
  vis_ev = []
  vis_loss = []
  
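The calls net.cuda() above and .cuda() inside train()/evaluate() assume that a GPU is present. A device-agnostic variant, shown only as a sketch and not part of the original code:

  # Sketch: select the GPU only if one is available.
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
  net = net.to(device)
  # the tensors in train() and evaluate() would then be moved with .to(device) instead of .cuda()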
  def train(epoch = 3000, b_s = batch_size, learning_rate = 0.8, progress = []):
            """Trains the network for 'epoch' iterations with random batches of size b_s.
            The learning_rate and progress arguments are not used; the learning rate
            is adjusted inside the loop instead."""
            net.train()
            for i in range(epoch):
                # draw a random batch of indices (sampling with replacement)
                random_batch = [np.random.randint(len(X_train)) for i in range(b_s)]
                # input values: (batch, samples, leads) -> (batch, leads, samples)
                my_in = np.swapaxes(X_train[random_batch], 1, 2)
                my_in = Variable(torch.Tensor(my_in))   # already (batch, 12, samples), no unsqueeze needed
                # target values (class labels)
                ziel = y_train[y_train.index[random_batch]].tolist()
                zielw = Variable(torch.Tensor(ziel))
                zielw = zielw.type(torch.LongTensor)
                #cuda
                my_in = my_in.cuda()
                zielw = zielw.cuda()
                # forward prop
                criterion = nn.CrossEntropyLoss()    
                optimizer.zero_grad()       
                out = net(my_in)
                #cuda
                out = out.cuda()
                # manual learning-rate schedule
                if net.current_epoch >= 1000 and net.current_epoch < 2500:
                    optimizer.param_groups[0]['lr'] = 0.06
                if net.current_epoch >= 2500:
                    optimizer.param_groups[0]['lr'] = 0.01
                loss = criterion(out, zielw)
                #backward prop
                loss.backward()                     
                optimizer.step()               
                net.history_loss.append(loss.item())
                if i%50 == 0:
                    acc = evaluate()
                    print('\r', net.current_epoch, '/', epoch, "Loss: " + "{:2.5f}".format(loss.item()), 
                                                               "Evaluation: " + "{:2.5f}".format(acc))
                    net.history_eval.append(acc)
                    net.classific_accuracy_training.append(evaluate(test_x=X_train[:2000], test_y=y_train[:2000]))
                    net.train()   # back to training mode, evaluate() switched the net to eval mode
                net.current_epoch += 1       
                   
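Inside train() the learning rate is lowered by hand (0.06 from epoch 1000, 0.01 from epoch 2500). The same schedule could also be expressed with a PyTorch scheduler; the following is only a sketch of that alternative, not what the code above does:

  # Sketch: the same step schedule via torch.optim.lr_scheduler.LambdaLR
  # (scheduler.step() would then be called once per iteration of the loop above).
  from torch.optim.lr_scheduler import LambdaLR
  scheduler = LambdaLR(optimizer,
                       lr_lambda=lambda e: 1.0 if e < 1000 else (0.2 if e < 2500 else 0.01/0.3))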
  def evaluate(test_x = X_evaluation, test_y = y_evaluation, werte_vgl=False):
                """Determines how often the network classifies an ECG correctly;
                with 'werte_vgl=True' the network output and the label are printed
                for every evaluated ECG."""
                net.eval()
                z = 0
    
                for i in range(len(test_x)):
                    x = np.swapaxes(test_x[i], 0, 1)
                    x = Variable(torch.Tensor(x))
                    x = x.unsqueeze(0)
                    #cuda
                    x = x.cuda()
                    out = net(x)
                    if torch.argmax(out) == test_y[test_y.index[i]]:
                        z += 1
                    if werte_vgl:
                        print(out[0], test_y[test_y.index[i]])
                return z/len(test_x)
                
  train()
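train() fills net.history_loss and net.history_eval (the vis_loss and vis_ev lists defined above are not used in this version of the code). A small plotting sketch, assuming matplotlib is available:

  # Sketch: plot the recorded loss and evaluation accuracy
  # (net.history_eval holds one value per 50 training iterations).
  import matplotlib.pyplot as plt
  plt.plot(net.history_loss, label='loss')
  plt.plot([i*50 for i in range(len(net.history_eval))], net.history_eval, label='evaluation accuracy')
  plt.xlabel('iteration')
  plt.legend()
  plt.show()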