from example_physionet import X, Y  # import the data (PTB-XL example loader)

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import random
import numpy as np

# The Y index (ecg_id) starts at 1, so it is shifted by +1 relative to the X index.
y_train = Y['diagnostic_superclass'][:1000]
XY = [(X[i], y_train[i + 1]) for i in range(1000)]

y_test = Y['diagnostic_superclass'][1101:1201]
x_test = X[1101:1201]  # aligned with y_test: X[i] belongs to ecg_id i + 1


class Netzwerk(nn.Module):
    def __init__(self):
        super(Netzwerk, self).__init__()
        self.conv1 = nn.Conv1d(12, 12, kernel_size=3, stride=1, padding=1)
        self.pool1 = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)
        self.conv2 = nn.Conv1d(12, 4, kernel_size=2, padding=1)
        self.pool2 = nn.MaxPool1d(3, stride=2, padding=0)
        self.lin1 = nn.Linear(4 * 250, 30)
        self.lin2 = nn.Linear(30, 10)
        self.lin3 = nn.Linear(10, 2)

    def forward(self, x):
        x = self.pool1(F.relu(self.conv1(x)))
        x = self.pool2(F.relu(self.conv2(x)))
        x = x.view(-1, 4 * 250)  # flatten so the conv output can be fed into the linear layers
        x = F.relu(self.lin1(x))
        x = F.relu(self.lin2(x))
        x = self.lin3(x)
        return x

    def num_flat_features(self, x):
        """Not used in this script; returns the number of features when flattening x."""
        size = x.size()[1:]
        num = 1
        for i in size:
            num *= i
        return num


net = Netzwerk()
learning_rate = 0.1
batch_size = 40
optimizer = optim.SGD(net.parameters(), lr=learning_rate)


def is_norm(y):
    """Converts a label of the dataset from its text form into a one-hot vector the network can use."""
    if y == ['NORM']:
        return [1, 0]
    else:
        return [0, 1]


def train(epoch=50, b_s=batch_size):
    net.train()
    criterion = nn.MSELoss()  # use mean squared error as the loss function
    for i in range(epoch):
        random.shuffle(XY)  # draw a new random batch each epoch

        # input values: shape (b_s, 12, 1000), i.e. 12 leads with 1000 samples each
        my_inp = [XY[j][0].T for j in range(b_s)]
        my_in = torch.from_numpy(np.stack(my_inp)).float()

        # target values: one-hot vectors, shape (b_s, 2), matching the network output
        ziel = [is_norm(XY[k][1]) for k in range(b_s)]
        zielw = torch.tensor(ziel, dtype=torch.float32)

        optimizer.zero_grad()         # clear the gradients of the previous epoch
        out = net(my_in)              # forward propagation
        loss = criterion(out, zielw)  # compute the loss
        loss.backward()               # backward propagation
        optimizer.step()              # update weights and biases

        print(evaluate())  # print the evaluation on the independent test data


def evaluate(welche_richtig=False):
    """Counts how many of the 100 test ECGs the network classifies correctly.

    With welche_richtig=True the labels of the correctly classified ECGs are printed.
    """
    z = 0
    for i in range(100):
        x = torch.from_numpy(x_test[i].T).float()  # transpose to (12, 1000), as in train()
        x = x.unsqueeze(0)                         # add a batch dimension -> (1, 12, 1000)
        if torch.argmax(net(x)) == np.argmax(is_norm(y_test[1102 + i])):
            z += 1
            if welche_richtig:  # print the labels of the correctly classified test ECGs
                print(y_test[1102 + i])
    return z


train()
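
# ---------------------------------------------------------------------------
# Optional sanity check (a minimal sketch added here, not part of the original
# script): it assumes the input layout used above -- 12 leads with 1000 samples
# per record, i.e. a tensor of shape (batch, 12, 1000) -- and confirms that the
# network returns one (NORM, not-NORM) score pair per record. The all-zeros
# batch is an arbitrary placeholder, not real ECG data.
dummy = torch.zeros(2, 12, 1000)       # two fake ECG records
with torch.no_grad():                  # no gradients needed for a shape check
    assert net(dummy).shape == (2, 2)  # one score pair per record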