lstm update

main
arman 2025-02-09 11:35:41 +01:00
parent 6c859703fd
commit 340d915a29
1 changed file with 59 additions and 61 deletions

@@ -28,13 +28,7 @@ class ImprovedLSTMBinaryClassifier(nn.Module):
                             dropout=dropout,
                             bidirectional=False)
         self.layer_norm = nn.LayerNorm(hidden_dim)
-        # Additional fully connected layers without ReLU
-        self.fc1 = nn.Linear(hidden_dim, 128)
-        self.fc2 = nn.Linear(128, 64)
-        self.fc3 = nn.Linear(64, 32)
-        self.fc4 = nn.Linear(32, 1)
+        self.fc = nn.Linear(hidden_dim, 1)
         self.sigmoid = nn.Sigmoid()
         self.dropout = nn.Dropout(dropout)
@@ -43,43 +37,11 @@ class ImprovedLSTMBinaryClassifier(nn.Module):
         lstm_out = self.dropout(lstm_out)
         pooled = lstm_out[:, -1, :]  # last hidden state
         normalized = self.layer_norm(pooled)
-        # Several fully connected layers
-        x = self.fc1(normalized)
-        x = self.fc2(x)
-        x = self.fc3(x)
-        x = self.fc4(x)
-        return self.sigmoid(x)
+        logits = self.fc(normalized)
+        return self.sigmoid(logits)

 # Training and evaluation
-if __name__ == "__main__":
-    # Load data (assumes the embedded data has already been prepared)
-    data_path = '/content/drive/MyDrive/Colab Notebooks/ANLP_WS24_CA2/data/embedded_padded'
-    train_dataset = torch.load(data_path + '/train.pt')
-    test_dataset = torch.load(data_path + '/test.pt')
-    val_dataset = torch.load(data_path + '/val.pt')
-
-    # Hyperparameters
-    input_dim = 100
-    hidden_dim = 256
-    num_layers = 2
-    dropout = 0.3
-    batch_size = 64
-
-    # DataLoader
-    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
-    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
-    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
-
-    # Initialize the model
-    model = ImprovedLSTMBinaryClassifier(
-        input_dim=input_dim,
-        hidden_dim=hidden_dim,
-        num_layers=num_layers,
-        dropout=dropout
-    ).to(device)
+def train_model(model, train_loader, val_loader, test_loader, epochs=10):
     criterion = nn.BCELoss()
     optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=1e-5)
     scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=2, verbose=True)
@@ -88,10 +50,8 @@ if __name__ == "__main__":
     best_test_accuracy = 0
     patience = 3
     counter = 0
     history = {'train_loss': [], 'val_loss': [], 'test_acc': [], 'test_f1': []}
-    epochs = 5

     for epoch in range(epochs):
         # Training
         model.train()
@@ -107,7 +67,7 @@ if __name__ == "__main__":
             loss = criterion(outputs, labels)
             loss.backward()
-            nn.utils.clip_grad_norm_(model.parameters(), 1)
+            nn.utils.clip_grad_norm_(model.parameters(), 5)  # gradient clipping
             optimizer.step()

             total_loss += loss.item()
@@ -171,3 +131,41 @@ if __name__ == "__main__":
         if counter >= patience:
             print("⛔ Early stopping triggered!")
             break
+
+    return history
+
+
+if __name__ == "__main__":
+    # Load data (assumes the embedded data has already been prepared)
+    data_path = '/content/drive/MyDrive/Colab Notebooks/ANLP_WS24_CA2/data/embedded_padded'
+    train_dataset = torch.load(data_path + '/train.pt')
+    test_dataset = torch.load(data_path + '/test.pt')
+    val_dataset = torch.load(data_path + '/val.pt')
+
+    # Hyperparameters
+    input_dim = 100
+    hidden_dim = 256
+    num_layers = 2
+    dropout = 0.3
+    batch_size = 64
+
+    # DataLoader
+    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
+    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
+    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
+
+    # Initialize the model
+    model = ImprovedLSTMBinaryClassifier(
+        input_dim=input_dim,
+        hidden_dim=hidden_dim,
+        num_layers=num_layers,
+        dropout=dropout
+    ).to(device)
+
+    # Start training
+    history = train_model(
+        model,
+        train_loader,
+        val_loader,
+        test_loader,
+        epochs=5
+    )
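
For orientation, a minimal self-contained sketch of the model as it stands after this commit. The imports, the `device` definition, the `batch_first=True` flag, and the dummy-input shape check are assumptions filled in from context, since they sit outside the hunks shown above. One note on the design choice: the removed fc1–fc4 stack had no nonlinearity between the layers ("without ReLU"), so it composed to a single affine map; collapsing it to one `nn.Linear(hidden_dim, 1)` reduces parameters without shrinking the family of functions the head can represent.

# Sketch of the post-commit model; imports and `device` are assumptions,
# as they fall outside the diff context shown above.
import torch
import torch.nn as nn

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class ImprovedLSTMBinaryClassifier(nn.Module):
    def __init__(self, input_dim, hidden_dim, num_layers, dropout):
        super().__init__()
        # batch_first=True is inferred from the lstm_out[:, -1, :]
        # indexing in forward().
        self.lstm = nn.LSTM(input_dim, hidden_dim,
                            num_layers=num_layers,
                            batch_first=True,
                            dropout=dropout,
                            bidirectional=False)
        self.layer_norm = nn.LayerNorm(hidden_dim)
        self.fc = nn.Linear(hidden_dim, 1)   # single head after this commit
        self.sigmoid = nn.Sigmoid()
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        lstm_out, _ = self.lstm(x)            # (batch, seq_len, hidden_dim)
        lstm_out = self.dropout(lstm_out)
        pooled = lstm_out[:, -1, :]           # last time step's hidden state
        normalized = self.layer_norm(pooled)
        logits = self.fc(normalized)
        return self.sigmoid(logits)           # probabilities for nn.BCELoss

# Quick shape check with dummy data (batch of 4, sequence length 10):
model = ImprovedLSTMBinaryClassifier(100, 256, 2, 0.3).to(device)
probs = model(torch.randn(4, 10, 100, device=device))
print(probs.shape)  # torch.Size([4, 1])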