Merge branch 'main' of https://gitty.informatik.hs-mannheim.de/3016498/ANLP_WS24_CA2

commit 1947212d40

 LSTM.py | 51
@@ -6,6 +6,9 @@ from sklearn.metrics import mean_squared_error, r2_score
 from torch.optim.lr_scheduler import ReduceLROnPlateau
 import time
 from tqdm import tqdm
+from Datasets import GloveDataset as HumorDataset
+import Datasets
+import dataset_helper
 
 
 class LSTMNetwork(nn.Module):
@@ -17,7 +20,8 @@ class LSTMNetwork(nn.Module):
 
     def forward(self, x):
         lstm_out, _ = self.lstm(x)
-        return self.fc(self.dropout(lstm_out[:, -1, :]))
+        # print(lstm_out)
+        return self.fc(self.dropout(lstm_out))
 
 
 def compute_metrics(predictions, labels):
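Note on the forward() change above: dropping the [:, -1, :] slice turns the head's output from one prediction per sequence into one prediction per timestep. A minimal shape check, with illustrative sizes only (the actual LSTM construction is outside this diff; batch_first=True is assumed):

    import torch
    import torch.nn as nn

    lstm = nn.LSTM(input_size=100, hidden_size=1024, num_layers=2, batch_first=True)
    fc = nn.Linear(1024, 1)

    x = torch.randn(4, 128, 100)   # (batch, seq_len, input_dim)
    out, _ = lstm(x)               # (4, 128, 1024)
    print(fc(out[:, -1, :]).shape) # torch.Size([4, 1])      -- old: last timestep only
    print(fc(out).shape)           # torch.Size([4, 128, 1]) -- new: every timestep

With the new variant, outputs.squeeze() in the training loop below yields a (batch, seq_len) tensor rather than a (batch,) one, so the loss now compares per-timestep predictions against the labels.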
@@ -47,8 +51,8 @@ def train_model(model, train_loader, val_loader, test_loader, epochs=10, device=
 
         for batch in tqdm(train_loader, desc=f"Epoch {epoch+1}/{epochs}", ncols=100):
             optimizer.zero_grad()
-            inputs = batch['input_ids'].to(device)
-            labels = batch['labels'].to(device)
+            inputs = batch[0].float().to(device)#batch['input_ids'].to(device)
+            labels = batch[1].float().to(device)#batch['labels'].to(device)
             outputs = model(inputs)
             loss = criterion(outputs.squeeze(), labels)
             loss.backward()
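The batch unpacking above switches from dict-style access (batch['input_ids']) to index-style access (batch[0], batch[1]), i.e. the loader now yields (inputs, labels) pairs instead of dicts. A minimal sketch of a dataset with that interface (a hypothetical stand-in; the real GloveDataset lives in Datasets.py and is not shown in this diff):

    import torch
    from torch.utils.data import Dataset

    class PairDataset(Dataset):
        """Yields (features, label) tuples, so DataLoader batches
        are indexed as batch[0] / batch[1]."""
        def __init__(self, features, labels):
            self.features = torch.as_tensor(features)
            self.labels = torch.as_tensor(labels)

        def __len__(self):
            return len(self.labels)

        def __getitem__(self, idx):
            return self.features[idx], self.labels[idx]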
@@ -65,8 +69,8 @@ def train_model(model, train_loader, val_loader, test_loader, epochs=10, device=
 
         with torch.no_grad():
             for batch in val_loader:
-                inputs = batch['input_ids'].to(device)
-                labels = batch['labels'].to(device)
+                inputs = batch[0].float().to(device)#batch['input_ids'].to(device)
+                labels = batch[1].float().to(device)#batch['labels'].to(device)
                 outputs = model(inputs)
                 val_loss += criterion(outputs.squeeze(), labels).item()
                 val_preds.extend(outputs.squeeze().cpu().numpy())
@@ -78,8 +82,8 @@ def train_model(model, train_loader, val_loader, test_loader, epochs=10, device=
 
         with torch.no_grad():
             for batch in test_loader:
-                inputs = batch['input_ids'].to(device)
-                labels = batch['labels'].to(device)
+                inputs = batch[0].float().to(device)#batch['input_ids'].to(device)
+                labels = batch[1].float().to(device)#batch['labels'].to(device)
                 outputs = model(inputs)
                 test_preds.extend(outputs.squeeze().cpu().numpy())
                 test_labels.extend(labels.cpu().numpy())
@@ -101,7 +105,7 @@ def train_model(model, train_loader, val_loader, test_loader, epochs=10, device=
         if test_r2 > best_test_r2:
             best_test_r2 = test_r2
             torch.save(model.state_dict(), "best_lstm_model.pth")
-            print(f"🚀 New best model saved (R2: {test_r2:.4f})")
+            print(f"New best model saved (R2: {test_r2:.4f})")
 
         if avg_val_loss < best_val_loss:
             best_val_loss = avg_val_loss
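compute_metrics itself is unchanged and its body is not part of this diff; given the sklearn imports at the top of the file and the R2 print above, it presumably wraps calls like these (a hedged reconstruction, not the actual project code):

    import numpy as np
    from sklearn.metrics import mean_squared_error, r2_score

    def compute_metrics(predictions, labels):
        # Regression metrics on flat prediction/label arrays.
        mse = mean_squared_error(labels, predictions)
        return mse, np.sqrt(mse), r2_score(labels, predictions)  # mse, rmse, r2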
@@ -109,25 +113,41 @@ def train_model(model, train_loader, val_loader, test_loader, epochs=10, device=
         else:
             counter += 1
             if counter >= patience:
-                print("⛔ Early stopping triggered!")
+                print("Early stopping triggered!")
                 break
 
     return history
 
 
 if __name__ == "__main__":
-    data_path = '/content/drive/MyDrive/Colab Notebooks/ANLP_WS24_CA2/data/embedded_padded'
-    train_dataset = torch.load(f'{data_path}/train.pt')
-    test_dataset = torch.load(f'{data_path}/test.pt')
-    val_dataset = torch.load(f'{data_path}/val.pt')
 
-    input_dim = 100
+    input_dim = 128
     hidden_dim = 1024
     num_layers = 2
     output_dim = 1
     dropout = 0.2
     batch_size = 256
     epochs = 5
 
+    DATA_PATH = "data/hack.csv"
+    GLOVE_PATH = "data/glove.6b.100d.txt"
+    EMBEDDING_DIM = 100
+    TEST_SIZE = 0.1
+    VAL_SIZE = 0.1
+    params = {"max_len":128}
+    # data_path = 'data/embedded_padded'
+    embedding_matrix, word_index, vocab_size, d_model = dataset_helper.get_embedding_matrix(
+        gloVe_path=GLOVE_PATH, emb_len=EMBEDDING_DIM)
+
+    X, y = dataset_helper.load_preprocess_data(path_data=DATA_PATH, verbose=True)
+
+    # Split the data
+    data_split = dataset_helper.split_data(X, y, test_size=TEST_SIZE, val_size=VAL_SIZE)
+
+    # Dataset and DataLoader
+    train_dataset = Datasets.GloveDataset(data_split['train']['X'], data_split['train']['y'], word_index, max_len=params["max_len"])
+    val_dataset = Datasets.GloveDataset(data_split['val']['X'], data_split['val']['y'], word_index, max_len=params["max_len"])
+    test_dataset = Datasets.GloveDataset(data_split['test']['X'], data_split['test']['y'], word_index, max_len=params["max_len"])
+
     train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
     val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
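The new __main__ block builds the GloVe pipeline at runtime instead of loading pre-embedded tensors from Google Drive. get_embedding_matrix comes from the project's dataset_helper module and its body is not shown here, but a typical GloVe-matrix loader matching the call signature above follows this pattern (an illustrative sketch only, not the project's implementation):

    import numpy as np

    def get_embedding_matrix(gloVe_path, emb_len):
        # Parse "word v1 v2 ... v<emb_len>" lines into a word -> vector map.
        vectors = {}
        with open(gloVe_path, encoding="utf-8") as f:
            for line in f:
                parts = line.rstrip().split(" ")
                vectors[parts[0]] = np.asarray(parts[1:], dtype=np.float32)

        # Row 0 is reserved for padding; word indices start at 1.
        word_index = {w: i + 1 for i, w in enumerate(vectors)}
        matrix = np.zeros((len(vectors) + 1, emb_len), dtype=np.float32)
        for w, i in word_index.items():
            matrix[i] = vectors[w]
        return matrix, word_index, len(word_index) + 1, emb_len

The four return values line up with the unpacking in the diff: embedding_matrix, word_index, vocab_size, d_model.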
@@ -135,5 +155,4 @@ if __name__ == "__main__":
 
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
     model = LSTMNetwork(input_dim=input_dim, hidden_dim=hidden_dim, num_layers=num_layers, output_dim=output_dim, dropout=dropout).to(device)
-
     history = train_model(model, train_loader, val_loader, test_loader, epochs=epochs, device=device)