import random
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import numpy as np

import Datasets
import dataset_helper
import EarlyStopping
import ml_helper
import ml_history
import ml_train

SEED = 501
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
torch.backends.cudnn.deterministic = True


class EnhancedCNNRegressor(nn.Module):
    def __init__(self, vocab_size, embedding_dim, filter_sizes, num_filters, embedding_matrix, dropout, max_len):
        super(EnhancedCNNRegressor, self).__init__()
        # vocab_size is unused here because the embedding layer is built
        # directly from the pre-trained matrix.
        self.embedding = nn.Embedding.from_pretrained(embedding_matrix, freeze=False)

        # Convolutional branches with batch normalization, one per filter size.
        # max_len is passed in explicitly instead of reading the global
        # params dict from inside the class.
        self.convs = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(1, num_filters, (fs, embedding_dim)),
                nn.BatchNorm2d(num_filters),            # batch normalization
                nn.ReLU(),
                nn.MaxPool2d((max_len - fs + 1, 1)),    # max-pool over the full sequence
                nn.Dropout(dropout)                     # dropout after each branch
            ) for fs in filter_sizes
        ])

        # Fully connected layers
        self.fc1 = nn.Linear(len(filter_sizes) * num_filters, 128)  # additional dense layer
        self.fc2 = nn.Linear(128, 1)                                # output layer (regression)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        x = self.embedding(x).unsqueeze(1)  # [batch, 1, seq, embedding]
        # Pooling collapses the sequence dim: each branch yields [batch, num_filters]
        conv_outputs = [conv(x).squeeze(3).squeeze(2) for conv in self.convs]
        # Concatenate features from all filter sizes: [batch, len(filter_sizes) * num_filters]
        x = torch.cat(conv_outputs, 1)
        x = torch.relu(self.fc1(x))  # additional dense layer
        x = self.dropout(x)
        return self.fc2(x).squeeze(1)


if __name__ == '__main__':
    # Hyperparameters and configuration
    params = {
        # Config
        "max_len": 280,
        # Training
        "epochs": 25,
        "patience": 7,
        "batch_size": 32,
        "learning_rate": 0.001,
        "weight_decay": 5e-4,
        # Model
        "filter_sizes": [2, 3, 4, 5],
        "num_filters": 150,
        "dropout": 0.6
    }

    # Configs
    MODEL_NAME = 'CNN.pt'
    HIST_NAME = 'CNN_history'
    GLOVE_PATH = 'data/glove.6B.100d.txt'
    DATA_PATH = 'data/hack.csv'
    EMBEDDING_DIM = 100
    TEST_SIZE = 0.1
    VAL_SIZE = 0.1

    # Load and prepare the data
    embedding_matrix, word_index, vocab_size, d_model = dataset_helper.get_embedding_matrix(
        gloVe_path=GLOVE_PATH, emb_len=EMBEDDING_DIM)
    X, y = dataset_helper.load_preprocess_data(path_data=DATA_PATH, verbose=True)

    # Split the data
    data_split = dataset_helper.split_data(X, y, test_size=TEST_SIZE, val_size=VAL_SIZE)

    # Datasets and DataLoaders
    train_dataset = Datasets.GloveDataset(data_split['train']['X'], data_split['train']['y'], word_index,
                                          max_len=params["max_len"])
    val_dataset = Datasets.GloveDataset(data_split['val']['X'], data_split['val']['y'], word_index,
                                        max_len=params["max_len"])
    test_dataset = Datasets.GloveDataset(data_split['test']['X'], data_split['test']['y'], word_index,
                                         max_len=params["max_len"])

    train_loader = DataLoader(train_dataset, batch_size=params["batch_size"], shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=params["batch_size"], shuffle=False)
    test_loader = DataLoader(test_dataset, batch_size=params["batch_size"], shuffle=False)

    # Initialize the model
    model = EnhancedCNNRegressor(
        vocab_size=vocab_size,
        embedding_dim=EMBEDDING_DIM,
        filter_sizes=params["filter_sizes"],
        num_filters=params["num_filters"],
        embedding_matrix=embedding_matrix,
        dropout=params["dropout"],
        max_len=params["max_len"]
    )
    device = ml_helper.get_device(verbose=True, include_mps=False)
    model = model.to(device)

    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=params["learning_rate"],
                           weight_decay=params["weight_decay"])
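    # --- Optional shape sanity check (a minimal sketch added here, not part
    # of the original pipeline): build a throwaway model with a random
    # stand-in embedding matrix and confirm that a batch of token indices
    # comes out as [batch]. The names _check and _dummy are illustrative;
    # it assumes vocab_size returned above is a plain int. fork_rng keeps
    # the global RNG untouched so the seeded training run stays reproducible.
    with torch.random.fork_rng(devices=[]):
        _check = EnhancedCNNRegressor(
            vocab_size=vocab_size,
            embedding_dim=EMBEDDING_DIM,
            filter_sizes=params["filter_sizes"],
            num_filters=params["num_filters"],
            embedding_matrix=torch.randn(vocab_size, EMBEDDING_DIM),  # random stand-in for GloVe
            dropout=params["dropout"],
            max_len=params["max_len"]
        )
        _dummy = torch.randint(0, vocab_size, (4, params["max_len"]))
        assert _check(_dummy).shape == (4,), "unexpected output shape"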
    early_stopping = EarlyStopping.EarlyStoppingCallback(patience=params["patience"], verbose=True,
                                                         model_name=MODEL_NAME)
    hist = ml_history.History()

    # Training and validation
    for epoch in range(params["epochs"]):
        ml_train.train_epoch(model, train_loader, criterion, optimizer, device, hist, epoch, params["epochs"])
        val_rmse = ml_train.validate_epoch(model, val_loader, epoch, criterion, device, hist)

        early_stopping(val_rmse, model)
        if early_stopping.early_stop:
            print("Early stopping triggered.")
            break

    # Save the training history
    hist.save_history(HIST_NAME)

    # Load the best checkpoint
    model.load_state_dict(torch.load('models/checkpoints/' + MODEL_NAME))

    # Test evaluation
    test_labels, test_preds = ml_train.test_loop(model, test_loader, device)
    hist.add_test_results(test_labels, test_preds)

    # Save the history again, now including the test results
    hist.save_history(HIST_NAME)

    # RMSE, MAE, and R² score on the test set
    test_mae = mean_absolute_error(test_labels, test_preds)
    test_rmse = np.sqrt(mean_squared_error(test_labels, test_preds))
    test_r2 = r2_score(test_labels, test_preds)
    print(f"Test RMSE: {test_rmse:.4f}, Test MAE: {test_mae:.4f}, Test R²: {test_r2:.4f}")
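    # --- Optional single-text inference sketch (an addition, not part of the
    # original pipeline). It assumes GloveDataset accepts any list of raw
    # texts with numeric targets, exactly as with the splits above, and that
    # its loader yields (token_ids, target) batches; the 0.0 target is a
    # placeholder since only the prediction is of interest.
    model.eval()
    sample_dataset = Datasets.GloveDataset(["example input text"], [0.0], word_index,
                                           max_len=params["max_len"])
    sample_loader = DataLoader(sample_dataset, batch_size=1, shuffle=False)
    with torch.no_grad():
        for sample_x, _ in sample_loader:
            pred = model(sample_x.to(device))
            print(f"Sample prediction: {pred.item():.4f}")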