# ANLP_WS24_CA2/cnn.py


import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from nltk.tokenize import word_tokenize
from torch.utils.data import DataLoader, Dataset
from sklearn.metrics import accuracy_score
import gensim
import nltk
import time
import matplotlib.pyplot as plt
# NLTK Downloads
nltk.download('punkt')  # 'punkt_tab' removed, since that resource does not exist
# Check if GPU is available
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device:', DEVICE)
# Maximum sequence length
MAX_LEN = 100
# Data helpers
def get_embedding(model, word):
    # Returns the vocabulary index of `word`; falls back to the <UNK> index for unseen words
    if word in model.wv:
        return model.wv.key_to_index[word]
    else:
        return unk_index


def encode_tokens(tokens):
    return [get_embedding(model_embedding, token) for token in tokens]


def pad_sequences(sequences, max_len):
    # Pad shorter sequences with unk_index and truncate longer ones to max_len
    return np.array([np.pad(seq, (0, max_len - len(seq)), mode='constant', constant_values=unk_index)
                     if len(seq) < max_len else seq[:max_len] for seq in sequences])

# Dataset class
class HumorDataset(Dataset):
    def __init__(self, encodings, labels):
        self.encodings = encodings
        self.labels = labels

    def __getitem__(self, idx):
        item = {'input_ids': torch.tensor(self.encodings[idx], dtype=torch.long)}
        item['labels'] = torch.tensor(self.labels[idx], dtype=torch.float)
        return item

    def __len__(self):
        return len(self.labels)

# CNN Model
class CNNBinaryClassifier(nn.Module):
    def __init__(self, vocab_size, embed_dim, num_filters, kernel_sizes, hidden_dim, dropout=0.1):
        super(CNNBinaryClassifier, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        self.conv_layers = nn.ModuleList([
            nn.Conv1d(in_channels=embed_dim, out_channels=num_filters, kernel_size=k)
            for k in kernel_sizes
        ])
        self.fc = nn.Linear(num_filters * len(kernel_sizes), hidden_dim)
        self.out = nn.Linear(hidden_dim, 1)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)
        self.sigmoid = nn.Sigmoid()

    def forward(self, input_ids):
        # (batch, seq_len) -> (batch, embed_dim, seq_len) so Conv1d convolves over the sequence
        embedded = self.embedding(input_ids).permute(0, 2, 1)
        conv_outs = [self.relu(conv(embedded)) for conv in self.conv_layers]
        # Global max pooling over the sequence dimension for each kernel size
        pooled_outs = [torch.max(out, dim=2)[0] for out in conv_outs]
        concatenated = torch.cat(pooled_outs, dim=1)
        fc_out = self.relu(self.fc(self.dropout(concatenated)))
        logits = self.out(fc_out)
        return self.sigmoid(logits)

# Main script
if __name__ == "__main__":
    # Load and process data
    df = pd.read_csv('/content/hack.csv')
    print(f"Loaded dataset: {df.shape}")
    X = df['text'].fillna("unknown").astype(str)
    y = df['is_humor']

    # Train-test split
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    # Tokenization with error handling
    train_tokens = []
    test_tokens = []
    for text in X_train:
        try:
            train_tokens.append(word_tokenize(text.lower()))
        except Exception as e:
            print(f"Error tokenizing: {text}. Error: {e}")
            train_tokens.append(["unknown"])
    for text in X_test:
        try:
            test_tokens.append(word_tokenize(text.lower()))
        except Exception as e:
            print(f"Error tokenizing: {text}. Error: {e}")
            test_tokens.append(["unknown"])
    print("Sample tokenization (Train):", train_tokens[:2])
    print("Sample tokenization (Test):", test_tokens[:2])
    # Train Word2Vec model
    model_embedding = gensim.models.Word2Vec(train_tokens, vector_size=100, window=5, min_count=1, workers=4)

    # Add unknown token
    model_embedding.wv.add_vector('<UNK>', np.zeros(model_embedding.vector_size))
    unk_index = model_embedding.wv.key_to_index['<UNK>']
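    # Note: pad_sequences below reuses unk_index as the padding value, so <UNK> doubles as
    # the padding token. A separate <PAD> index (not present in this script) would be a common
    # alternative if padded positions should stay distinguishable from unknown words.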

    # Encode tokens as vocabulary indices
    train_encodings = [encode_tokens(tokens) for tokens in train_tokens]
    test_encodings = [encode_tokens(tokens) for tokens in test_tokens]

    # Pad/truncate sequences to MAX_LEN, with validation
    train_encodings = pad_sequences(train_encodings, MAX_LEN)
    test_encodings = pad_sequences(test_encodings, MAX_LEN)
    if len(train_encodings) == 0 or len(test_encodings) == 0:
        raise ValueError("Tokenization or padding failed. Please check your input data.")

    # Create datasets
    train_dataset = HumorDataset(train_encodings, y_train.reset_index(drop=True))
    test_dataset = HumorDataset(test_encodings, y_test.reset_index(drop=True))

    # Model parameters
    vocab_size = len(model_embedding.wv.key_to_index)
    embed_dim = model_embedding.vector_size
    num_filters = 200
    kernel_sizes = [3, 4, 5]
    hidden_dim = 128
    dropout = 0.5
    model = CNNBinaryClassifier(vocab_size, embed_dim, num_filters, kernel_sizes, hidden_dim, dropout)
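    # Note: the nn.Embedding layer above is randomly initialised; the Word2Vec model is only
    # used for index lookup. A possible variant (not part of this script) would copy the
    # trained vectors into the embedding layer, e.g.:
    #   pretrained = torch.tensor(model_embedding.wv.vectors, dtype=torch.float)
    #   model.embedding.weight.data.copy_(pretrained)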

    # Training parameters
    epochs = 10
    batch_size = 8
    learning_rate = 2e-5

    # Optimizer and loss function
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    criterion = nn.BCELoss()
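    # Note: BCELoss matches the sigmoid applied in forward(). An alternative (not used here)
    # would be to return raw logits and switch to nn.BCEWithLogitsLoss, which is generally
    # more numerically stable.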

    # Data loaders
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

    # Move model to device
    model.to(DEVICE)
print("Starting training...")
train_losses = []
# Training loop
for epoch in range(epochs):
epoch_loss = 0
model.train()
for batch in train_loader:
optimizer.zero_grad()
input_ids = batch['input_ids'].to(DEVICE)
labels = batch['labels'].unsqueeze(1).to(DEVICE)
outputs = model(input_ids)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
train_loss = epoch_loss / len(train_loader)
train_losses.append(train_loss)
print(f"Epoch {epoch + 1}/{epochs}, Train Loss: {train_loss:.4f}")
    # Visualize training loss
    plt.figure(figsize=(10, 6))
    plt.plot(range(1, epochs + 1), train_losses, marker='o', linestyle='-', label='Train Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Training Loss Over Epochs')
    plt.legend()
    plt.grid(True)
    plt.show()
print("Starting evaluation...")
# Evaluation
model.eval()
predictions, true_labels = [], []
with torch.no_grad():
for batch in test_loader:
input_ids = batch['input_ids'].to(DEVICE)
labels = batch['labels'].unsqueeze(1).to(DEVICE)
outputs = model(input_ids)
preds = (outputs > 0.5).float()
predictions.extend(preds.cpu().numpy())
true_labels.extend(labels.cpu().numpy())
accuracy = accuracy_score(true_labels, predictions)
print(f"Final Accuracy: {accuracy:.4f}")