From 77c92993089fd05dd26c198f10aeea6f2218e598 Mon Sep 17 00:00:00 2001
From: mahehsma
Date: Fri, 7 Jun 2024 09:36:01 +0200
Subject: [PATCH] added random forest classifier

---
 Experiments.ipynb | 259 ++++++++++++++++++++++++++++------------------
 1 file changed, 160 insertions(+), 99 deletions(-)

diff --git a/Experiments.ipynb b/Experiments.ipynb
index 87d8d45..49f0f8c 100644
--- a/Experiments.ipynb
+++ b/Experiments.ipynb
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 4,
    "id": "initial_id",
    "metadata": {
     "jupyter": {
@@ -12,12 +12,14 @@
    "outputs": [],
    "source": [
     "import pandas as pd\n",
-    "from sklearn.preprocessing import MinMaxScaler, StandardScaler"
+    "from sklearn.preprocessing import MinMaxScaler, StandardScaler\n",
+    "from sklearn.model_selection import KFold\n",
+    "from sklearn import decomposition"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 21,
+   "execution_count": 2,
    "id": "67503952-9074-4cdb-9d7e-d9142f7c319c",
    "metadata": {},
    "outputs": [
@@ -216,7 +218,7 @@
        "[5 rows x 28 columns]"
       ]
      },
-     "execution_count": 21,
+     "execution_count": 2,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -252,7 +254,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 18,
+   "execution_count": 6,
    "id": "2bbee865-c000-43da-84d9-ce7e04874110",
    "metadata": {},
    "outputs": [],
@@ -271,7 +273,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 41,
+   "execution_count": 20,
    "id": "38eb4f87-ca3c-4ecf-a8ca-29422822d933",
    "metadata": {},
    "outputs": [
@@ -280,56 +282,56 @@
      "output_type": "stream",
      "text": [
       "Training fold 0 for 20 epochs\n",
-      "Train samples:\t267\n",
-      "Test samples:\t30\n",
-      "Accuracy of fold 0: 0.9\n",
+      "\tTrain samples:\t267\n",
+      "\tTest samples:\t30\n",
+      "\tAccuracy of fold 0: 0.8666666666666667\n",
       "Training fold 1 for 20 epochs\n",
-      "Train samples:\t267\n",
-      "Test samples:\t30\n",
-      "Accuracy of fold 1: 0.8666666666666667\n",
+      "\tTrain samples:\t267\n",
+      "\tTest samples:\t30\n",
+      "\tAccuracy of fold 1: 0.8\n",
       "Training fold 2 for 20 epochs\n",
-      "Train samples:\t267\n",
-      "Test samples:\t30\n",
-      "Accuracy of fold 2: 0.8666666666666667\n",
+      "\tTrain samples:\t267\n",
+      "\tTest samples:\t30\n",
+      "\tAccuracy of fold 2: 0.9\n",
       "Training fold 3 for 20 epochs\n",
-      "Train samples:\t267\n",
-      "Test samples:\t30\n",
-      "Accuracy of fold 3: 0.9\n",
+      "\tTrain samples:\t267\n",
+      "\tTest samples:\t30\n",
+      "\tAccuracy of fold 3: 0.9\n",
       "Training fold 4 for 20 epochs\n",
-      "Train samples:\t267\n",
-      "Test samples:\t30\n",
-      "Accuracy of fold 4: 0.9\n",
+      "\tTrain samples:\t267\n",
+      "\tTest samples:\t30\n",
+      "\tAccuracy of fold 4: 0.8666666666666667\n",
       "Training fold 5 for 20 epochs\n",
-      "Train samples:\t267\n",
-      "Test samples:\t30\n",
-      "Accuracy of fold 5: 0.8333333333333334\n",
+      "\tTrain samples:\t267\n",
+      "\tTest samples:\t30\n",
+      "\tAccuracy of fold 5: 0.8\n",
       "Training fold 6 for 20 epochs\n",
-      "Train samples:\t267\n",
-      "Test samples:\t30\n",
-      "Accuracy of fold 6: 0.7666666666666667\n",
+      "\tTrain samples:\t267\n",
+      "\tTest samples:\t30\n",
+      "\tAccuracy of fold 6: 0.8333333333333334\n",
       "Training fold 7 for 20 epochs\n",
-      "Train samples:\t268\n",
-      "Test samples:\t29\n",
-      "Accuracy of fold 7: 0.8275862068965517\n",
+      "\tTrain samples:\t268\n",
+      "\tTest samples:\t29\n",
+      "\tAccuracy of fold 7: 0.8620689655172413\n",
       "Training fold 8 for 20 epochs\n",
-      "Train samples:\t268\n",
-      "Test samples:\t29\n",
-      "Accuracy of fold 8: 0.7586206896551724\n",
+      "\tTrain samples:\t268\n",
+      "\tTest samples:\t29\n",
+      "\tAccuracy of fold 8: 0.7241379310344828\n",
       "Training fold 9 for 20 epochs\n",
-      "Train samples:\t268\n",
-      "Test samples:\t29\n",
-      "Accuracy of fold 9: 0.7586206896551724\n",
-      "Avg accuracy 0.837816091954023\n"
+      "\tTrain samples:\t268\n",
+      "\tTest samples:\t29\n",
+      "\tAccuracy of fold 9: 0.896551724137931\n",
+      "Avg accuracy 0.8449425287356321\n"
      ]
     }
    ],
    "source": [
-    "from sklearn.model_selection import KFold\n",
-    "from sklearn import decomposition\n",
     "import tensorflow as tf\n",
     "\n",
+    "use_pca = True\n",
     "# number of components extracted from the pca\n",
     "n_features = 8\n",
+    "n_features = n_features if use_pca else len(X.columns)\n",
     "\n",
     "epochs = 20\n",
     "k_folds = 10\n",
@@ -345,27 +347,30 @@
     "    X_train, X_test = X.iloc[train_idx], X.iloc[test_idx]\n",
     "    y_train, y_test = y[train_idx], y[test_idx]\n",
     "\n",
-    "    print(f'Train samples:\\t{len(X_train)}')\n",
-    "    print(f'Test samples:\\t{len(X_test)}')\n",
+    "    print(f'\\tTrain samples:\\t{len(X_train)}')\n",
+    "    print(f'\\tTest samples:\\t{len(X_test)}')\n",
     "\n",
-    "    # do pca based on the train data of the given fold to extract 'n_features'\n",
-    "    pca = decomposition.PCA(n_components=n_features)\n",
-    "    pca.fit(X_train)\n",
-    "    X_train = pca.transform(X_train)\n",
+    "    if use_pca:\n",
+    "        # do pca based on the train data of the given fold to extract 'n_features'\n",
+    "        pca = decomposition.PCA(n_components=n_features)\n",
+    "        pca.fit(X_train)\n",
+    "        X_train = pca.transform(X_train)\n",
     "\n",
     "    # train the model using the components extracted from pca\n",
     "    model = get_model(n_features)\n",
     "    model.fit(X_train, y_train, epochs=epochs, verbose=0)\n",
     "\n",
-    "    # transform test data using on the pca model trained on the train data\n",
-    "    X_test = pca.transform(X_test)\n",
+    "    if use_pca:\n",
+    "        # transform test data using on the pca model trained on the train data\n",
+    "        X_test = pca.transform(X_test)\n",
+    "    \n",
     "    y_pred = model.predict(X_test, verbose=0)\n",
-    "    y_pred = y_pred > 0.5\n",
+    "    y_pred = y_pred > 0.5 # threshold to binarize\n",
     "\n",
     "    # calculate the accuracy of the train data for the current fold\n",
     "    accuracy = sum(y_pred == y_test)[0] / len(y_pred)\n",
     "    accuracies.append(accuracy)\n",
-    "    print(f'Accuracy of fold {i}: {accuracy}')\n",
+    "    print(f'\\tAccuracy of fold {i}: {accuracy}')\n",
     "\n",
     "# calculate the average accuracy over all folds\n",
     "avg_accuracy = sum(accuracies) / len(accuracies)\n",
@@ -374,7 +379,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 42,
+   "execution_count": 22,
    "id": "95215693-47c9-4202-92f5-efbc65bc32c9",
    "metadata": {},
    "outputs": [
@@ -383,16 +388,14 @@
      "output_type": "stream",
      "text": [
       "Training fold 0 for 20 epochs\n",
-      "Train samples:\t237\n",
-      "Test samples:\t60\n"
+      "\tTrain samples:\t237\n",
+      "\tTest samples:\t60\n"
      ]
     },
     {
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "C:\\Users\\maxwi\\anaconda3\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:870: FutureWarning: The default value of `n_init` will change from 10 to 'auto' in 1.4. Set the value of `n_init` explicitly to suppress the warning\n",
-      "  warnings.warn(\n",
       "C:\\Users\\maxwi\\anaconda3\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1382: UserWarning: KMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. You can avoid it by setting the environment variable OMP_NUM_THREADS=1.\n",
       "  warnings.warn(\n"
      ]
@@ -401,20 +404,16 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "[0 1 1 0 0 0 0 0 1 1 1 0 1 0 0 0 0 0 0 0 1 0 0 1 1 0 0 0 0 1 0 1 0 1 0 0 1\n",
-      " 1 1 1 1 1 0 0 0 1 0 1 0 0 0 1 0 0 1 1 1 1 0 1]\n",
-      "Accuracy of fold 0: 0.5833333333333334\n",
+      "\tAccuracy of fold 0: 0.5833333333333334\n",
       "Training fold 1 for 20 epochs\n",
-      "Train samples:\t237\n",
-      "Test samples:\t60\n"
+      "\tTrain samples:\t237\n",
+      "\tTest samples:\t60\n"
      ]
     },
     {
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "C:\\Users\\maxwi\\anaconda3\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:870: FutureWarning: The default value of `n_init` will change from 10 to 'auto' in 1.4. Set the value of `n_init` explicitly to suppress the warning\n",
-      "  warnings.warn(\n",
       "C:\\Users\\maxwi\\anaconda3\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1382: UserWarning: KMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. You can avoid it by setting the environment variable OMP_NUM_THREADS=1.\n",
       "  warnings.warn(\n"
      ]
@@ -423,20 +422,16 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "[1 0 1 0 1 1 0 0 1 0 0 1 1 1 0 0 1 0 0 1 1 0 0 1 0 0 0 0 0 1 1 1 0 0 1 1 1\n",
-      " 0 0 0 0 0 0 1 0 1 1 1 1 1 1 1 1 1 0 0 0 1 1 1]\n",
-      "Accuracy of fold 1: 0.5\n",
+      "\tAccuracy of fold 1: 0.5\n",
       "Training fold 2 for 20 epochs\n",
-      "Train samples:\t238\n",
-      "Test samples:\t59\n"
+      "\tTrain samples:\t238\n",
+      "\tTest samples:\t59\n"
      ]
     },
     {
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "C:\\Users\\maxwi\\anaconda3\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:870: FutureWarning: The default value of `n_init` will change from 10 to 'auto' in 1.4. Set the value of `n_init` explicitly to suppress the warning\n",
-      "  warnings.warn(\n",
       "C:\\Users\\maxwi\\anaconda3\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1382: UserWarning: KMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. You can avoid it by setting the environment variable OMP_NUM_THREADS=1.\n",
       "  warnings.warn(\n"
      ]
@@ -445,20 +440,16 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "[0 0 0 0 1 0 0 1 1 0 0 1 0 1 1 0 0 0 1 1 0 1 0 0 1 0 1 1 1 0 1 1 0 0 0 0 0\n",
-      " 0 1 1 0 1 1 1 0 1 0 1 0 0 0 1 0 0 0 0 1 1 0]\n",
-      "Accuracy of fold 2: 0.559322033898305\n",
+      "\tAccuracy of fold 2: 0.559322033898305\n",
       "Training fold 3 for 20 epochs\n",
-      "Train samples:\t238\n",
-      "Test samples:\t59\n"
+      "\tTrain samples:\t238\n",
+      "\tTest samples:\t59\n"
      ]
     },
     {
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "C:\\Users\\maxwi\\anaconda3\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:870: FutureWarning: The default value of `n_init` will change from 10 to 'auto' in 1.4. Set the value of `n_init` explicitly to suppress the warning\n",
-      "  warnings.warn(\n",
       "C:\\Users\\maxwi\\anaconda3\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1382: UserWarning: KMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. You can avoid it by setting the environment variable OMP_NUM_THREADS=1.\n",
       "  warnings.warn(\n"
      ]
@@ -467,20 +458,16 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "[0 1 0 1 1 1 0 0 0 1 0 1 1 0 1 0 1 1 1 1 0 1 0 0 0 0 1 1 1 0 1 0 1 0 1 0 1\n",
-      " 1 1 1 1 0 0 1 1 1 0 0 1 0 1 1 1 0 0 0 1 1 1]\n",
-      "Accuracy of fold 3: 0.576271186440678\n",
+      "\tAccuracy of fold 3: 0.576271186440678\n",
       "Training fold 4 for 20 epochs\n",
-      "Train samples:\t238\n",
-      "Test samples:\t59\n"
+      "\tTrain samples:\t238\n",
+      "\tTest samples:\t59\n"
      ]
     },
     {
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "C:\\Users\\maxwi\\anaconda3\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:870: FutureWarning: The default value of `n_init` will change from 10 to 'auto' in 1.4. Set the value of `n_init` explicitly to suppress the warning\n",
-      "  warnings.warn(\n",
       "C:\\Users\\maxwi\\anaconda3\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1382: UserWarning: KMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. You can avoid it by setting the environment variable OMP_NUM_THREADS=1.\n",
       "  warnings.warn(\n"
      ]
@@ -489,9 +476,7 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "[1 1 1 1 1 0 0 0 1 0 0 0 1 1 1 1 1 1 1 1 1 1 1 0 0 0 1 1 0 1 0 1 1 0 1 1 1\n",
-      " 1 0 1 0 1 0 0 0 1 1 0 1 0 0 0 1 0 0 0 0 0 1]\n",
-      "Accuracy of fold 4: 0.5254237288135594\n",
+      "\tAccuracy of fold 4: 0.5254237288135594\n",
       "Avg accuracy 0.5488700564971751\n"
      ]
     }
@@ -499,6 +484,7 @@
    "source": [
     "from sklearn.cluster import KMeans\n",
     "\n",
+    "use_pca = True\n",
     "# number of components extracted from the pca\n",
     "n_features = 10\n",
     "\n",
@@ -515,28 +501,29 @@
     "    X_train, X_test = X.iloc[train_idx], X.iloc[test_idx]\n",
     "    y_train, y_test = y[train_idx], y[test_idx]\n",
     "\n",
-    "    print(f'Train samples:\\t{len(X_train)}')\n",
-    "    print(f'Test samples:\\t{len(X_test)}')\n",
+    "    print(f'\\tTrain samples:\\t{len(X_train)}')\n",
+    "    print(f'\\tTest samples:\\t{len(X_test)}')\n",
     "\n",
-    "    # do pca based on the train data of the given fold to extract 'n_features'\n",
-    "    #pca = decomposition.PCA(n_components=n_features)\n",
-    "    #pca.fit(X_train)\n",
-    "    #X_train = pca.transform(X_train)\n",
+    "    if use_pca:\n",
+    "        # do pca based on the train data of the given fold to extract 'n_features'\n",
+    "        pca = decomposition.PCA(n_components=n_features)\n",
+    "        pca.fit(X_train)\n",
+    "        X_train = pca.transform(X_train)\n",
     "\n",
-    "    model = KMeans(n_clusters=2)\n",
+    "    model = KMeans(n_clusters=2, n_init=10)\n",
     "    model.fit(X_train)\n",
     "\n",
-    "    #X_test = pca.transform(X_test)\n",
+    "    if use_pca:\n",
+    "        X_test = pca.transform(X_test)\n",
+    "    \n",
     "    y_pred = model.predict(X_test)\n",
-    "    print(y_pred)\n",
-    "    \n",
     "\n",
     "    # calculate the accuracy of the train data for the current fold\n",
     "    accuracy1 = sum(y_pred == y_test)[0] / len(y_pred)\n",
     "    accuracy2 = sum(y_pred != y_test)[0] / len(y_pred)\n",
     "    accuracy = max(accuracy1, accuracy2)\n",
     "    accuracies.append(accuracy)\n",
-    "    print(f'Accuracy of fold {i}: {accuracy}')\n",
+    "    print(f'\\tAccuracy of fold {i}: {accuracy}')\n",
     "\n",
     "# calculate the average accuracy over all folds\n",
     "avg_accuracy = sum(accuracies) / len(accuracies)\n",
@@ -545,11 +532,85 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 23,
    "id": "880302e4-82c1-47b9-9fe3-cb3567511639",
    "metadata": {},
-   "outputs": [],
-   "source": []
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Training fold 0 for 20 epochs\n",
+      "\tTrain samples:\t237\n",
+      "\tTest samples:\t60\n",
+      "\tAccuracy of fold 0: 0.85\n",
+      "Training fold 1 for 20 epochs\n",
+      "\tTrain samples:\t237\n",
+      "\tTest samples:\t60\n",
+      "\tAccuracy of fold 1: 0.9\n",
+      "Training fold 2 for 20 epochs\n",
+      "\tTrain samples:\t238\n",
+      "\tTest samples:\t59\n",
+      "\tAccuracy of fold 2: 0.847457627118644\n",
+      "Training fold 3 for 20 epochs\n",
+      "\tTrain samples:\t238\n",
+      "\tTest samples:\t59\n",
+      "\tAccuracy of fold 3: 0.7627118644067796\n",
+      "Training fold 4 for 20 epochs\n",
+      "\tTrain samples:\t238\n",
+      "\tTest samples:\t59\n",
+      "\tAccuracy of fold 4: 0.7796610169491526\n",
+      "Avg accuracy 0.8279661016949152\n"
+     ]
+    }
+   ],
+   "source": [
+    "from sklearn.ensemble import RandomForestClassifier\n",
+    "\n",
+    "use_pca = True\n",
+    "# number of components extracted from the pca\n",
+    "n_features = 10\n",
+    "\n",
+    "k_folds = 5\n",
+    "\n",
+    "# used to split the dataset into k folds\n",
+    "kf = KFold(n_splits=k_folds)\n",
+    "\n",
+    "accuracies = []\n",
+    "for i, (train_idx, test_idx) in enumerate(kf.split(X[numeric_columns])):\n",
+    "    print(f'Training fold {i} for {epochs} epochs')\n",
+    "\n",
+    "    # extract train and test data from the cleaned dataset\n",
+    "    X_train, X_test = X.iloc[train_idx], X.iloc[test_idx]\n",
+    "    y_train, y_test = y[train_idx], y[test_idx]\n",
+    "    y_train, y_test = y_train[:, 0], y_test[:, 0]\n",
+    "\n",
+    "    print(f'\\tTrain samples:\\t{len(X_train)}')\n",
+    "    print(f'\\tTest samples:\\t{len(X_test)}')\n",
+    "\n",
+    "    if use_pca:\n",
+    "        # do pca based on the train data of the given fold to extract 'n_features'\n",
+    "        pca = decomposition.PCA(n_components=n_features)\n",
+    "        pca.fit(X_train)\n",
+    "        X_train = pca.transform(X_train)\n",
+    "\n",
+    "    model = RandomForestClassifier(max_depth=2, random_state=0)\n",
+    "    model.fit(X_train, y_train)\n",
+    "\n",
+    "    if use_pca:\n",
+    "        X_test = pca.transform(X_test)\n",
+    "    \n",
+    "    y_pred = model.predict(X_test)\n",
+    "\n",
+    "    # calculate the accuracy of the train data for the current fold\n",
+    "    accuracy = sum(y_pred == y_test) / len(y_pred)\n",
+    "    accuracies.append(accuracy)\n",
+    "    print(f'\\tAccuracy of fold {i}: {accuracy}')\n",
+    "\n",
+    "# calculate the average accuracy over all folds\n",
+    "avg_accuracy = sum(accuracies) / len(accuracies)\n",
+    "print(f'Avg accuracy {avg_accuracy}')"
+   ]
   }
  ],
 "metadata": {