master
nicole 2024-06-07 13:49:51 +02:00
commit cdb6d7be35
3 changed files with 500 additions and 214 deletions

269
Cleaning.ipynb 100644
View File

@ -0,0 +1,269 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "c95fbd16-09ed-497b-892a-473496150996",
"metadata": {},
"source": [
"<h1>Cleaning</h1>\n",
"<p>Import the dataset using the ucimlrepo package</p>"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "3eb339fa-ef85-4544-9ad0-bc22d4de9f1a",
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>age</th>\n",
" <th>sex</th>\n",
" <th>cp</th>\n",
" <th>trestbps</th>\n",
" <th>chol</th>\n",
" <th>fbs</th>\n",
" <th>restecg</th>\n",
" <th>thalach</th>\n",
" <th>exang</th>\n",
" <th>oldpeak</th>\n",
" <th>slope</th>\n",
" <th>ca</th>\n",
" <th>thal</th>\n",
" <th>goal</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>63</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>145</td>\n",
" <td>233</td>\n",
" <td>1</td>\n",
" <td>2</td>\n",
" <td>150</td>\n",
" <td>0</td>\n",
" <td>2.3</td>\n",
" <td>3</td>\n",
" <td>0.0</td>\n",
" <td>6.0</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>67</td>\n",
" <td>1</td>\n",
" <td>4</td>\n",
" <td>160</td>\n",
" <td>286</td>\n",
" <td>0</td>\n",
" <td>2</td>\n",
" <td>108</td>\n",
" <td>1</td>\n",
" <td>1.5</td>\n",
" <td>2</td>\n",
" <td>3.0</td>\n",
" <td>3.0</td>\n",
" <td>2</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>67</td>\n",
" <td>1</td>\n",
" <td>4</td>\n",
" <td>120</td>\n",
" <td>229</td>\n",
" <td>0</td>\n",
" <td>2</td>\n",
" <td>129</td>\n",
" <td>1</td>\n",
" <td>2.6</td>\n",
" <td>2</td>\n",
" <td>2.0</td>\n",
" <td>7.0</td>\n",
" <td>1</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>37</td>\n",
" <td>1</td>\n",
" <td>3</td>\n",
" <td>130</td>\n",
" <td>250</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>187</td>\n",
" <td>0</td>\n",
" <td>3.5</td>\n",
" <td>3</td>\n",
" <td>0.0</td>\n",
" <td>3.0</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>41</td>\n",
" <td>0</td>\n",
" <td>2</td>\n",
" <td>130</td>\n",
" <td>204</td>\n",
" <td>0</td>\n",
" <td>2</td>\n",
" <td>172</td>\n",
" <td>0</td>\n",
" <td>1.4</td>\n",
" <td>1</td>\n",
" <td>0.0</td>\n",
" <td>3.0</td>\n",
" <td>0</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" age sex cp trestbps chol fbs restecg thalach exang oldpeak slope \\\n",
"0 63 1 1 145 233 1 2 150 0 2.3 3 \n",
"1 67 1 4 160 286 0 2 108 1 1.5 2 \n",
"2 67 1 4 120 229 0 2 129 1 2.6 2 \n",
"3 37 1 3 130 250 0 0 187 0 3.5 3 \n",
"4 41 0 2 130 204 0 2 172 0 1.4 1 \n",
"\n",
" ca thal goal \n",
"0 0.0 6.0 0 \n",
"1 3.0 3.0 2 \n",
"2 2.0 7.0 1 \n",
"3 0.0 3.0 0 \n",
"4 0.0 3.0 0 "
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from ucimlrepo import fetch_ucirepo\n",
"import pandas as pd\n",
"\n",
"# fetch dataset \n",
"heart_disease = fetch_ucirepo(id=45) \n",
" \n",
"# data (as pandas dataframes) \n",
"X = heart_disease.data.features \n",
"y = heart_disease.data.targets \n",
"\n",
"df = pd.concat([X, y], axis=1)\n",
"df = df.rename(columns={'num':'goal'})\n",
"\n",
"df.head()"
]
},
{
"cell_type": "markdown",
"id": "8c5ab8b9-e46a-4968-b0c8-fe393f093f73",
"metadata": {},
"source": [
"<p>Get an overview of all missing values. As there are only a few, those rows can be dropped.</p>"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "6f7e6a3a-63cb-40e2-8746-937c24b184ef",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"age 0\n",
"sex 0\n",
"cp 0\n",
"trestbps 0\n",
"chol 0\n",
"fbs 0\n",
"restecg 0\n",
"thalach 0\n",
"exang 0\n",
"oldpeak 0\n",
"slope 0\n",
"ca 4\n",
"thal 2\n",
"goal 0\n",
"dtype: int64"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df.isna().sum()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "d1639e92-d401-49fb-a1f1-67250ffa2c81",
"metadata": {},
"outputs": [],
"source": [
"df.dropna(inplace=True)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "d7bf2c46-7885-4dfe-a4e7-8b8439cf0434",
"metadata": {},
"outputs": [],
"source": [
"# save 'cleaned' dataset as csv file for further processing\n",
"df.to_csv('./data/dataset_cleaned.csv', index=False)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.7"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"id": "initial_id",
"metadata": {
"jupyter": {
@ -12,12 +12,14 @@
"outputs": [],
"source": [
"import pandas as pd\n",
"from sklearn.preprocessing import MinMaxScaler, StandardScaler"
"from sklearn.preprocessing import MinMaxScaler, StandardScaler\n",
"from sklearn.model_selection import KFold\n",
"from sklearn import decomposition"
]
},
{
"cell_type": "code",
"execution_count": 21,
"execution_count": 2,
"id": "67503952-9074-4cdb-9d7e-d9142f7c319c",
"metadata": {},
"outputs": [
@ -216,14 +218,13 @@
"[5 rows x 28 columns]"
]
},
"execution_count": 21,
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df = pd.read_csv('./data/dataset_cleaned.csv')\n",
"df.dropna(inplace=True)\n",
"\n",
"# extract all columns except 'goal' --> X\n",
"X = df.loc[:, df.columns != 'goal']\n",
@ -252,7 +253,7 @@
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": 3,
"id": "2bbee865-c000-43da-84d9-ce7e04874110",
"metadata": {},
"outputs": [],
@ -271,7 +272,7 @@
},
{
"cell_type": "code",
"execution_count": 41,
"execution_count": 4,
"id": "38eb4f87-ca3c-4ecf-a8ca-29422822d933",
"metadata": {},
"outputs": [
@ -279,57 +280,50 @@
"name": "stdout",
"output_type": "stream",
"text": [
"Training fold 0 for 20 epochs\n",
"Train samples:\t267\n",
"Test samples:\t30\n",
"Accuracy of fold 0: 0.9\n",
"Training fold 1 for 20 epochs\n",
"Train samples:\t267\n",
"Test samples:\t30\n",
"Accuracy of fold 1: 0.8666666666666667\n",
"Training fold 2 for 20 epochs\n",
"Train samples:\t267\n",
"Test samples:\t30\n",
"Accuracy of fold 2: 0.8666666666666667\n",
"Training fold 3 for 20 epochs\n",
"Train samples:\t267\n",
"Test samples:\t30\n",
"Accuracy of fold 3: 0.9\n",
"Training fold 4 for 20 epochs\n",
"Train samples:\t267\n",
"Test samples:\t30\n",
"Accuracy of fold 4: 0.9\n",
"Training fold 5 for 20 epochs\n",
"Train samples:\t267\n",
"Test samples:\t30\n",
"Accuracy of fold 5: 0.8333333333333334\n",
"Training fold 6 for 20 epochs\n",
"Train samples:\t267\n",
"Test samples:\t30\n",
"Accuracy of fold 6: 0.7666666666666667\n",
"Training fold 7 for 20 epochs\n",
"Train samples:\t268\n",
"Test samples:\t29\n",
"Accuracy of fold 7: 0.8275862068965517\n",
"Training fold 8 for 20 epochs\n",
"Train samples:\t268\n",
"Test samples:\t29\n",
"Accuracy of fold 8: 0.7586206896551724\n",
"Training fold 9 for 20 epochs\n",
"Train samples:\t268\n",
"Test samples:\t29\n",
"Accuracy of fold 9: 0.7586206896551724\n",
"Avg accuracy 0.837816091954023\n"
"Training 10 folds for 20 epochs\n",
"Fold 0\n",
"\tTrain samples:\t267\tTest samples:\t30\n",
"\tAccuracy: 90.000%\n",
"Fold 1\n",
"\tTrain samples:\t267\tTest samples:\t30\n",
"\tAccuracy: 80.000%\n",
"Fold 2\n",
"\tTrain samples:\t267\tTest samples:\t30\n",
"\tAccuracy: 90.000%\n",
"Fold 3\n",
"\tTrain samples:\t267\tTest samples:\t30\n",
"\tAccuracy: 90.000%\n",
"Fold 4\n",
"\tTrain samples:\t267\tTest samples:\t30\n",
"WARNING:tensorflow:5 out of the last 5 calls to <function TensorFlowTrainer.make_predict_function.<locals>.one_step_on_data_distributed at 0x0000023D0BD63C40> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n",
"\tAccuracy: 90.000%\n",
"Fold 5\n",
"\tTrain samples:\t267\tTest samples:\t30\n",
"WARNING:tensorflow:6 out of the last 6 calls to <function TensorFlowTrainer.make_predict_function.<locals>.one_step_on_data_distributed at 0x0000023D0D548CC0> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n",
"\tAccuracy: 86.667%\n",
"Fold 6\n",
"\tTrain samples:\t267\tTest samples:\t30\n",
"\tAccuracy: 80.000%\n",
"Fold 7\n",
"\tTrain samples:\t268\tTest samples:\t29\n",
"\tAccuracy: 86.207%\n",
"Fold 8\n",
"\tTrain samples:\t268\tTest samples:\t29\n",
"\tAccuracy: 79.310%\n",
"Fold 9\n",
"\tTrain samples:\t268\tTest samples:\t29\n",
"\tAccuracy: 82.759%\n",
"Avg accuracy 85.494%\n"
]
}
],
"source": [
"from sklearn.model_selection import KFold\n",
"from sklearn import decomposition\n",
"import tensorflow as tf\n",
"\n",
"use_pca = True\n",
"# number of components extracted from the pca\n",
"n_features = 8\n",
"n_features = n_features if use_pca else len(X.columns)\n",
"\n",
"epochs = 20\n",
"k_folds = 10\n",
@ -338,43 +332,47 @@
"kf = KFold(n_splits=k_folds)\n",
"\n",
"accuracies = []\n",
"print(f'Training {k_folds} folds for {epochs} epochs')\n",
"for i, (train_idx, test_idx) in enumerate(kf.split(X)):\n",
" print(f'Training fold {i} for {epochs} epochs')\n",
"\n",
" print(f'Fold {i}')\n",
" \n",
" # extract train and test data from the cleaned dataset\n",
" X_train, X_test = X.iloc[train_idx], X.iloc[test_idx]\n",
" y_train, y_test = y[train_idx], y[test_idx]\n",
"\n",
" print(f'Train samples:\\t{len(X_train)}')\n",
" print(f'Test samples:\\t{len(X_test)}')\n",
" print(f'\\tTrain samples:\\t{len(X_train)}\\tTest samples:\\t{len(X_test)}')\n",
"\n",
" # do pca based on the train data of the given fold to extract 'n_features'\n",
" pca = decomposition.PCA(n_components=n_features)\n",
" pca.fit(X_train)\n",
" X_train = pca.transform(X_train)\n",
" if use_pca:\n",
" # do pca based on the train data of the given fold to extract 'n_features'\n",
" pca = decomposition.PCA(n_components=n_features)\n",
" pca.fit(X_train)\n",
" X_train = pca.transform(X_train)\n",
"\n",
" # train the model using the components extracted from pca\n",
" model = get_model(n_features)\n",
" model.fit(X_train, y_train, epochs=epochs, verbose=0)\n",
"\n",
" # transform test data using on the pca model trained on the train data\n",
" X_test = pca.transform(X_test)\n",
" if use_pca:\n",
"        # transform test data using the pca model trained on the train data\n",
" X_test = pca.transform(X_test)\n",
" \n",
" y_pred = model.predict(X_test, verbose=0)\n",
" y_pred = y_pred > 0.5\n",
" y_pred = y_pred > 0.5 # threshold to binarize\n",
"\n",
"    # calculate the accuracy on the test data for the current fold\n",
" accuracy = sum(y_pred == y_test)[0] / len(y_pred)\n",
" accuracies.append(accuracy)\n",
" print(f'Accuracy of fold {i}: {accuracy}')\n",
" print(f'\\tAccuracy: {accuracy:.3%}')\n",
"\n",
"# calculate the average accuracy over all folds\n",
"avg_accuracy = sum(accuracies) / len(accuracies)\n",
"print(f'Avg accuracy {avg_accuracy}')"
"print(f'Avg accuracy {avg_accuracy:.3%}')"
]
},
{
"cell_type": "code",
"execution_count": 42,
"execution_count": 5,
"id": "95215693-47c9-4202-92f5-efbc65bc32c9",
"metadata": {},
"outputs": [
@ -382,17 +380,15 @@
"name": "stdout",
"output_type": "stream",
"text": [
"Training fold 0 for 20 epochs\n",
"Train samples:\t237\n",
"Test samples:\t60\n"
"Training 5 folds\n",
"Fold 0\n",
"\tTrain samples:\t237\tTest samples:\t60\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"C:\\Users\\maxwi\\anaconda3\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:870: FutureWarning: The default value of `n_init` will change from 10 to 'auto' in 1.4. Set the value of `n_init` explicitly to suppress the warning\n",
" warnings.warn(\n",
"C:\\Users\\maxwi\\anaconda3\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1382: UserWarning: KMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. You can avoid it by setting the environment variable OMP_NUM_THREADS=1.\n",
" warnings.warn(\n"
]
@ -401,20 +397,16 @@
"name": "stdout",
"output_type": "stream",
"text": [
"[0 1 1 0 0 0 0 0 1 1 1 0 1 0 0 0 0 0 0 0 1 0 0 1 1 0 0 0 0 1 0 1 0 1 0 0 1\n",
" 1 1 1 1 1 0 0 0 1 0 1 0 0 0 1 0 0 1 1 1 1 0 1]\n",
"Accuracy of fold 0: 0.5833333333333334\n",
"Training fold 1 for 20 epochs\n",
"Train samples:\t237\n",
"Test samples:\t60\n"
"\tAccuracy 58.333%\n",
"\n",
"Fold 1\n",
"\tTrain samples:\t237\tTest samples:\t60\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"C:\\Users\\maxwi\\anaconda3\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:870: FutureWarning: The default value of `n_init` will change from 10 to 'auto' in 1.4. Set the value of `n_init` explicitly to suppress the warning\n",
" warnings.warn(\n",
"C:\\Users\\maxwi\\anaconda3\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1382: UserWarning: KMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. You can avoid it by setting the environment variable OMP_NUM_THREADS=1.\n",
" warnings.warn(\n"
]
@ -423,20 +415,16 @@
"name": "stdout",
"output_type": "stream",
"text": [
"[1 0 1 0 1 1 0 0 1 0 0 1 1 1 0 0 1 0 0 1 1 0 0 1 0 0 0 0 0 1 1 1 0 0 1 1 1\n",
" 0 0 0 0 0 0 1 0 1 1 1 1 1 1 1 1 1 0 0 0 1 1 1]\n",
"Accuracy of fold 1: 0.5\n",
"Training fold 2 for 20 epochs\n",
"Train samples:\t238\n",
"Test samples:\t59\n"
"\tAccuracy 50.000%\n",
"\n",
"Fold 2\n",
"\tTrain samples:\t238\tTest samples:\t59\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"C:\\Users\\maxwi\\anaconda3\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:870: FutureWarning: The default value of `n_init` will change from 10 to 'auto' in 1.4. Set the value of `n_init` explicitly to suppress the warning\n",
" warnings.warn(\n",
"C:\\Users\\maxwi\\anaconda3\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1382: UserWarning: KMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. You can avoid it by setting the environment variable OMP_NUM_THREADS=1.\n",
" warnings.warn(\n"
]
@ -445,20 +433,16 @@
"name": "stdout",
"output_type": "stream",
"text": [
"[0 0 0 0 1 0 0 1 1 0 0 1 0 1 1 0 0 0 1 1 0 1 0 0 1 0 1 1 1 0 1 1 0 0 0 0 0\n",
" 0 1 1 0 1 1 1 0 1 0 1 0 0 0 1 0 0 0 0 1 1 0]\n",
"Accuracy of fold 2: 0.559322033898305\n",
"Training fold 3 for 20 epochs\n",
"Train samples:\t238\n",
"Test samples:\t59\n"
"\tAccuracy 55.932%\n",
"\n",
"Fold 3\n",
"\tTrain samples:\t238\tTest samples:\t59\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"C:\\Users\\maxwi\\anaconda3\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:870: FutureWarning: The default value of `n_init` will change from 10 to 'auto' in 1.4. Set the value of `n_init` explicitly to suppress the warning\n",
" warnings.warn(\n",
"C:\\Users\\maxwi\\anaconda3\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1382: UserWarning: KMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. You can avoid it by setting the environment variable OMP_NUM_THREADS=1.\n",
" warnings.warn(\n"
]
@ -467,20 +451,16 @@
"name": "stdout",
"output_type": "stream",
"text": [
"[0 1 0 1 1 1 0 0 0 1 0 1 1 0 1 0 1 1 1 1 0 1 0 0 0 0 1 1 1 0 1 0 1 0 1 0 1\n",
" 1 1 1 1 0 0 1 1 1 0 0 1 0 1 1 1 0 0 0 1 1 1]\n",
"Accuracy of fold 3: 0.576271186440678\n",
"Training fold 4 for 20 epochs\n",
"Train samples:\t238\n",
"Test samples:\t59\n"
"\tAccuracy 57.627%\n",
"\n",
"Fold 4\n",
"\tTrain samples:\t238\tTest samples:\t59\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"C:\\Users\\maxwi\\anaconda3\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:870: FutureWarning: The default value of `n_init` will change from 10 to 'auto' in 1.4. Set the value of `n_init` explicitly to suppress the warning\n",
" warnings.warn(\n",
"C:\\Users\\maxwi\\anaconda3\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1382: UserWarning: KMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. You can avoid it by setting the environment variable OMP_NUM_THREADS=1.\n",
" warnings.warn(\n"
]
@ -489,16 +469,16 @@
"name": "stdout",
"output_type": "stream",
"text": [
"[1 1 1 1 1 0 0 0 1 0 0 0 1 1 1 1 1 1 1 1 1 1 1 0 0 0 1 1 0 1 0 1 1 0 1 1 1\n",
" 1 0 1 0 1 0 0 0 1 1 0 1 0 0 0 1 0 0 0 0 0 1]\n",
"Accuracy of fold 4: 0.5254237288135594\n",
"Avg accuracy 0.5488700564971751\n"
"\tAccuracy 52.542%\n",
"\n",
"Avg accuracy 54.887%\n"
]
}
],
"source": [
"from sklearn.cluster import KMeans\n",
"\n",
"use_pca = True\n",
"# number of components extracted from the pca\n",
"n_features = 10\n",
"\n",
@ -508,48 +488,127 @@
"kf = KFold(n_splits=k_folds)\n",
"\n",
"accuracies = []\n",
"print(f'Training {k_folds} folds')\n",
"for i, (train_idx, test_idx) in enumerate(kf.split(X[numeric_columns])):\n",
" print(f'Training fold {i} for {epochs} epochs')\n",
"\n",
" print(f'Fold {i}')\n",
" \n",
" # extract train and test data from the cleaned dataset\n",
" X_train, X_test = X.iloc[train_idx], X.iloc[test_idx]\n",
" y_train, y_test = y[train_idx], y[test_idx]\n",
"\n",
" print(f'Train samples:\\t{len(X_train)}')\n",
" print(f'Test samples:\\t{len(X_test)}')\n",
" print(f'\\tTrain samples:\\t{len(X_train)}\\tTest samples:\\t{len(X_test)}')\n",
"\n",
" # do pca based on the train data of the given fold to extract 'n_features'\n",
" #pca = decomposition.PCA(n_components=n_features)\n",
" #pca.fit(X_train)\n",
" #X_train = pca.transform(X_train)\n",
" if use_pca:\n",
" # do pca based on the train data of the given fold to extract 'n_features'\n",
" pca = decomposition.PCA(n_components=n_features)\n",
" pca.fit(X_train)\n",
" X_train = pca.transform(X_train)\n",
"\n",
" model = KMeans(n_clusters=2)\n",
" model = KMeans(n_clusters=2, n_init=10)\n",
" model.fit(X_train)\n",
"\n",
" #X_test = pca.transform(X_test)\n",
" if use_pca:\n",
" X_test = pca.transform(X_test)\n",
" \n",
" y_pred = model.predict(X_test)\n",
" print(y_pred)\n",
" \n",
"\n",
"    # calculate the accuracy on the test data for the current fold\n",
" accuracy1 = sum(y_pred == y_test)[0] / len(y_pred)\n",
" accuracy2 = sum(y_pred != y_test)[0] / len(y_pred)\n",
" accuracy = max(accuracy1, accuracy2)\n",
" accuracies.append(accuracy)\n",
" print(f'Accuracy of fold {i}: {accuracy}')\n",
" print(f'\\tAccuracy {accuracy:.3%}')\n",
" print()\n",
"\n",
"# calculate the average accuracy over all folds\n",
"avg_accuracy = sum(accuracies) / len(accuracies)\n",
"print(f'Avg accuracy {avg_accuracy}')"
"print(f'Avg accuracy {avg_accuracy:.3%}')"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 6,
"id": "880302e4-82c1-47b9-9fe3-cb3567511639",
"metadata": {},
"outputs": [],
"source": []
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Training 5 folds\n",
"Fold 0\n",
"\tTrain samples:\t237\tTest samples:\t60\n",
"\tAccuracy 85.000%\n",
"\n",
"Fold 1\n",
"\tTrain samples:\t237\tTest samples:\t60\n",
"\tAccuracy 90.000%\n",
"\n",
"Fold 2\n",
"\tTrain samples:\t238\tTest samples:\t59\n",
"\tAccuracy 84.746%\n",
"\n",
"Fold 3\n",
"\tTrain samples:\t238\tTest samples:\t59\n",
"\tAccuracy 76.271%\n",
"\n",
"Fold 4\n",
"\tTrain samples:\t238\tTest samples:\t59\n",
"\tAccuracy 77.966%\n",
"\n",
"Avg accuracy 82.797%\n"
]
}
],
"source": [
"from sklearn.ensemble import RandomForestClassifier\n",
"\n",
"use_pca = True\n",
"# number of components extracted from the pca\n",
"n_features = 10\n",
"\n",
"k_folds = 5\n",
"\n",
"# used to split the dataset into k folds\n",
"kf = KFold(n_splits=k_folds)\n",
"\n",
"accuracies = []\n",
"print(f'Training {k_folds} folds')\n",
"for i, (train_idx, test_idx) in enumerate(kf.split(X[numeric_columns])):\n",
" print(f'Fold {i}')\n",
"\n",
" # extract train and test data from the cleaned dataset\n",
" X_train, X_test = X.iloc[train_idx], X.iloc[test_idx]\n",
" y_train, y_test = y[train_idx], y[test_idx]\n",
" y_train, y_test = y_train[:, 0], y_test[:, 0]\n",
"\n",
" print(f'\\tTrain samples:\\t{len(X_train)}\\tTest samples:\\t{len(X_test)}')\n",
"\n",
" if use_pca:\n",
" # do pca based on the train data of the given fold to extract 'n_features'\n",
" pca = decomposition.PCA(n_components=n_features)\n",
" pca.fit(X_train)\n",
" X_train = pca.transform(X_train)\n",
"\n",
" model = RandomForestClassifier(max_depth=2, random_state=0)\n",
" model.fit(X_train, y_train)\n",
"\n",
" if use_pca:\n",
" X_test = pca.transform(X_test)\n",
" \n",
" y_pred = model.predict(X_test)\n",
"\n",
"    # calculate the accuracy on the test data for the current fold\n",
" accuracy = sum(y_pred == y_test) / len(y_pred)\n",
" accuracies.append(accuracy)\n",
" print(f'\\tAccuracy {accuracy:.3%}')\n",
" print()\n",
"\n",
"# calculate the average accuracy over all folds\n",
"avg_accuracy = sum(accuracies) / len(accuracies)\n",
"print(f'Avg accuracy {avg_accuracy:.3%}')"
]
}
],
"metadata": {

File diff suppressed because one or more lines are too long