simplified output messages

master
mahehsma 2024-06-07 09:51:06 +02:00
parent 77c9299308
commit 84e749f9c0
1 changed file with 102 additions and 103 deletions


@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 1,
"id": "initial_id",
"metadata": {
"jupyter": {
@@ -254,7 +254,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 3,
"id": "2bbee865-c000-43da-84d9-ce7e04874110",
"metadata": {},
"outputs": [],
@@ -273,7 +273,7 @@
},
{
"cell_type": "code",
"execution_count": 20,
"execution_count": 4,
"id": "38eb4f87-ca3c-4ecf-a8ca-29422822d933",
"metadata": {},
"outputs": [
@@ -281,47 +281,40 @@
"name": "stdout",
"output_type": "stream",
"text": [
"Training fold 0 for 20 epochs\n",
"\tTrain samples:\t267\n",
"\tTest samples:\t30\n",
"\tAccuracy of fold 0: 0.8666666666666667\n",
"Training fold 1 for 20 epochs\n",
"\tTrain samples:\t267\n",
"\tTest samples:\t30\n",
"\tAccuracy of fold 1: 0.8\n",
"Training fold 2 for 20 epochs\n",
"\tTrain samples:\t267\n",
"\tTest samples:\t30\n",
"\tAccuracy of fold 2: 0.9\n",
"Training fold 3 for 20 epochs\n",
"\tTrain samples:\t267\n",
"\tTest samples:\t30\n",
"\tAccuracy of fold 3: 0.9\n",
"Training fold 4 for 20 epochs\n",
"\tTrain samples:\t267\n",
"\tTest samples:\t30\n",
"\tAccuracy of fold 4: 0.8666666666666667\n",
"Training fold 5 for 20 epochs\n",
"\tTrain samples:\t267\n",
"\tTest samples:\t30\n",
"\tAccuracy of fold 5: 0.8\n",
"Training fold 6 for 20 epochs\n",
"\tTrain samples:\t267\n",
"\tTest samples:\t30\n",
"\tAccuracy of fold 6: 0.8333333333333334\n",
"Training fold 7 for 20 epochs\n",
"\tTrain samples:\t268\n",
"\tTest samples:\t29\n",
"\tAccuracy of fold 7: 0.8620689655172413\n",
"Training fold 8 for 20 epochs\n",
"\tTrain samples:\t268\n",
"\tTest samples:\t29\n",
"\tAccuracy of fold 8: 0.7241379310344828\n",
"Training fold 9 for 20 epochs\n",
"\tTrain samples:\t268\n",
"\tTest samples:\t29\n",
"\tAccuracy of fold 9: 0.896551724137931\n",
"Avg accuracy 0.8449425287356321\n"
"Training 10 folds for 20 epochs\n",
"Fold 0\n",
"\tTrain samples:\t267\tTest samples:\t30\n",
"\tAccuracy: 90.000%\n",
"Fold 1\n",
"\tTrain samples:\t267\tTest samples:\t30\n",
"\tAccuracy: 80.000%\n",
"Fold 2\n",
"\tTrain samples:\t267\tTest samples:\t30\n",
"\tAccuracy: 90.000%\n",
"Fold 3\n",
"\tTrain samples:\t267\tTest samples:\t30\n",
"\tAccuracy: 90.000%\n",
"Fold 4\n",
"\tTrain samples:\t267\tTest samples:\t30\n",
"WARNING:tensorflow:5 out of the last 5 calls to <function TensorFlowTrainer.make_predict_function.<locals>.one_step_on_data_distributed at 0x0000023D0BD63C40> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n",
"\tAccuracy: 90.000%\n",
"Fold 5\n",
"\tTrain samples:\t267\tTest samples:\t30\n",
"WARNING:tensorflow:6 out of the last 6 calls to <function TensorFlowTrainer.make_predict_function.<locals>.one_step_on_data_distributed at 0x0000023D0D548CC0> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n",
"\tAccuracy: 86.667%\n",
"Fold 6\n",
"\tTrain samples:\t267\tTest samples:\t30\n",
"\tAccuracy: 80.000%\n",
"Fold 7\n",
"\tTrain samples:\t268\tTest samples:\t29\n",
"\tAccuracy: 86.207%\n",
"Fold 8\n",
"\tTrain samples:\t268\tTest samples:\t29\n",
"\tAccuracy: 79.310%\n",
"Fold 9\n",
"\tTrain samples:\t268\tTest samples:\t29\n",
"\tAccuracy: 82.759%\n",
"Avg accuracy 85.494%\n"
]
}
],
@@ -340,15 +333,16 @@
"kf = KFold(n_splits=k_folds)\n",
"\n",
"accuracies = []\n",
"print(f'Training {k_folds} folds for {epochs} epochs')\n",
"for i, (train_idx, test_idx) in enumerate(kf.split(X)):\n",
" print(f'Training fold {i} for {epochs} epochs')\n",
"\n",
" print(f'Fold {i}')\n",
" \n",
" # extract train and test data from the cleaned dataset\n",
" X_train, X_test = X.iloc[train_idx], X.iloc[test_idx]\n",
" y_train, y_test = y[train_idx], y[test_idx]\n",
"\n",
" print(f'\\tTrain samples:\\t{len(X_train)}')\n",
" print(f'\\tTest samples:\\t{len(X_test)}')\n",
" print(f'\\tTrain samples:\\t{len(X_train)}\\tTest samples:\\t{len(X_test)}')\n",
"\n",
" if use_pca:\n",
" # do pca based on the train data of the given fold to extract 'n_features'\n",
@@ -370,16 +364,16 @@
" # calculate the accuracy of the train data for the current fold\n",
" accuracy = sum(y_pred == y_test)[0] / len(y_pred)\n",
" accuracies.append(accuracy)\n",
" print(f'\\tAccuracy of fold {i}: {accuracy}')\n",
" print(f'\\tAccuracy: {accuracy:.3%}')\n",
"\n",
"# calculate the average accuracy over all folds\n",
"avg_accuracy = sum(accuracies) / len(accuracies)\n",
"print(f'Avg accuracy {avg_accuracy}')"
"print(f'Avg accuracy {avg_accuracy:.3%}')"
]
},
{
"cell_type": "code",
"execution_count": 22,
"execution_count": 5,
"id": "95215693-47c9-4202-92f5-efbc65bc32c9",
"metadata": {},
"outputs": [
@@ -387,9 +381,9 @@
"name": "stdout",
"output_type": "stream",
"text": [
"Training fold 0 for 20 epochs\n",
"\tTrain samples:\t237\n",
"\tTest samples:\t60\n"
"Training 5 folds\n",
"Fold 0\n",
"\tTrain samples:\t237\tTest samples:\t60\n"
]
},
{
@@ -404,10 +398,10 @@
"name": "stdout",
"output_type": "stream",
"text": [
"\tAccuracy of fold 0: 0.5833333333333334\n",
"Training fold 1 for 20 epochs\n",
"\tTrain samples:\t237\n",
"\tTest samples:\t60\n"
"\tAccuracy 58.333%\n",
"\n",
"Fold 1\n",
"\tTrain samples:\t237\tTest samples:\t60\n"
]
},
{
@@ -422,10 +416,10 @@
"name": "stdout",
"output_type": "stream",
"text": [
"\tAccuracy of fold 1: 0.5\n",
"Training fold 2 for 20 epochs\n",
"\tTrain samples:\t238\n",
"\tTest samples:\t59\n"
"\tAccuracy 50.000%\n",
"\n",
"Fold 2\n",
"\tTrain samples:\t238\tTest samples:\t59\n"
]
},
{
@@ -440,10 +434,10 @@
"name": "stdout",
"output_type": "stream",
"text": [
"\tAccuracy of fold 2: 0.559322033898305\n",
"Training fold 3 for 20 epochs\n",
"\tTrain samples:\t238\n",
"\tTest samples:\t59\n"
"\tAccuracy 55.932%\n",
"\n",
"Fold 3\n",
"\tTrain samples:\t238\tTest samples:\t59\n"
]
},
{
@@ -458,10 +452,10 @@
"name": "stdout",
"output_type": "stream",
"text": [
"\tAccuracy of fold 3: 0.576271186440678\n",
"Training fold 4 for 20 epochs\n",
"\tTrain samples:\t238\n",
"\tTest samples:\t59\n"
"\tAccuracy 57.627%\n",
"\n",
"Fold 4\n",
"\tTrain samples:\t238\tTest samples:\t59\n"
]
},
{
@@ -476,8 +470,9 @@
"name": "stdout",
"output_type": "stream",
"text": [
"\tAccuracy of fold 4: 0.5254237288135594\n",
"Avg accuracy 0.5488700564971751\n"
"\tAccuracy 52.542%\n",
"\n",
"Avg accuracy 54.887%\n"
]
}
],
@@ -494,15 +489,16 @@
"kf = KFold(n_splits=k_folds)\n",
"\n",
"accuracies = []\n",
"print(f'Training {k_folds} folds')\n",
"for i, (train_idx, test_idx) in enumerate(kf.split(X[numeric_columns])):\n",
" print(f'Training fold {i} for {epochs} epochs')\n",
"\n",
" print(f'Fold {i}')\n",
" \n",
" # extract train and test data from the cleaned dataset\n",
" X_train, X_test = X.iloc[train_idx], X.iloc[test_idx]\n",
" y_train, y_test = y[train_idx], y[test_idx]\n",
"\n",
" print(f'\\tTrain samples:\\t{len(X_train)}')\n",
" print(f'\\tTest samples:\\t{len(X_test)}')\n",
" print(f'\\tTrain samples:\\t{len(X_train)}\\tTest samples:\\t{len(X_test)}')\n",
"\n",
" if use_pca:\n",
" # do pca based on the train data of the given fold to extract 'n_features'\n",
@@ -523,16 +519,17 @@
" accuracy2 = sum(y_pred != y_test)[0] / len(y_pred)\n",
" accuracy = max(accuracy1, accuracy2)\n",
" accuracies.append(accuracy)\n",
" print(f'\\tAccuracy of fold {i}: {accuracy}')\n",
" print(f'\\tAccuracy {accuracy:.3%}')\n",
" print()\n",
"\n",
"# calculate the average accuracy over all folds\n",
"avg_accuracy = sum(accuracies) / len(accuracies)\n",
"print(f'Avg accuracy {avg_accuracy}')"
"print(f'Avg accuracy {avg_accuracy:.3%}')"
]
},
{
"cell_type": "code",
"execution_count": 23,
"execution_count": 6,
"id": "880302e4-82c1-47b9-9fe3-cb3567511639",
"metadata": {},
"outputs": [
@@ -540,27 +537,28 @@
"name": "stdout",
"output_type": "stream",
"text": [
"Training fold 0 for 20 epochs\n",
"\tTrain samples:\t237\n",
"\tTest samples:\t60\n",
"\tAccuracy of fold 0: 0.85\n",
"Training fold 1 for 20 epochs\n",
"\tTrain samples:\t237\n",
"\tTest samples:\t60\n",
"\tAccuracy of fold 1: 0.9\n",
"Training fold 2 for 20 epochs\n",
"\tTrain samples:\t238\n",
"\tTest samples:\t59\n",
"\tAccuracy of fold 2: 0.847457627118644\n",
"Training fold 3 for 20 epochs\n",
"\tTrain samples:\t238\n",
"\tTest samples:\t59\n",
"\tAccuracy of fold 3: 0.7627118644067796\n",
"Training fold 4 for 20 epochs\n",
"\tTrain samples:\t238\n",
"\tTest samples:\t59\n",
"\tAccuracy of fold 4: 0.7796610169491526\n",
"Avg accuracy 0.8279661016949152\n"
"Training 5 folds\n",
"Fold 0\n",
"\tTrain samples:\t237\tTest samples:\t60\n",
"\tAccuracy 85.000%\n",
"\n",
"Fold 1\n",
"\tTrain samples:\t237\tTest samples:\t60\n",
"\tAccuracy 90.000%\n",
"\n",
"Fold 2\n",
"\tTrain samples:\t238\tTest samples:\t59\n",
"\tAccuracy 84.746%\n",
"\n",
"Fold 3\n",
"\tTrain samples:\t238\tTest samples:\t59\n",
"\tAccuracy 76.271%\n",
"\n",
"Fold 4\n",
"\tTrain samples:\t238\tTest samples:\t59\n",
"\tAccuracy 77.966%\n",
"\n",
"Avg accuracy 82.797%\n"
]
}
],
@@ -577,16 +575,16 @@
"kf = KFold(n_splits=k_folds)\n",
"\n",
"accuracies = []\n",
"print(f'Training {k_folds} folds')\n",
"for i, (train_idx, test_idx) in enumerate(kf.split(X[numeric_columns])):\n",
" print(f'Training fold {i} for {epochs} epochs')\n",
" print(f'Fold {i}')\n",
"\n",
" # extract train and test data from the cleaned dataset\n",
" X_train, X_test = X.iloc[train_idx], X.iloc[test_idx]\n",
" y_train, y_test = y[train_idx], y[test_idx]\n",
" y_train, y_test = y_train[:, 0], y_test[:, 0]\n",
"\n",
" print(f'\\tTrain samples:\\t{len(X_train)}')\n",
" print(f'\\tTest samples:\\t{len(X_test)}')\n",
" print(f'\\tTrain samples:\\t{len(X_train)}\\tTest samples:\\t{len(X_test)}')\n",
"\n",
" if use_pca:\n",
" # do pca based on the train data of the given fold to extract 'n_features'\n",
@@ -605,11 +603,12 @@
" # calculate the accuracy of the train data for the current fold\n",
" accuracy = sum(y_pred == y_test) / len(y_pred)\n",
" accuracies.append(accuracy)\n",
" print(f'\\tAccuracy of fold {i}: {accuracy}')\n",
" print(f'\\tAccuracy {accuracy:.3%}')\n",
" print()\n",
"\n",
"# calculate the average accuracy over all folds\n",
"avg_accuracy = sum(accuracies) / len(accuracies)\n",
"print(f'Avg accuracy {avg_accuracy}')"
"print(f'Avg accuracy {avg_accuracy:.3%}')"
]
}
],
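For context, below is a minimal, standalone sketch of the simplified logging pattern this commit introduces: the fold and epoch counts are announced once up front, each fold prints its train/test sample counts on a single line, and accuracies are formatted as percentages. The classifier, the synthetic data, and the variable names other than those visible in the diff are stand-ins for illustration only; the notebook itself trains a different model.

# Sketch of the simplified per-fold output (synthetic data and a stand-in classifier,
# not the notebook's actual model or dataset).
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold

X, y = make_classification(n_samples=297, n_features=10, random_state=0)  # hypothetical data
k_folds, epochs = 10, 20

kf = KFold(n_splits=k_folds)
accuracies = []
print(f'Training {k_folds} folds for {epochs} epochs')
for i, (train_idx, test_idx) in enumerate(kf.split(X)):
    print(f'Fold {i}')

    # extract train and test data for the current fold
    X_train, X_test = X[train_idx], X[test_idx]
    y_train, y_test = y[train_idx], y[test_idx]
    print(f'\tTrain samples:\t{len(X_train)}\tTest samples:\t{len(X_test)}')

    # stand-in classifier; the notebook trains its own model here
    clf = LogisticRegression(max_iter=1000).fit(X_train, y_train)
    accuracy = (clf.predict(X_test) == y_test).mean()
    accuracies.append(accuracy)
    print(f'\tAccuracy: {accuracy:.3%}')

# average accuracy over all folds, printed as a percentage
avg_accuracy = sum(accuracies) / len(accuracies)
print(f'Avg accuracy {avg_accuracy:.3%}')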