changed to single runs

parent c17d8dab1a
commit cfdef49a73
@@ -41,8 +41,9 @@ def oneTry(EPSILON, ALPHA, GAMMA, AMOUNT_RUNS, REWARD_ON_WIN, REWARD_ON_LOSE):
     for element in cookies_per_run:
         if element == 20:
             wins += 1
+        toAdd = 1 if element == 20 else 0
 
-    # print(f"Win percentage: {(wins/AMOUNT_RUNS)*100}%")
+    print(f"Win percentage: {(wins/AMOUNT_RUNS)*100}%")
 
     return cookies_per_run, wins
 
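For orientation, a minimal sketch of what the win-counting tail of oneTry does after this change. count_wins is a hypothetical stand-in for that tail only (the rest of oneTry, including the Q-learning loop that fills cookies_per_run, is not part of the hunk), and the indentation of the new toAdd line is inferred from the diff.

# Hedged sketch, not part of the commit: mirrors the diffed tail of oneTry.
def count_wins(cookies_per_run, amount_runs):
    """Count runs that ended with 20 cookies and print the win percentage."""
    wins = 0
    for element in cookies_per_run:
        if element == 20:
            wins += 1
        toAdd = 1 if element == 20 else 0  # per-element win flag added by the commit (unused within the shown hunk)

    print(f"Win percentage: {(wins/amount_runs)*100}%")  # now printed instead of commented out
    return cookies_per_run, wins


# Example: 3 wins out of 5 runs -> "Win percentage: 60.0%"
cookies, wins = count_wins([20, 13, 20, 20, 7], 5)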
||||||
main.py (8 changed lines)
@@ -2,8 +2,8 @@ from GenTunic.gen_tuning import gen_tuning_main
 from ReinforcmentLearning.learning import multipleTries
 
 
-EPSILON = 0.3
-ALPHA = 0.3
+EPSILON = 0.1
+ALPHA = 0.4
 GAMMA = 0.8
 
 AMOUNT_RUNS = 5000
@@ -13,5 +13,5 @@ REWARD_ON_WIN = 10
 REWARD_ON_LOSE = -450
 
 
-#multipleTries(EPSILON, ALPHA, GAMMA, REWARD_ON_WIN, REWARD_ON_LOSE)
-gen_tuning_main(AMOUNT_TRIES, AMOUNT_RUNS, REWARD_ON_WIN, REWARD_ON_LOSE)
+multipleTries(EPSILON, ALPHA, GAMMA,AMOUNT_TRIES, AMOUNT_RUNS, REWARD_ON_WIN, REWARD_ON_LOSE)
+#gen_tuning_main(AMOUNT_TRIES, AMOUNT_RUNS, REWARD_ON_WIN, REWARD_ON_LOSE)
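A hedged sketch of how main.py plausibly reads after this commit, assembled from the two hunks above. AMOUNT_TRIES is defined outside the shown hunks, so the value below is only a placeholder, and the imports require the project's GenTunic and ReinforcmentLearning packages.

# Hedged reconstruction, not the verbatim file.
from GenTunic.gen_tuning import gen_tuning_main
from ReinforcmentLearning.learning import multipleTries

EPSILON = 0.1        # exploration rate, lowered from 0.3 in this commit
ALPHA = 0.4          # learning rate, raised from 0.3
GAMMA = 0.8          # discount factor, unchanged

AMOUNT_RUNS = 5000
AMOUNT_TRIES = 10    # placeholder: the real value lies outside the shown hunks
REWARD_ON_WIN = 10
REWARD_ON_LOSE = -450

# The commit flips the entry point: single Q-learning runs via multipleTries
# instead of the genetic hyperparameter tuning.
multipleTries(EPSILON, ALPHA, GAMMA, AMOUNT_TRIES, AMOUNT_RUNS, REWARD_ON_WIN, REWARD_ON_LOSE)
#gen_tuning_main(AMOUNT_TRIES, AMOUNT_RUNS, REWARD_ON_WIN, REWARD_ON_LOSE)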