From a76d2c41d3730c5c6c6e2f6071b66741a8415324 Mon Sep 17 00:00:00 2001
From: Ruben Seitz
Date: Mon, 24 Nov 2025 11:24:27 +0100
Subject: [PATCH] added max iterations

---
 04_pacman_rl/pacman.py                 | 15 ++++--
 04_pacman_rl/reinforcement_learning.py | 63 ++++++++++++++++++++------
 2 files changed, 60 insertions(+), 18 deletions(-)

diff --git a/04_pacman_rl/pacman.py b/04_pacman_rl/pacman.py
index bf4d813..0fd24b8 100644
--- a/04_pacman_rl/pacman.py
+++ b/04_pacman_rl/pacman.py
@@ -166,14 +166,17 @@ def main():
         s_not_terminal = True
         labyrinth_copy = [list(row) for row in labyrinth]  # Create proper deep copy
         a = None
-        while s_not_terminal:
+        iteration = 0
+        max_iterations = 50  # Prevent infinite loops
+        while s_not_terminal and iteration < max_iterations:
+            iteration += 1
             print("s: " + str(s))
             print("q[s] before action: " + str(q[s]))
             a = rl.epsilon_greedy(q, s)  # 0 = Left; 1 = Right ; 2 = Up ; 3 = Down
             s_new, r, labyrinth_copy = rl.take_action(s, a, labyrinth_copy)
 
-            q[s][a] += round(alpha * (r + gamma * max(q[s_new]) - q[s][a]), 2)
+            q[s][a] += round(alpha * (r + gamma * rl.max_q(q, s_new, labyrinth) - q[s][a]), 2)
             # q[s_new][a_opposite_direction[a]] += round(alpha * (r + gamma * max(q[s]) - q[s_new][a_opposite_direction[a]]), 2)
 
             s = s_new
 
@@ -184,8 +187,14 @@ def main():
             # Check for collisions (game over if ghost catches pacman)
             if s[0] == s[2] and s[1] == s[3]:
                 s_not_terminal = False
+                q[s][a] = 0.01
+                print("There was just a collision!!!")
+                print("s: " + str(s))
 
-            # time.sleep(0.05)
+            time.sleep(0.025)
+
+        if iteration >= max_iterations:
+            print(f"Max iterations reached ({max_iterations}), breaking out of loop")
 
         s = (pacman.x, pacman.y, ghost.x, ghost.y)  # as a tuple so the state becomes hashable
         a = rl.epsilon_greedy(q, s)  # 0 = Left; 1 = Right ; 2 = Up ; 3 = Down
diff --git a/04_pacman_rl/reinforcement_learning.py b/04_pacman_rl/reinforcement_learning.py
index 1207f62..1768a9a 100644
--- a/04_pacman_rl/reinforcement_learning.py
+++ b/04_pacman_rl/reinforcement_learning.py
@@ -80,8 +80,8 @@ def epsilon_greedy(q, s, epsilon=0.2):
 
     a = q[s].index(q_max)
     return a
 
-    """
+    """
     if np.random.random() < epsilon:
         # Explore: choose random action (excluding blocked actions with Q=0)
         valid_actions = [i for i in range(len(q[s])) if q[s][i] > 0]
@@ -130,6 +130,52 @@ def bfs_distance(start, end, labyrinth):
 
     return float('inf')  # No path found
 
+def max_q(q, s_new, labyrinth):
+    """Calculate the maximum reward for all possible actions in state s_new"""
+    max_reward = float('-inf')
+    for a in range(4):
+        if q[s_new][a] > 0:  # Only consider valid (non-blocked) actions
+            s_test = list(s_new)
+            if a == 0:  # left
+                s_test[0] -= 1
+            elif a == 1:  # right
+                s_test[0] += 1
+            elif a == 2:  # up
+                s_test[1] -= 1
+            elif a == 3:  # down
+                s_test[1] += 1
+
+            reward = calc_reward(tuple(s_test), labyrinth)
+            max_reward = max(max_reward, reward)
+
+    return max_reward if max_reward != float('-inf') else 0.0
+
+def calc_reward(s_new, labyrinth):
+    # consider new distance between Pacman and Ghost using actual pathfinding
+    pacman_pos_new = (s_new[0], s_new[1])
+    ghost_pos = (s_new[2], s_new[3])
+
+    # distance_old = bfs_distance((s[0], s[1]), ghost_pos, labyrinth)
+    distance_new = bfs_distance(pacman_pos_new, ghost_pos, labyrinth)
+
+    r = 0
+
+    if distance_new < 3:
+        r = -2
+    elif distance_new == 4:
+        r = 0.5
+    elif distance_new > 4:
+        r = 1
+
+    # Reward for cookies
+    r += 1.0 if labyrinth[s_new[1]][s_new[0]] == "." else -1.5
+
+    # efficiency experiment
+    r -= 0.1
+
+    r = max(r, 0.01)
+
+    return r
 
 def take_action(s, a, labyrinth):
     labyrinth_copy = [list(row) for row in labyrinth]
@@ -144,19 +190,6 @@ def take_action(s, a, labyrinth):
     if a == 3:  # down
         s_new[1] += 1
 
-    # consider new distance between Pacman and Ghost using actual pathfinding
-    pacman_pos_new = (s_new[0], s_new[1])
-    ghost_pos = (s[2], s[3])
-
-    distance_new = bfs_distance(pacman_pos_new, ghost_pos, labyrinth)
-    distance_old = bfs_distance((s[0], s[1]), ghost_pos, labyrinth)
-
-    r = 0.05 * distance_new if distance_new != float('inf') else 0.0
-
-    # Reward for eating cookies
-    r += 1.0 if labyrinth[s_new[1]][s_new[0]] == "." else -1.5
-
-    # Ensure reward doesn't drop below 0.01
-    r = max(r, 0.01)
+    r = calc_reward(s_new, labyrinth)
 
     return tuple(s_new), r, labyrinth_copy
\ No newline at end of file
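
P.S. For anyone who wants to sanity-check the new calc_reward() without running the full game loop, a sketch like the following works. The 5x5 test maze, the two states, and the expected values are my own assumptions, not part of the patch; it presumes reinforcement_learning.py imports cleanly on its own (run from inside 04_pacman_rl/) and that bfs_distance() treats '#' cells as walls, as the game maze suggests.

import reinforcement_learning as rl

# Toy 5x5 maze: '#' = wall, '.' = cookie.
# State layout, as in the patch: (pacman_x, pacman_y, ghost_x, ghost_y).
labyrinth = [
    list("#####"),
    list("#...#"),
    list("#.#.#"),
    list("#...#"),
    list("#####"),
]

# Ghost one step away: BFS distance 1 < 3 gives r = -2, the cookie at (1, 1)
# adds 1.0, the efficiency penalty subtracts 0.1, and the final clamp lifts
# the result to 0.01.
print(rl.calc_reward((1, 1, 2, 1), labyrinth))  # expected: 0.01

# Ghost four steps away: r = 0.5, plus 1.0 for the cookie, minus 0.1.
print(rl.calc_reward((1, 1, 3, 3), labyrinth))  # expected: 1.4

One thing such a check makes visible: distance_new == 3 matches none of the three branches, so the distance term contributes 0 at exactly that range, and because of the clamp to 0.01 the -2 proximity penalty never survives into the returned reward.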