From 1453fd930a7e2e36fd4ca5f5567f10f6306e5670 Mon Sep 17 00:00:00 2001
From: Ruben Seitz
Date: Mon, 24 Nov 2025 10:22:34 +0100
Subject: [PATCH] debugging reward system

---
 04_pacman_rl/pacman.py                 | 18 ++++++++++--------
 04_pacman_rl/reinforcement_learning.py |  9 ++++-----
 2 files changed, 14 insertions(+), 13 deletions(-)

diff --git a/04_pacman_rl/pacman.py b/04_pacman_rl/pacman.py
index dedbb2a..bf4d813 100644
--- a/04_pacman_rl/pacman.py
+++ b/04_pacman_rl/pacman.py
@@ -120,6 +120,7 @@ def move_pacman(pacman, a):
 
 # Main game function
 def main():
+    global labyrinth
     clock = pygame.time.Clock()
 
     # Initialize Pacman and Ghost positions
@@ -129,8 +130,8 @@ def main():
     s = (pacman.x, pacman.y, ghost.x, ghost.y)  # as a tuple so the state becomes hashable
     q = rl.q_init()
     a_opposite_direction = {0: 1, 1: 0, 2: 3, 3: 2}
-    gamma = 0.9
-    alpha = 0.8
+    gamma = 0.90
+    alpha = 0.2
 
     # Game loop
     running = True
@@ -163,8 +164,8 @@ def main():
 
         # Start of my code
         s_not_terminal = True
-        labyrinth_copy = labyrinth.copy()
-        a = 0
+        labyrinth_copy = [list(row) for row in labyrinth]  # Create proper deep copy
+        a = None
        while s_not_terminal:
             print("s: " + str(s))
             print("q[s] before action: " + str(q[s]))
@@ -173,7 +174,7 @@ def main():
             s_new, r, labyrinth_copy = rl.take_action(s, a, labyrinth_copy)
 
             q[s][a] += round(alpha * (r + gamma * max(q[s_new]) - q[s][a]), 2)
-            q[s_new][a_opposite_direction[a]] += round(alpha * (r + gamma * max(q[s]) - q[s_new][a_opposite_direction[a]]), 2)
+            # q[s_new][a_opposite_direction[a]] += round(alpha * (r + gamma * max(q[s]) - q[s_new][a_opposite_direction[a]]), 2)
 
             s = s_new
 
@@ -184,11 +185,12 @@ def main():
             if s[0] == s[2] and s[1] == s[3]:
                 s_not_terminal = False
 
-            time.sleep(0.2)
+            # time.sleep(0.05)
 
-        labyrinth_copy = []
-        print("NEW LOOP")
+        s = (pacman.x, pacman.y, ghost.x, ghost.y)  # as a tuple so the state becomes hashable
+        a = rl.epsilon_greedy(q, s)  # 0 = Left; 1 = Right ; 2 = Up ; 3 = Down
         move_pacman(pacman, a)
+        print("NEW LOOP")
 
         # Draw the labyrinth, pacman, and ghost
         draw_labyrinth()
diff --git a/04_pacman_rl/reinforcement_learning.py b/04_pacman_rl/reinforcement_learning.py
index aa6a336..1207f62 100644
--- a/04_pacman_rl/reinforcement_learning.py
+++ b/04_pacman_rl/reinforcement_learning.py
@@ -80,8 +80,8 @@ def epsilon_greedy(q, s, epsilon=0.2):
             a = q[s].index(q_max)
 
     return a
-    """
+
 
     if np.random.random() < epsilon:
         # Explore: choose random action (excluding blocked actions with Q=0)
         valid_actions = [i for i in range(len(q[s])) if q[s][i] > 0]
@@ -100,7 +100,6 @@ def epsilon_greedy(q, s, epsilon=0.2):
 
     return 0
     """
-
 def bfs_distance(start, end, labyrinth):
     """
     Calculate shortest path distance between two points using BFS.
@@ -150,12 +149,12 @@ def take_action(s, a, labyrinth):
     ghost_pos = (s[2], s[3])
 
     distance_new = bfs_distance(pacman_pos_new, ghost_pos, labyrinth)
+    distance_old = bfs_distance((s[0], s[1]), ghost_pos, labyrinth)
 
-    # Reward inversely proportional to distance from ghost (asymptotes to 0)
-    r = 1.0 / (2.0 + distance_new) if distance_new != float('inf') else 0.0
+    r = 0.05 * distance_new if distance_new != float('inf') else 0.0
 
     # Reward for eating cookies
-    r += 1.0 if labyrinth[s_new[1]][s_new[0]] == "." else -1.5
 
     # Ensure reward doesn't drop below 0.01
     r = max(r, 0.01)
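
For reference, the update that the new alpha = 0.2 and gamma = 0.90 feed into is plain tabular Q-learning. Below is a minimal standalone sketch of that step, without the round(..., 2) applied in pacman.py; the dictionary layout mirrors what q_init() appears to produce, and the sample states are made up.

# Minimal tabular Q-learning step, as performed inside the while s_not_terminal
# loop: move Q(s, a) toward r + gamma * max_a' Q(s', a').
# q maps each (pacman_x, pacman_y, ghost_x, ghost_y) state to four action values.

def q_update(q, s, a, r, s_new, alpha=0.2, gamma=0.9):
    q[s][a] += alpha * (r + gamma * max(q[s_new]) - q[s][a])

# Made-up example transition: Pacman moves right (a = 1) and collects r = 1.0.
q = {(1, 1, 5, 5): [0.0, 0.0, 0.0, 0.0],
     (2, 1, 5, 5): [0.0, 0.4, 0.0, 0.0]}
q_update(q, s=(1, 1, 5, 5), a=1, r=1.0, s_new=(2, 1, 5, 5))
print(q[(1, 1, 5, 5)][1])  # roughly 0.27

Lowering alpha from 0.8 to 0.2 simply makes each such step smaller, so a single noisy reward moves the table less.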
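
The reward shaping in take_action() after this commit can be summarized as a small function: the reward grows linearly with the BFS distance to the ghost (0.05 per tile), a cookie bonus/penalty is added, and the result is floored at 0.01. The function wrapper is illustrative, and the bonus/penalty are left as parameters because the commit changes those constants.

# Sketch of the shaped reward; distance_to_ghost is the BFS distance from
# bfs_distance(), on_cookie says whether the new tile holds a cookie.

def shaped_reward(distance_to_ghost, on_cookie, bonus=1.0, penalty=-1.5):
    r = 0.05 * distance_to_ghost if distance_to_ghost != float('inf') else 0.0
    r += bonus if on_cookie else penalty
    return max(r, 0.01)  # never let the reward drop below 0.01

print(shaped_reward(30, on_cookie=True))   # 2.5: far from the ghost, cookie eaten
print(shaped_reward(2, on_cookie=False))   # 0.01: the penalty is floored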
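
The a = rl.epsilon_greedy(q, s) call added before move_pacman() selects actions roughly as in the sketch below. The epsilon = 0.2 default matches the signature in the hunk headers; this version uses the standard-library random module and omits the valid-action filtering the real function applies.

import random

# Simplified epsilon-greedy selection (0 = Left, 1 = Right, 2 = Up, 3 = Down):
# with probability epsilon explore a random action, otherwise exploit the best one.
def epsilon_greedy_sketch(q, s, epsilon=0.2):
    if random.random() < epsilon:
        return random.randrange(len(q[s]))   # explore
    return q[s].index(max(q[s]))             # exploit (first max wins on ties)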