From 6c9a096b610ede1179153505064267a4167ab1ec Mon Sep 17 00:00:00 2001
From: Ruben-FreddyLoafers
Date: Thu, 20 Nov 2025 15:32:28 +0100
Subject: [PATCH] mental breakdown

---
 04_pacman_rl/pacman.py                 | 63 +++++++++++++-------------
 04_pacman_rl/reinforcement_learning.py | 23 +++-------
 2 files changed, 38 insertions(+), 48 deletions(-)

diff --git a/04_pacman_rl/pacman.py b/04_pacman_rl/pacman.py
index e34e8ea..7cc3f73 100644
--- a/04_pacman_rl/pacman.py
+++ b/04_pacman_rl/pacman.py
@@ -127,7 +127,6 @@ def main():
     ghost = Ghost(COLS - 2, ROWS - 2)
 
     s = (pacman.x, pacman.y, ghost.x, ghost.y)  # as a tuple so the state becomes hashable
-    opposite_action = {0: 1, 1: 0, 2: 3, 3: 2}
     q = rl.q_init()
     gamma = 0.9
     alpha = 0.8
@@ -162,38 +161,24 @@ def main():
                 running = False
 
         # Start of my code
-        # s_not_terminal = True
-        # while s_not_terminal:
-        print("s: " + str(s))
-        print("q[s] before action: " + str(q[s]))
+        s_not_terminal = True
+        while s_not_terminal:
+            print("s: " + str(s))
+            print("q[s] before action: " + str(q[s]))
 
-        a = rl.epsilon_greedy(q, s)  # 0 = Left; 1 = Right ; 2 = Up ; 3 = Down
-        s_new, r = rl.take_action(s, a, labyrinth)
+            a = rl.epsilon_greedy(q, s)  # 0 = Left; 1 = Right ; 2 = Up ; 3 = Down
+            s_new, r = rl.take_action(s, a, labyrinth)
+
+            q[s][a] += round(alpha * (r + gamma * max(q[s_new]) - q[s][a]), 2)
+
+            s = s_new
+
+            if abs(r - q[s_new][a]) < 0.2:  # Reward difference is small (convergence)
+                s_not_terminal = False
+
+            time.sleep(0.2)
+
+            move_pacman(pacman, a)
-
-        q[s][a] += round(alpha * (r + gamma * max(q[s_new]) - q[s][a]), 2)
-        q[s_new][opposite_action[a]] += round(alpha * (r + gamma * max(q[s_new]) - q[s][opposite_action[a]]), 2)
-
-        # Update Q-values for all states with the same Pacman position (s0, s1)
-        pacman_s0, pacman_s1 = s_new[0], s_new[1]
-        for state_key in q:
-            if state_key[0] == pacman_s0 and state_key[1] == pacman_s1:
-                # Update this state's Q-values based on the current transition, but only if action is valid
-                if q[state_key][a] > 0:  # Only update if action is not blocked
-                    q[state_key][a] += round(alpha * (r + gamma * max(q[s_new]) - q[state_key][a]), 2)
-                if q[state_key][opposite_action[a]] > 0:  # Only update if opposite action is not blocked
-                    q[state_key][opposite_action[a]] += round(alpha * (r + gamma * max(q[s_new]) - q[state_key][opposite_action[a]]), 2)
-
-        print("s_new: " + str(s_new))
-        print("q[s] after action with manipulated a: " + str(q[s]))
-        print("q[s_new] after action: " + str(q[s_new]))
-        print()
-
-        # s = s_new
-        s = (pacman.x, pacman.y, ghost.x, ghost.y)
-        time.sleep(0.5)
-
-        gamma *= gamma
 
         # Draw the labyrinth, pacman, and ghost
         draw_labyrinth()
@@ -209,4 +194,18 @@ def main():
     pygame.quit()
 
 if __name__ == "__main__":
-    main()
\ No newline at end of file
+    main()
+
+"""
+        for state_key in q:
+            if state_key[0] == s_new[0] and state_key[1] == s_new[1]:
+                # Update this state's Q-values based on the current transition, but only if action is valid
+                if q[state_key][a] > 0:  # Only update if action is not blocked
+                    q[state_key][a] += round(alpha * (r + gamma * max(q[s_new]) - q[state_key][a]), 2)
+                if q[state_key][opposite_action[a]] > 0:  # Only update if opposite action is not blocked
+                    q[state_key][opposite_action[a]] += round(alpha * (r + gamma * max(q[s_new]) - q[state_key][opposite_action[a]]), 2)
+        print("s_new: " + str(s_new))
+        print("q[s] after action with manipulated a: " + str(q[s]))
+        print("q[s_new] after action: " + str(q[s_new]))
+        print()
+"""
\ No newline at end of file
diff --git a/04_pacman_rl/reinforcement_learning.py b/04_pacman_rl/reinforcement_learning.py
index f2709ac..c9b37e4 100644
--- a/04_pacman_rl/reinforcement_learning.py
+++ b/04_pacman_rl/reinforcement_learning.py
@@ -12,7 +12,7 @@ def q_init():
 
     # Configuration
     NUM_ACTIONS = 4
-    INITIAL_Q_VALUE = 1.0  # Small value for initialization
+    INITIAL_Q_VALUE = 3.0  # Small value for initialization
 
     # Labyrinth layout
    labyrinth = [
@@ -145,26 +145,17 @@ def take_action(s, a, labyrinth):
     if a == 3:  # down
         s_new[1] += 1
 
-    # consider if there is a point on the field
-    # r = 2.0 if labyrinth[s_new[1]][s_new[0]] == "." else -5.0
-    r = -2
-
     # consider new distance between Pacman and Ghost using actual pathfinding
-    pacman_pos = (s[0], s[1])
-    ghost_pos = (s[2], s[3])
     pacman_pos_new = (s_new[0], s_new[1])
+    ghost_pos = (s[2], s[3])
     distance_new = bfs_distance(pacman_pos_new, ghost_pos, labyrinth)
 
-    # Reward based on distance from ghost (closer distance = worse reward)
-    if distance_new >= 5:
-        r -= 2.0  # Good reward for being far away
-    elif distance_new >= 3:
-        r -= 1.0  # Small reward for being moderately far
-    elif distance_new <= 2:
-        r += 5.0  # Large penalty for being adjacent to ghost
-    elif distance_new == 1:
-        r += 10.0  # Large penalty for being adjacent to ghost
+    # Reward inversely proportional to distance from ghost (asymptotes to 0)
+    r = 1.0 / (1.0 + distance_new) if distance_new != float('inf') else 0.0
+
+    # Reward for eating cookies
+    r += 5.0 if labyrinth[s_new[1]][s_new[0]] == "." else -2.0
 
     # Ensure reward doesn't drop below 0.01
     r = max(r, 0.01)
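
Note (outside the patch): the new reward in take_action() depends on bfs_distance(), whose body is not shown in this diff. The sketch below is a minimal, assumed re-implementation of such a grid BFS helper, plus a small usage example that mirrors the patch's reward formula so it can be inspected in isolation. The maze, the positions, and the helper's internals are illustrative assumptions; only the bfs_distance name, the (x, y) position tuples, the labyrinth[row][col] indexing, and the reward lines themselves come from the diff.

from collections import deque


def bfs_distance(start, goal, labyrinth):
    """Shortest-path length between two grid cells, treating '#' as a wall.

    Assumed re-implementation for illustration; the repo's own bfs_distance
    may differ. Returns float('inf') when the goal is unreachable.
    """
    if start == goal:
        return 0
    rows, cols = len(labyrinth), len(labyrinth[0])
    visited = {start}
    queue = deque([(start, 0)])
    while queue:
        (x, y), dist = queue.popleft()
        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nx, ny = x + dx, y + dy
            if 0 <= nx < cols and 0 <= ny < rows and labyrinth[ny][nx] != "#":
                if (nx, ny) == goal:
                    return dist + 1
                if (nx, ny) not in visited:
                    visited.add((nx, ny))
                    queue.append(((nx, ny), dist + 1))
    return float("inf")


if __name__ == "__main__":
    # Illustrative 5x5 maze: '#' = wall, '.' = cookie.
    maze = [
        "#####",
        "#...#",
        "#.#.#",
        "#...#",
        "#####",
    ]
    pacman_new, ghost = (1, 1), (3, 3)
    d = bfs_distance(pacman_new, ghost, maze)

    # Mirror of the reward introduced by the patch, for inspection only.
    r = 1.0 / (1.0 + d) if d != float("inf") else 0.0                # ghost-distance term
    r += 5.0 if maze[pacman_new[1]][pacman_new[0]] == "." else -2.0  # cookie term
    r = max(r, 0.01)

    print(d, round(r, 2))  # prints: 4 5.2

As written in the patch, the distance term 1.0 / (1.0 + distance_new) is largest (0.5) when the ghost is one step away and fades towards 0 as Pacman moves farther away.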