diff --git a/04_pacman_rl/pacman.py b/04_pacman_rl/pacman.py
index 7cc3f73..dedbb2a 100644
--- a/04_pacman_rl/pacman.py
+++ b/04_pacman_rl/pacman.py
@@ -128,6 +128,7 @@ def main():
 
     s = (pacman.x, pacman.y, ghost.x, ghost.y) # as a tuple so the state becomes hashable
     q = rl.q_init()
+    a_opposite_direction = {0: 1, 1: 0, 2: 3, 3: 2}
 
     gamma = 0.9
     alpha = 0.8
@@ -162,22 +163,31 @@ def main():
 
         # Start of my code
        s_not_terminal = True
+        labyrinth_copy = labyrinth.copy()
+        a = 0
 
        while s_not_terminal:
            print("s: " + str(s))
            print("q[s] before action: " + str(q[s]))
 
            a = rl.epsilon_greedy(q, s) # 0 = Left; 1 = Right ; 2 = Up ; 3 = Down
-            s_new, r = rl.take_action(s, a, labyrinth)
+            s_new, r, labyrinth_copy = rl.take_action(s, a, labyrinth_copy)
 
            q[s][a] += round(alpha * (r + gamma * max(q[s_new]) - q[s][a]), 2)
+            q[s_new][a_opposite_direction[a]] += round(alpha * (r + gamma * max(q[s]) - q[s_new][a_opposite_direction[a]]), 2)
            s = s_new
+
+            if all("." not in row for row in labyrinth_copy):
+                s_not_terminal = False
-            if abs(r - q[s_new][a]) < 0.2: # Reward difference is small (convergence)
+            # Check for collisions (game over if ghost catches pacman)
+            if s[0] == s[2] and s[1] == s[3]:
                s_not_terminal = False
 
            time.sleep(0.2)
 
+        labyrinth_copy = []
+        print("NEW LOOP")
 
        move_pacman(pacman, a)
 
        # Draw the labyrinth, pacman, and ghost
diff --git a/04_pacman_rl/reinforcement_learning.py b/04_pacman_rl/reinforcement_learning.py
index c9b37e4..aa6a336 100644
--- a/04_pacman_rl/reinforcement_learning.py
+++ b/04_pacman_rl/reinforcement_learning.py
@@ -12,7 +12,7 @@ def q_init():
 
     # Configuration
     NUM_ACTIONS = 4
-    INITIAL_Q_VALUE = 3.0 # Small value for initialization
+    INITIAL_Q_VALUE = 2.0 # Small value for initialization
 
     # Labyrinth layout
     labyrinth = [
@@ -43,8 +43,6 @@ def q_init():
            continue
        if s3 == 2 and s2 not in s_constrained_values:
            continue
-        if s0 == s2 and s1 == s3:
-            continue
 
        # Assign all possible states a tuple of values
        state_key = (s0, s1, s2, s3)
@@ -135,6 +133,8 @@ def bfs_distance(start, end, labyrinth):
 
 
 def take_action(s, a, labyrinth):
+    labyrinth_copy = [list(row) for row in labyrinth]
+    labyrinth_copy[s[1]][s[0]] = " "
     s_new = list(s)
     if a == 0: # left
         s_new[0] -= 1
@@ -152,12 +152,12 @@ def take_action(s, a, labyrinth):
     distance_new = bfs_distance(pacman_pos_new, ghost_pos, labyrinth)
 
     # Reward inversely proportional to distance from ghost (asymptotes to 0)
-    r = 1.0 / (1.0 + distance_new) if distance_new != float('inf') else 0.0
+    r = 1.0 / (2.0 + distance_new) if distance_new != float('inf') else 0.0
 
     # Reward for eating cookies
-    r += 5.0 if labyrinth[s_new[1]][s_new[0]] == "." else -2.0
+    r += 0.5 if labyrinth[s_new[1]][s_new[0]] == "." else -0.5
 
     # Ensure reward doesn't drop below 0.01
     r = max(r, 0.01)
 
-    return tuple(s_new), r
\ No newline at end of file
+    return tuple(s_new), r, labyrinth_copy
\ No newline at end of file