diff --git a/04_pacman_rl/pacman.py b/04_pacman_rl/pacman.py
index 75bd7b5..e34e8ea 100644
--- a/04_pacman_rl/pacman.py
+++ b/04_pacman_rl/pacman.py
@@ -127,7 +127,7 @@ def main():
 
     ghost = Ghost(COLS - 2, ROWS - 2)
     s = (pacman.x, pacman.y, ghost.x, ghost.y) # as a tuple so the state becomes hashable
-    a_prev = 4
+    opposite_action = {0: 1, 1: 0, 2: 3, 3: 2}
     q = rl.q_init()
     gamma = 0.9
     alpha = 0.8
@@ -166,23 +166,34 @@ def main():
         # while s_not_terminal:
         print("s: " + str(s))
         print("q[s] before action: " + str(q[s]))
-
-        a = rl.epsilon_greedy(q, s, a_prev) # 0 = Left; 1 = Right ; 2 = Up ; 3 = Down
+        a = rl.epsilon_greedy(q, s) # 0 = Left; 1 = Right ; 2 = Up ; 3 = Down
         s_new, r = rl.take_action(s, a, labyrinth)
         move_pacman(pacman, a)
 
         q[s][a] += round(alpha * (r + gamma * max(q[s_new]) - q[s][a]), 2)
+        q[s_new][opposite_action[a]] += round(alpha * (r + gamma * max(q[s_new]) - q[s][opposite_action[a]]), 2)
+
+        # Update Q-values for all states with the same Pacman position (s0, s1)
+        pacman_s0, pacman_s1 = s_new[0], s_new[1]
+        for state_key in q:
+            if state_key[0] == pacman_s0 and state_key[1] == pacman_s1:
+                # Update this state's Q-values based on the current transition, but only if action is valid
+                if q[state_key][a] > 0: # Only update if action is not blocked
+                    q[state_key][a] += round(alpha * (r + gamma * max(q[s_new]) - q[state_key][a]), 2)
+                if q[state_key][opposite_action[a]] > 0: # Only update if opposite action is not blocked
+                    q[state_key][opposite_action[a]] += round(alpha * (r + gamma * max(q[s_new]) - q[state_key][opposite_action[a]]), 2)
+
         print("s_new: " + str(s_new))
         print("q[s] after action with manipulated a: " + str(q[s]))
         print("q[s_new] after action: " + str(q[s_new]))
         print()
 
-        s = s_new
-        a_prev = a
+        # s = s_new
+        s = (pacman.x, pacman.y, ghost.x, ghost.y)
 
         time.sleep(0.5)
 
-        #gamma *= gamma
+        gamma *= gamma
 
         # Draw the labyrinth, pacman, and ghost
        draw_labyrinth()
diff --git a/04_pacman_rl/reinforcement_learning.py b/04_pacman_rl/reinforcement_learning.py
index c704a69..f2709ac 100644
--- a/04_pacman_rl/reinforcement_learning.py
+++ b/04_pacman_rl/reinforcement_learning.py
@@ -12,7 +12,7 @@ def q_init():
 
     # Configuration
     NUM_ACTIONS = 4
-    INITIAL_Q_VALUE = 2.0 # Small value for initialization
+    INITIAL_Q_VALUE = 1.0 # Small value for initialization
 
     # Labyrinth layout
     labyrinth = [
@@ -70,40 +70,17 @@ def q_init():
     # print(list(q_table.items())[:5]) # Uncomment to see the first 5 entries
     return q_table
 
 
-def epsilon_greedy(q, s, a_prev, epsilon=0.2):
+def epsilon_greedy(q, s, epsilon=0.2):
     """
     Return which direction Pacman should move to using epsilon-greedy algorithm
     With probability epsilon, choose a random action. Otherwise choose the greedy action.
     If multiple actions have the same max Q-value, prefer actions different from a_prev.
     Never allows Pacman to move backwards (opposite direction).
""" - - opposite_action = {0: 1, 1: 0, 2: 3, 3: 2} q_max = max(q[s]) a = q[s].index(q_max) - - """ - # Find all actions with the maximum Q-value - max_actions = [a for a in range(4) if q[s][a] == q_max] - # Exclude the opposite action (going backwards) - if a_prev in opposite_action: - backward_action = opposite_action[a_prev] - if backward_action in max_actions: - max_actions.remove(backward_action) - - # If no actions left after removing backward action, allow it (no choice) - if not max_actions: - max_actions = [a for a in range(4) if q[s][a] == q_max] - if a_prev in opposite_action: - backward_action = opposite_action[a_prev] - if backward_action in max_actions: - max_actions.remove(backward_action) - - # Return the first valid action - a = max_actions[0] if max_actions else 0 - """ return a """ @@ -126,10 +103,6 @@ def epsilon_greedy(q, s, a_prev, epsilon=0.2): """ -def max_q(q, s_new): - pass - - def bfs_distance(start, end, labyrinth): """ Calculate shortest path distance between two points using BFS. @@ -151,7 +124,7 @@ def bfs_distance(start, end, labyrinth): nx, ny = x + dx, y + dy if (nx, ny) == end: - return dist + 1 + return round(dist + 1, 2) if 0 <= ny < len(labyrinth) and 0 <= nx < len(labyrinth[0]): if (nx, ny) not in visited and labyrinth[ny][nx] != "#": @@ -173,7 +146,8 @@ def take_action(s, a, labyrinth): s_new[1] += 1 # consider if there is a point on the field - r = 2.0 if labyrinth[s_new[1]][s_new[0]] == "." else -5.0 + # r = 2.0 if labyrinth[s_new[1]][s_new[0]] == "." else -5.0 + r = -2 # consider new distance between Pacman and Ghost using actual pathfinding pacman_pos = (s[0], s[1]) @@ -183,11 +157,16 @@ def take_action(s, a, labyrinth): distance_new = bfs_distance(pacman_pos_new, ghost_pos, labyrinth) # Reward based on distance from ghost (closer distance = worse reward) - if distance_new >= 4: - r += 2.0 # Good reward for being far away - elif distance_new >= 2: - r += 1.0 # Small reward for being moderately far + if distance_new >= 5: + r -= 2.0 # Good reward for being far away + elif distance_new >= 3: + r -= 1.0 # Small reward for being moderately far + elif distance_new <= 2: + r += 5.0 # Large penalty for being adjacent to ghost elif distance_new == 1: - r -= 10.0 # Large penalty for being adjacent to ghost + r += 10.0 # Large penalty for being adjacent to ghost + + # Ensure reward doesn't drop below 0.01 + r = max(r, 0.01) return tuple(s_new), r \ No newline at end of file