# MLE/04_pacman_rl/reinforcement_learning.py

"""
Entwickeln Sie einen Reinforcement Learning (RL) Agenten, der in
einem minimalistischen Pacman-Spiel (bereitgestellt auf meiner
Homepage) effektiv Punkte sammelt, während er dem Geist
ausweicht und somit vermeidet gefressen zu werden.
"""
import numpy as np
import random

GAMMA = 0.90  # discount factor
ALPHA = 0.2   # learning rate


def q_init():
    """Fill every possible action in every state with a small random value for initialization."""
    # Labyrinth layout
    labyrinth = [
        "##########",
        "#........#",
        "#.##..##.#",
        "#........#",
        "##########"
    ]
    s0_range = range(1, 9)
    s1_range = range(1, 4)
    s2_range = range(1, 9)
    s3_range = range(1, 4)
    # Open cells in the middle row ("#.##..##.#") are x = 1, 4, 5 and 8
    s_constrained_values = {1, 4, 5, 8}
    # The Q-table dictionary
    q_table = {}
    # Iterate through all possible combinations of s0, s1, s2, s3
    for s0 in s0_range:
        for s1 in s1_range:
            for s2 in s2_range:
                for s3 in s3_range:
                    # Skip impossible states
                    if s1 == 2 and s0 not in s_constrained_values:
                        continue
                    if s3 == 2 and s2 not in s_constrained_values:
                        continue
                    # Assign each state a fresh set of small random Q-values;
                    # drawing inside the loop ensures states do not all share
                    # the same initial values
                    state_key = (s0, s1, s2, s3)
                    q_values = [random.uniform(-0.1, 0.1) for _ in range(4)]
                    # Check which actions are blocked by walls
                    # Action 0: move left (s0 - 1)
                    if labyrinth[s1][s0 - 1] == "#":
                        q_values[0] = None
                    # Action 1: move right (s0 + 1)
                    if labyrinth[s1][s0 + 1] == "#":
                        q_values[1] = None
                    # Action 2: move up (s1 - 1)
                    if labyrinth[s1 - 1][s0] == "#":
                        q_values[2] = None
                    # Action 3: move down (s1 + 1)
                    if labyrinth[s1 + 1][s0] == "#":
                        q_values[3] = None
                    q_table[state_key] = q_values
    # print(f"Total number of valid states initialized: {len(q_table)}")  # debugging
    # print(list(q_table.items())[:5])  # Uncomment to see the first 5 entries
    return q_table
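

# A quick sanity check of q_init (hypothetical usage, not part of the original
# file): state keys are (pacman_x, pacman_y, ghost_x, ghost_y), and each entry
# holds four Q-values with wall-blocked actions marked as None, e.g.:
#
#     q = q_init()
#     print(len(q))           # number of enumerated (pacman, ghost) states
#     print(q[(1, 1, 8, 3)])  # -> [None, 0.03..., None, 0.07...] (walls left/up)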


def epsilon_greedy(q, s, epsilon=0.025):
    """
    Return which direction Pacman should move using the epsilon-greedy algorithm.
    With probability epsilon, choose a random action; otherwise choose the greedy action.
    Avoids actions that would result in a collision with the ghost.
    """
    if np.random.random() < epsilon:
        # Explore: choose a random action (excluding wall-blocked actions marked None)
        valid_actions = [i for i in range(len(q[s])) if q[s][i] is not None]
        return np.random.choice(valid_actions)
    else:
        # Get all valid (non-blocked) actions with their Q-values
        valid_actions = [(i, q[s][i]) for i in range(len(q[s])) if q[s][i] is not None]
        # Sort by Q-value in descending order
        valid_actions.sort(key=lambda x: x[1], reverse=True)
        # Try each action starting from the highest Q-value
        for a, _ in valid_actions:
            s_test = list(s)
            if a == 0:    # left
                s_test[0] -= 1
            elif a == 1:  # right
                s_test[0] += 1
            elif a == 2:  # up
                s_test[1] -= 1
            elif a == 3:  # down
                s_test[1] += 1
            # Skip actions that would move Pacman onto the ghost's cell
            if s_test[0] == s_test[2] and s_test[1] == s_test[3]:
                continue
            return a
        # Every action collides with the ghost; fall back to the greedy action
        return valid_actions[0][0]


def calc_reward(s_new, labyrinth):
    """Reward +1.0 for eating a cookie, -1.0 for stepping on an empty cell."""
    r = 1.0 if labyrinth[s_new[1]][s_new[0]] == "." else -1.0
    return r


def take_action(s, a, labyrinth):
    s_new = list(s)
    if a == 0:    # left
        s_new[0] -= 1
    elif a == 1:  # right
        s_new[0] += 1
    elif a == 2:  # up
        s_new[1] -= 1
    elif a == 3:  # down
        s_new[1] += 1
    # Check if the action caused a game over (Pacman caught by the ghost)
    if s_new[0] == s_new[2] and s_new[1] == s_new[3]:
        r = -100.0
    else:
        r = calc_reward(tuple(s_new), labyrinth)
        # Boost the reward if the move brings Pacman closer to the nearest cookie.
        # get_nearest_cookie already returns sign values in {-1, 0, 1}, so this
        # "distance" counts the axes on which Pacman is not yet aligned with it.
        old_dx, old_dy = get_nearest_cookie(s[0], s[1], labyrinth)
        new_dx, new_dy = get_nearest_cookie(s_new[0], s_new[1], labyrinth)
        if abs(new_dx) + abs(new_dy) < abs(old_dx) + abs(old_dy):
            r += 2.0  # bonus for moving closer to a cookie
        # Mark the new Pacman position as eaten (if it holds a cookie)
        if labyrinth[s_new[1]][s_new[0]] == ".":
            # Convert the string row to a list, modify it, then convert back to a string
            row_list = list(labyrinth[s_new[1]])
            row_list[s_new[0]] = " "
            labyrinth[s_new[1]] = "".join(row_list)
    return tuple(s_new), r, labyrinth


def max_q(q, s_new, labyrinth):
    """Return the maximum Q-value among valid actions in state s_new."""
    if s_new not in q:
        return 0.0
    # Only consider valid (non-wall-blocked) actions; initializing the running
    # maximum at 0 would overestimate states whose valid Q-values are all negative.
    valid = [q[s_new][a] for a in range(4) if q[s_new][a] is not None]
    return max(valid) if valid else 0.0


def get_nearest_cookie(pacman_x, pacman_y, labyrinth):
    """Return the signs of the x and y offsets from Pacman to the nearest cookie (by Manhattan distance)."""
    cookies = [
        (x, y)
        for y, row in enumerate(labyrinth)
        for x, cell in enumerate(row)
        if cell == "."
    ]
    if cookies:
        nearest = min(
            cookies, key=lambda c: abs(c[0] - pacman_x) + abs(c[1] - pacman_y)
        )
        cookie_dx = int(np.sign(nearest[0] - pacman_x))
        cookie_dy = int(np.sign(nearest[1] - pacman_y))
    else:
        cookie_dx, cookie_dy = 0, 0
    return cookie_dx, cookie_dy
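

# ---------------------------------------------------------------------------
# The remainder of the original file (the training loop) is not shown above,
# so the following is a minimal sketch of how these pieces could fit together
# for tabular Q-learning. The episode count, step limit, start positions, and
# the random ghost policy are illustrative assumptions, not the original code.
# ---------------------------------------------------------------------------
def ghost_step(s, labyrinth):
    """Move the ghost one cell in a random non-wall direction (assumed policy)."""
    moves = [(-1, 0), (1, 0), (0, -1), (0, 1)]
    random.shuffle(moves)
    for dx, dy in moves:
        gx, gy = s[2] + dx, s[3] + dy
        if labyrinth[gy][gx] != "#":
            return (s[0], s[1], gx, gy)
    return s


def train(episodes=500, max_steps=200):
    """Run tabular Q-learning over the maze defined in q_init (sketch)."""
    q = q_init()
    base_maze = [
        "##########",
        "#........#",
        "#.##..##.#",
        "#........#",
        "##########"
    ]
    for _ in range(episodes):
        labyrinth = base_maze.copy()  # fresh cookies each episode (rows are immutable strings)
        s = (1, 1, 8, 3)              # Pacman top-left, ghost bottom-right (assumed start)
        for _ in range(max_steps):
            a = epsilon_greedy(q, s)
            s_new, r, labyrinth = take_action(s, a, labyrinth)
            # Standard Q-learning update:
            # Q(s,a) += alpha * (r + gamma * max_a' Q(s',a') - Q(s,a))
            q[s][a] += ALPHA * (r + GAMMA * max_q(q, s_new, labyrinth) - q[s][a])
            if s_new[0] == s_new[2] and s_new[1] == s_new[3]:
                break  # Pacman ran into the ghost
            if all("." not in row for row in labyrinth):
                break  # all cookies eaten
            s = ghost_step(s_new, labyrinth)
            if s[0] == s[2] and s[1] == s[3]:
                break  # the ghost caught Pacman
    return q


if __name__ == "__main__":
    trained_q = train()
    print(f"Trained Q-table over {len(trained_q)} states.")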