"""
|
|
Entwickeln Sie einen Reinforcement Learning (RL) Agenten, der in
|
|
einem minimalistischen Pacman-Spiel (bereitgestellt auf meiner
|
|
Homepage) effektiv Punkte sammelt, während er dem Geist
|
|
ausweicht und somit vermeidet gefressen zu werden.
|
|
"""
|
|
|
|
import numpy as np

GAMMA = 0.90  # Discount factor for future rewards
ALPHA = 0.2   # Learning rate for Q-value updates


def q_init():
    """Initialize every possible action in every state with a small starting Q-value."""

    # Configuration
    NUM_ACTIONS = 4
    INITIAL_Q_VALUE = 2.0  # Small value for initialization

    # Labyrinth layout
    labyrinth = [
        "##########",
        "#........#",
        "#.##..##.#",
        "#........#",
        "##########"
    ]

    # s0/s1: Pacman column and row, s2/s3: ghost column and row
    s0_range = range(1, 9)
    s1_range = range(1, 4)
    s2_range = range(1, 9)
    s3_range = range(1, 4)
    # Columns that are open (not walls) in row 2 of the labyrinth
    s_constrained_values = {1, 4, 5, 8}

    # The Q-Table dictionary
    q_table = {}

    # Iterate through all possible combinations of s0, s1, s2, s3
    for s0 in s0_range:
        for s1 in s1_range:
            for s2 in s2_range:
                for s3 in s3_range:

                    # Skip impossible states (a row-2 position that sits inside a wall)
                    if s1 == 2 and s0 not in s_constrained_values:
                        continue
                    if s3 == 2 and s2 not in s_constrained_values:
                        continue

                    # Assign each valid state a list of initial Q-values
                    state_key = (s0, s1, s2, s3)
                    q_values = [INITIAL_Q_VALUE] * NUM_ACTIONS

                    # Check which actions are blocked by walls
                    # Action 0: move left (s0 - 1)
                    if labyrinth[s1][s0 - 1] == "#":
                        q_values[0] = None
                    # Action 1: move right (s0 + 1)
                    if labyrinth[s1][s0 + 1] == "#":
                        q_values[1] = None
                    # Action 2: move up (s1 - 1)
                    if labyrinth[s1 - 1][s0] == "#":
                        q_values[2] = None
                    # Action 3: move down (s1 + 1)
                    if labyrinth[s1 + 1][s0] == "#":
                        q_values[3] = None

                    q_table[state_key] = q_values

    # print(f"Total number of valid states initialized: {len(q_table)}")  # debugging
    # print(list(q_table.items())[:5])  # Uncomment to see the first 5 entries
    return q_table

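
# A quick sanity check for q_init (a sketch; the state (1, 1, 8, 3) is just an
# example of (pacman_x, pacman_y, ghost_x, ghost_y)):
# q = q_init()
# print(len(q))           # number of valid states
# print(q[(1, 1, 8, 3)])  # left/up hit walls -> [None, 2.0, None, 2.0]
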
def epsilon_greedy(q, s, epsilon=0.025):
    """
    Return the direction Pacman should move, chosen with the epsilon-greedy algorithm.
    With probability epsilon, choose a random valid action; otherwise choose the
    greedy action. In the greedy case, actions that would step onto the ghost
    are skipped.
    """
    if np.random.random() < epsilon:
        # Explore: choose a random action (wall-blocked actions are None)
        valid_actions = [i for i in range(len(q[s])) if q[s][i] is not None]
        return np.random.choice(valid_actions)
    else:
        # Exploit: gather all valid (non-blocked) actions with their Q-values
        valid_actions = [(i, q[s][i]) for i in range(len(q[s])) if q[s][i] is not None]

        # Sort by Q-value in descending order
        valid_actions.sort(key=lambda x: x[1], reverse=True)

        # Try each action starting from the highest Q-value
        for a, q_val in valid_actions:
            s_test = list(s)
            if a == 0:    # left
                s_test[0] -= 1
            elif a == 1:  # right
                s_test[0] += 1
            elif a == 2:  # up
                s_test[1] -= 1
            elif a == 3:  # down
                s_test[1] += 1

            # Skip this action if it would collide with the ghost
            if s_test[0] == s[2] and s_test[1] == s[3]:
                continue

            return a

        # Every valid action collides with the ghost: take the best one anyway
        return valid_actions[0][0]

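# Example call (a sketch; assumes the table built by q_init above):
# q = q_init()
# a = epsilon_greedy(q, (2, 1, 8, 3))  # returns 0..3, never a wall move
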
def max_q(q, s_new, labyrinth, depth=0, max_depth=2):
    """Refresh the Q-values of all valid actions in state s_new with a bounded
    recursive lookahead and return the maximum."""
    q_max = float("-inf")
    for a in range(4):
        # Only consider known states and valid (non-blocked) actions
        if s_new in q and q[s_new][a] is not None:
            s_test_list = list(s_new)  # ghost position (s_new[2], s_new[3]) is kept
            if a == 0:    # left
                s_test_list[0] -= 1
            elif a == 1:  # right
                s_test_list[0] += 1
            elif a == 2:  # up
                s_test_list[1] -= 1
            elif a == 3:  # down
                s_test_list[1] += 1
            s_test = tuple(s_test_list)

            # Q-learning update: Q(s,a) += ALPHA * (r + GAMMA * max_a' Q(s',a') - Q(s,a))
            if s_test in q and depth < max_depth:
                q[s_new][a] += ALPHA * (calc_reward(s_test, labyrinth)
                                        + GAMMA * max_q(q, s_test, labyrinth, depth + 1, max_depth)
                                        - q[s_new][a])
            q_max = max(q_max, q[s_new][a])

    # No valid action found: fall back to a neutral value
    return q_max if q_max != float("-inf") else 0.0

def calc_reward(s_new, labyrinth):

    # Reward for cookies
    r = 1.0 if labyrinth[s_new[1]][s_new[0]] == "." else -1.0

    return r

def take_action(s, a, labyrinth):
    # Use the labyrinth parameter (already updated by previous steps)
    s_new = list(s)
    if a == 0:    # left
        s_new[0] -= 1
    elif a == 1:  # right
        s_new[0] += 1
    elif a == 2:  # up
        s_new[1] -= 1
    elif a == 3:  # down
        s_new[1] += 1

    # Compute the reward before the cookie is removed from the board
    r = calc_reward(tuple(s_new), labyrinth)

    # Mark the new Pacman position as eaten (requires mutable rows,
    # i.e. a labyrinth given as a list of lists rather than of strings)
    if labyrinth[s_new[1]][s_new[0]] == ".":
        labyrinth[s_new[1]][s_new[0]] = " "

    return tuple(s_new), r, labyrinth
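

# The actual game loop and ghost policy are provided by the Pacman environment
# on the homepage; the block below is only a minimal training sketch under two
# assumptions: the labyrinth is passed as a list of lists (so cells can be
# marked as eaten), and the ghost takes a uniformly random step through open
# cells as a stand-in for its real behaviour.
if __name__ == "__main__":
    q = q_init()

    for episode in range(200):
        # Fresh, mutable copy of the labyrinth so cookies can be eaten
        labyrinth = [list(row) for row in [
            "##########",
            "#........#",
            "#.##..##.#",
            "#........#",
            "##########",
        ]]
        s = (1, 1, 8, 3)  # example start: Pacman top-left, ghost bottom-right

        for step in range(50):
            a = epsilon_greedy(q, s)
            s_new, r, labyrinth = take_action(s, a, labyrinth)

            # One-step Q-update toward reward plus discounted lookahead
            q[s][a] += ALPHA * (r + GAMMA * max_q(q, s_new, labyrinth) - q[s][a])

            if (s_new[0], s_new[1]) == (s_new[2], s_new[3]):
                break  # stepped onto the ghost

            # Stand-in ghost move: a uniformly random step through open cells
            gx, gy = s_new[2], s_new[3]
            moves = [(dx, dy) for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1))
                     if labyrinth[gy + dy][gx + dx] != "#"]
            dx, dy = moves[np.random.randint(len(moves))]
            s_new = (s_new[0], s_new[1], gx + dx, gy + dy)

            if (s_new[0], s_new[1]) == (s_new[2], s_new[3]):
                break  # caught by the ghost
            s = s_new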