status quo

master
Ruben-FreddyLoafers 2025-12-10 12:09:15 +01:00
parent ec2060c375
commit 476b67fa71
3 changed files with 55 additions and 55 deletions

View File

@@ -172,20 +172,6 @@ def train(q, num_iterations=10000):
         iter = iter + 1
         total_iterations += 1
-        # Check for collisions
-        if pacman_x == ghost_x and pacman_y == ghost_y:
-            running = False
-            break
-        # Eat cookies
-        if labyrinth[pacman_y][pacman_x] == ".":
-            labyrinth[pacman_y] = labyrinth[pacman_y][:pacman_x] + " " + labyrinth[pacman_y][pacman_x+1:]
-        # Check if all cookies are eaten
-        if all("." not in row for row in labyrinth):
-            running = False
-            break
         # Q-Learning
         a = rl.epsilon_greedy(q, s)
         s_new, r, labyrinth = rl.take_action(s, a, labyrinth)
@@ -214,6 +200,20 @@ def train(q, num_iterations=10000):
                 ghost_y -= 1
         s = (pacman_x, pacman_y, ghost_x, ghost_y)
+        # Check for collisions
+        if pacman_x == ghost_x and pacman_y == ghost_y:
+            running = False
+            break
+        # Eat cookies
+        if labyrinth[pacman_y][pacman_x] == ".":
+            labyrinth[pacman_y] = labyrinth[pacman_y][:pacman_x] + " " + labyrinth[pacman_y][pacman_x+1:]
+        # Check if all cookies are eaten
+        if all("." not in row for row in labyrinth):
+            running = False
+            break
     outer_iter += 1
     if outer_iter % 100 == 0:
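
Net effect of the two hunks above: the collision and cookie checks move from before the Q-learning step to after Pacman and the ghost have both moved, so a catch or a cleared board is judged on the positions the agents actually occupy at the end of the step. With the old order, a ghost stepping onto Pacman after the check was only detected one iteration later.
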
@@ -250,6 +250,19 @@ def visualize(q, num_games=10):
         screen.fill(BLACK)
         iter = iter + 1
+        # Q-Learning
+        a = rl.epsilon_greedy(q, s, epsilon=0.025)
+        s_new, r, labyrinth = rl.take_action(s, a, labyrinth)
+        q[s][a] += ALPHA * (r + GAMMA * rl.max_q(q, s_new, labyrinth) - q[s][a])
+        s = s_new
+        move_pacman(pacman, a)
+        if iter % 3 == 0:
+            ghost.move_towards_pacman(pacman)
+        s = (pacman.x, pacman.y, ghost.x, ghost.y)
         # Check for collisions
         if pacman.x == ghost.x and pacman.y == ghost.y:
             print("Game Over! The ghost caught Pacman.")
@@ -266,19 +279,6 @@ def visualize(q, num_games=10):
             running = False
             break
-        # Q-Learning
-        a = rl.epsilon_greedy(q, s)
-        s_new, r, labyrinth = rl.take_action(s, a, labyrinth)
-        q[s][a] += ALPHA * (r + GAMMA * rl.max_q(q, s_new, labyrinth) - q[s][a])
-        s = s_new
-        move_pacman(pacman, a)
-        if iter % 3 == 0:
-            ghost.move_towards_pacman(pacman)
-        s = (pacman.x, pacman.y, ghost.x, ghost.y)
         # Draw
         draw_labyrinth()
         pacman.draw()
@@ -298,7 +298,7 @@ def main():
     q = rl.q_init()
     print("Training for 10000 iterations...")
-    q = train(q, num_iterations=10000)
+    q = train(q, num_iterations=5000)
     print("\nTraining complete! Starting visualization...")
     visualize(q, num_games=10)
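
Aside from halving the training budget in main(), the hunks above all revolve around the same tabular Q-learning update: Q(s,a) <- Q(s,a) + ALPHA * (r + GAMMA * max_a' Q(s',a') - Q(s,a)). A self-contained one-step numeric check (the ALPHA = 0.1, GAMMA = 0.9 values are illustrative assumptions, not read from this repo):

    # One-step check of the tabular Q-learning update used in train() and visualize().
    # ALPHA and GAMMA here are assumed values, not the repo's constants.
    ALPHA, GAMMA = 0.1, 0.9
    q_sa = 0.0        # current estimate Q(s, a)
    r = -1.0          # step reward (the post-commit "no cookie" case)
    max_q_next = 2.0  # assumed max over Q(s', a')
    q_sa += ALPHA * (r + GAMMA * max_q_next - q_sa)
    print(q_sa)       # 0.1 * (-1.0 + 0.9 * 2.0 - 0.0) = 0.08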

View File

@@ -72,41 +72,41 @@ def q_init():
     # print(list(q_table.items())[:5])  # Uncomment to see the first 5 entries
     return q_table
-def epsilon_greedy(q, s, epsilon=0.1):
+def epsilon_greedy(q, s, epsilon=0.025):
     """
     Return which direction Pacman should move, using the epsilon-greedy algorithm.
     With probability epsilon, choose a random action; otherwise choose the greedy action.
     Avoids actions that would result in a collision with the ghost.
     """
-    # if np.random.random() < epsilon:
-    #     # Explore: choose random action (excluding blocked actions with Q=0)
-    #     valid_actions = [i for i in range(len(q[s])) if q[s][i] is not None]
-    #     return np.random.choice(valid_actions)
-    # else:
-    # Get all valid (non-blocked) actions with their Q-values
-    valid_actions = [(i, q[s][i]) for i in range(len(q[s])) if q[s][i] is not None]
-    # Sort by Q-value in descending order
-    valid_actions.sort(key=lambda x: x[1], reverse=True)
-    # Try each action starting from highest Q-value
-    for a, q_val in valid_actions:
-        s_test = list(s)
-        if a == 0:  # left
-            s_test[0] -= 1
-        elif a == 1:  # right
-            s_test[0] += 1
-        elif a == 2:  # up
-            s_test[1] -= 1
-        elif a == 3:  # down
-            s_test[1] += 1
-        return a
+    if np.random.random() < epsilon:
+        # Explore: choose random action (excluding blocked actions with Q=0)
+        valid_actions = [i for i in range(len(q[s])) if q[s][i] is not None]
+        return np.random.choice(valid_actions)
+    else:
+        # Get all valid (non-blocked) actions with their Q-values
+        valid_actions = [(i, q[s][i]) for i in range(len(q[s])) if q[s][i] is not None]
+        # Sort by Q-value in descending order
+        valid_actions.sort(key=lambda x: x[1], reverse=True)
+        # Try each action starting from highest Q-value
+        for a, q_val in valid_actions:
+            s_test = list(s)
+            if a == 0:  # left
+                s_test[0] -= 1
+            elif a == 1:  # right
+                s_test[0] += 1
+            elif a == 2:  # up
+                s_test[1] -= 1
+            elif a == 3:  # down
+                s_test[1] += 1
+            return a
 def calc_reward(s_new, labyrinth):
     # Reward for cookies; punish for not eating cookies
-    r = 2.0 if labyrinth[s_new[1]][s_new[0]] == "." else -1.0
+    r = 1.0 if labyrinth[s_new[1]][s_new[0]] == "." else -1.0
     return r
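
Two tunings land in this file: the default exploration rate drops from 0.1 to 0.025 and the explore branch is actually enabled (it was commented out before), and the cookie reward drops from 2.0 to 1.0, narrowing its gap to the -1.0 step penalty. At epsilon = 0.025 roughly one move in 40 is random instead of one in 10; a quick self-contained check:

    import numpy as np

    # Empirical exploration frequency at the new default epsilon = 0.025:
    # the now-active explore branch fires on about 2.5% of steps (~1 in 40).
    hits = sum(np.random.random() < 0.025 for _ in range(100_000))
    print(hits / 100_000)  # close to 0.025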

View File

@@ -19,13 +19,13 @@ print("Loading MNIST...")  # debugging
 prototype_data = prototype_data_raw.reshape(prototype_data_raw.shape[0], -1).astype(np.float32)
 test_data = test_data_raw.reshape(test_data_raw.shape[0], -1).astype(np.float32)
-print("Train:", prototype_data.shape, "Test:", test_data.shape)
+# print("Train:", prototype_data.shape, "Test:", test_data.shape)  # debugging
 # Select first 1000 prototype vectors
 prototypes = prototype_data[:1000]
 prototype_labels = prototype_labels_set[:1000]
-print("Using", len(prototypes), "prototype vectors.")  # debugging
+# print("Using", len(prototypes), "prototype vectors.")  # debugging
 # kNN function with explicit loops for readability
 def knn_predict_batch(X_batch, k=3):
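
The body of knn_predict_batch lies below this hunk and is not shown. For orientation only, a minimal loop-based sketch of what a predictor with this signature typically does, assuming the prototypes and prototype_labels arrays defined above, Euclidean distance, and a majority vote (all three are assumptions, not read from the file):

    import numpy as np

    def knn_predict_batch_sketch(X_batch, prototypes, prototype_labels, k=3):
        """Hypothetical loop-based kNN; not the repo's implementation."""
        predictions = []
        for x in X_batch:
            # Euclidean distance from x to every prototype vector
            dists = [np.sqrt(np.sum((x - p) ** 2)) for p in prototypes]
            # Indices of the k nearest prototypes
            nearest = np.argsort(dists)[:k]
            votes = [prototype_labels[i] for i in nearest]
            # Majority vote among the k neighbours
            predictions.append(max(set(votes), key=votes.count))
        return np.array(predictions)

    # Example call: knn_predict_batch_sketch(test_data[:50], prototypes, prototype_labels, k=3)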