status quo
parent ec2060c375  commit 476b67fa71
@@ -172,20 +172,6 @@ def train(q, num_iterations=10000):
        iter = iter + 1
        total_iterations += 1

-        # Check for collisions
-        if pacman_x == ghost_x and pacman_y == ghost_y:
-            running = False
-            break
-
-        # Eat cookies
-        if labyrinth[pacman_y][pacman_x] == ".":
-            labyrinth[pacman_y] = labyrinth[pacman_y][:pacman_x] + " " + labyrinth[pacman_y][pacman_x+1:]
-
-        # Check if all cookies are eaten
-        if all("." not in row for row in labyrinth):
-            running = False
-            break
-
        # Q-Learning
        a = rl.epsilon_greedy(q, s)
        s_new, r, labyrinth = rl.take_action(s, a, labyrinth)
@@ -215,6 +201,20 @@ def train(q, num_iterations=10000):
        s = (pacman_x, pacman_y, ghost_x, ghost_y)

+        # Check for collisions
+        if pacman_x == ghost_x and pacman_y == ghost_y:
+            running = False
+            break
+
+        # Eat cookies
+        if labyrinth[pacman_y][pacman_x] == ".":
+            labyrinth[pacman_y] = labyrinth[pacman_y][:pacman_x] + " " + labyrinth[pacman_y][pacman_x+1:]
+
+        # Check if all cookies are eaten
+        if all("." not in row for row in labyrinth):
+            running = False
+            break
+
    outer_iter += 1
    if outer_iter % 100 == 0:
        print(f"Training iteration {outer_iter}, Total steps: {total_iterations}")
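The pair of hunks above moves the collision and cookie checks from before the Q-learning step to after the state refresh, so train() now acts first and tests for termination last. A self-contained sketch of that step order on a toy problem — the names ALPHA and GAMMA and the epsilon value mirror constants visible in this diff, but their values here and the 1-D corridor environment are purely illustrative:

import random

ALPHA, GAMMA, EPSILON = 0.1, 0.9, 0.025         # illustrative values
GOAL = 5                                        # rightmost cell of corridor 0..GOAL
q = {s: [0.0, 0.0] for s in range(GOAL + 1)}    # two actions: 0 = left, 1 = right

def take_action(s, a):
    s_new = max(0, min(GOAL, s + (1 if a == 1 else -1)))
    return s_new, (1.0 if s_new == GOAL else -1.0)

for episode in range(500):
    s = 0
    for _ in range(50):
        # 1. pick an action (epsilon-greedy)
        if random.random() < EPSILON:
            a = random.randrange(2)                  # explore
        else:
            a = max((0, 1), key=lambda i: q[s][i])   # exploit
        # 2. take it and observe the reward
        s_new, r = take_action(s, a)
        # 3. tabular Q-update
        q[s][a] += ALPHA * (r + GAMMA * max(q[s_new]) - q[s][a])
        s = s_new
        # 4. terminal check comes last, as in the reordered train()
        if s == GOAL:
            break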
@@ -250,6 +250,19 @@ def visualize(q, num_games=10):
        screen.fill(BLACK)
        iter = iter + 1

+        # Q-Learning
+        a = rl.epsilon_greedy(q, s, epsilon=0.025)
+        s_new, r, labyrinth = rl.take_action(s, a, labyrinth)
+        q[s][a] += ALPHA * (r + GAMMA * rl.max_q(q, s_new, labyrinth) - q[s][a])
+        s = s_new
+
+        move_pacman(pacman, a)
+
+        if iter % 3 == 0:
+            ghost.move_towards_pacman(pacman)
+
+        s = (pacman.x, pacman.y, ghost.x, ghost.y)
+
        # Check for collisions
        if pacman.x == ghost.x and pacman.y == ghost.y:
            print("Game Over! The ghost caught Pacman.")
@@ -266,19 +279,6 @@ def visualize(q, num_games=10):
            running = False
            break

-        # Q-Learning
-        a = rl.epsilon_greedy(q, s)
-        s_new, r, labyrinth = rl.take_action(s, a, labyrinth)
-        q[s][a] += ALPHA * (r + GAMMA * rl.max_q(q, s_new, labyrinth) - q[s][a])
-        s = s_new
-
-        move_pacman(pacman, a)
-
-        if iter % 3 == 0:
-            ghost.move_towards_pacman(pacman)
-
-        s = (pacman.x, pacman.y, ghost.x, ghost.y)
-
        # Draw
        draw_labyrinth()
        pacman.draw()
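Both loops bootstrap through rl.max_q, whose body is not part of this diff. Judging only from the call sites here and the None-masked Q-rows visible in epsilon_greedy below, a plausible — strictly hypothetical — reading is:

def max_q(q, s_new, labyrinth):
    # Hypothetical sketch implied by the call sites: the best Q-value over
    # valid (non-None) actions in s_new, defaulting to 0.0 when every action
    # is blocked. The real helper may also consult `labyrinth` for walls;
    # this sketch ignores that argument.
    valid = [v for v in q[s_new] if v is not None]
    return max(valid) if valid else 0.0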
@@ -298,7 +298,7 @@ def main():
    q = rl.q_init()

    print("Training for 10000 iterations...")
-    q = train(q, num_iterations=10000)
+    q = train(q, num_iterations=5000)

    print("\nTraining complete! Starting visualization...")
    visualize(q, num_games=10)
@@ -72,41 +72,41 @@ def q_init():
    # print(list(q_table.items())[:5]) # Uncomment to see the first 5 entries
    return q_table

-def epsilon_greedy(q, s, epsilon=0.1):
+def epsilon_greedy(q, s, epsilon=0.025):
    """
    Return which direction Pacman should move to using epsilon-greedy algorithm
    With probability epsilon, choose a random action. Otherwise choose the greedy action.
    Avoids actions that would result in collision with ghost.
    """
-    # if np.random.random() < epsilon:
-    #     # Explore: choose random action (excluding blocked actions with Q=0)
-    #     valid_actions = [i for i in range(len(q[s])) if q[s][i] is not None]
-    #     return np.random.choice(valid_actions)
+    if np.random.random() < epsilon:
+        # Explore: choose random action (excluding blocked actions with Q=0)
+        valid_actions = [i for i in range(len(q[s])) if q[s][i] is not None]
+        return np.random.choice(valid_actions)

-    # else:
+    else:
        # Get all valid (non-blocked) actions with their Q-values
        valid_actions = [(i, q[s][i]) for i in range(len(q[s])) if q[s][i] is not None]

        # Sort by Q-value in descending order
        valid_actions.sort(key=lambda x: x[1], reverse=True)

        # Try each action starting from highest Q-value
        for a, q_val in valid_actions:
            s_test = list(s)
            if a == 0: # left
                s_test[0] -= 1
            elif a == 1: # right
                s_test[0] += 1
            elif a == 2: # up
                s_test[1] -= 1
            elif a == 3: # down
                s_test[1] += 1

            return a

def calc_reward(s_new, labyrinth):
    # Reward for cookies; punish for not eating cookies
-    r = 2.0 if labyrinth[s_new[1]][s_new[0]] == "." else -1.0
+    r = 1.0 if labyrinth[s_new[1]][s_new[0]] == "." else -1.0

    return r
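With the explore branch now live and the default epsilon lowered to 0.025, a non-greedy move becomes rare. A quick self-contained check of the resulting action distribution on a toy Q-row — the values are illustrative, with one blocked action marked None as in the tables above:

import numpy as np

rng = np.random.default_rng(0)
q_row = [0.5, None, 0.1, 0.2]                 # action 1 is blocked
valid = [i for i, v in enumerate(q_row) if v is not None]
greedy = max(valid, key=lambda i: q_row[i])   # action 0

non_greedy = 0
for _ in range(100_000):
    a = rng.choice(valid) if rng.random() < 0.025 else greedy
    non_greedy += a != greedy
print(non_greedy / 100_000)
# ~0.0167 = 0.025 * 2/3, since the exploratory draw over the three valid
# actions can also land on the greedy action itself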
@@ -19,13 +19,13 @@ print("Loading MNIST...") # debugging
prototype_data = prototype_data_raw.reshape(prototype_data_raw.shape[0], -1).astype(np.float32)
test_data = test_data_raw.reshape(test_data_raw.shape[0], -1).astype(np.float32)

-print("Train:", prototype_data.shape, "Test:", test_data.shape)
+# print("Train:", prototype_data.shape, "Test:", test_data.shape) # debugging

# Select first 1000 prototype vectors
prototypes = prototype_data[:1000]
prototype_labels = prototype_labels_set[:1000]

-print("Using", len(prototypes), "prototype vectors.") # debugging
+# print("Using", len(prototypes), "prototype vectors.") # debugging

# kNN function with explicit loops for readability
def knn_predict_batch(X_batch, k=3):
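The hunk stops at the knn_predict_batch signature; its body lies outside the diff, and the file's own comment says it uses explicit loops for readability. For orientation only, a minimal vectorized sketch of what a k-NN prediction over the 1000 prototypes above typically looks like — everything beyond the signature's parameter names is an assumption:

import numpy as np

def knn_predict_batch_sketch(X_batch, prototypes, prototype_labels, k=3):
    # Squared Euclidean distance from every query to every prototype;
    # shape (n_queries, n_prototypes). Fine for small batches, since the
    # broadcast temporarily materialises n_queries x n_prototypes x 784.
    d2 = ((X_batch[:, None, :] - prototypes[None, :, :]) ** 2).sum(axis=2)
    # Indices of the k nearest prototypes per query
    nearest = np.argsort(d2, axis=1)[:, :k]
    # Majority vote over the neighbours' digit labels (0-9)
    return np.array([np.bincount(prototype_labels[idx]).argmax()
                     for idx in nearest])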