mental breakdown
parent 6f7dcb8326
commit 6c9a096b61
@@ -127,7 +127,6 @@ def main():
 ghost = Ghost(COLS - 2, ROWS - 2)
 
 s = (pacman.x, pacman.y, ghost.x, ghost.y) # as a tuple so the state becomes hashable
-opposite_action = {0: 1, 1: 0, 2: 3, 3: 2}
 q = rl.q_init()
 gamma = 0.9
 alpha = 0.8
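As the inline comment in this hunk notes, the state is stored as a tuple so it can serve as a dictionary key in the Q-table; a two-line illustration (the values here are made up, not taken from the code):

# Tuples are hashable, so they work as Q-table keys; lists do not.
q = {}
q[(1, 1, 5, 5)] = [3.0, 3.0, 3.0, 3.0]   # fine
# q[[1, 1, 5, 5]] = [3.0] * 4            # would raise TypeError: unhashable type: 'list'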
@@ -162,38 +161,24 @@ def main():
 running = False
 
 # Start of my code
-# s_not_terminal = True
-# while s_not_terminal:
+s_not_terminal = True
+while s_not_terminal:
 print("s: " + str(s))
 print("q[s] before action: " + str(q[s]))
 
 a = rl.epsilon_greedy(q, s) # 0 = Left; 1 = Right ; 2 = Up ; 3 = Down
 s_new, r = rl.take_action(s, a, labyrinth)
 
+q[s][a] += round(alpha * (r + gamma * max(q[s_new]) - q[s][a]), 2)
+
+s = s_new
+
+if abs(r - q[s_new][a]) < 0.2: # Reward difference is small (convergence)
+s_not_terminal = False
+
+time.sleep(0.2)
+
 move_pacman(pacman, a)
 
-q[s][a] += round(alpha * (r + gamma * max(q[s_new]) - q[s][a]), 2)
-q[s_new][opposite_action[a]] += round(alpha * (r + gamma * max(q[s_new]) - q[s][opposite_action[a]]), 2)
-
-# Update Q-values for all states with the same Pacman position (s0, s1)
-pacman_s0, pacman_s1 = s_new[0], s_new[1]
-for state_key in q:
-if state_key[0] == pacman_s0 and state_key[1] == pacman_s1:
-# Update this state's Q-values based on the current transition, but only if action is valid
-if q[state_key][a] > 0: # Only update if action is not blocked
-q[state_key][a] += round(alpha * (r + gamma * max(q[s_new]) - q[state_key][a]), 2)
-if q[state_key][opposite_action[a]] > 0: # Only update if opposite action is not blocked
-q[state_key][opposite_action[a]] += round(alpha * (r + gamma * max(q[s_new]) - q[state_key][opposite_action[a]]), 2)
-
-print("s_new: " + str(s_new))
-print("q[s] after action with manipulated a: " + str(q[s]))
-print("q[s_new] after action: " + str(q[s_new]))
-print()
-
-# s = s_new
-s = (pacman.x, pacman.y, ghost.x, ghost.y)
-time.sleep(0.5)
-
-gamma *= gamma
 
 # Draw the labyrinth, pacman, and ghost
 draw_labyrinth()
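The loop body added above is the standard tabular Q-learning update Q(s, a) += alpha * (r + gamma * max_a' Q(s_new, a') - Q(s, a)), with epsilon-greedy action selection provided by rl.epsilon_greedy. A minimal standalone sketch of one such step, with the environment transition passed in as a callback (the epsilon value, the function name, and the step signature are assumptions, not taken from the repository's rl module):

import random

def q_learning_step(q, s, step, alpha=0.8, gamma=0.9, epsilon=0.1):
    # Epsilon-greedy choice over the four actions (0 = Left, 1 = Right, 2 = Up, 3 = Down).
    if random.random() < epsilon:
        a = random.randrange(4)
    else:
        a = max(range(4), key=lambda i: q[s][i])
    # step(s, a) stands in for rl.take_action and must return (s_new, r).
    s_new, r = step(s, a)
    # TD update toward the bootstrapped target r + gamma * max_a' Q(s_new, a').
    q[s][a] += alpha * (r + gamma * max(q[s_new]) - q[s][a])
    return s_new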
@@ -209,4 +194,18 @@ def main():
 pygame.quit()
 
 if __name__ == "__main__":
 main()
+
+"""
+for state_key in q:
+if state_key[0] == s_new[0] and state_key[1] == s_new[1]:
+# Update this state's Q-values based on the current transition, but only if action is valid
+if q[state_key][a] > 0: # Only update if action is not blocked
+q[state_key][a] += round(alpha * (r + gamma * max(q[s_new]) - q[state_key][a]), 2)
+if q[state_key][opposite_action[a]] > 0: # Only update if opposite action is not blocked
+q[state_key][opposite_action[a]] += round(alpha * (r + gamma * max(q[s_new]) - q[state_key][opposite_action[a]]), 2)
+print("s_new: " + str(s_new))
+print("q[s] after action with manipulated a: " + str(q[s]))
+print("q[s_new] after action: " + str(q[s_new]))
+print()
+"""

@@ -12,7 +12,7 @@ def q_init():
 
 # Configuration
 NUM_ACTIONS = 4
-INITIAL_Q_VALUE = 1.0 # Small value for initialization
+INITIAL_Q_VALUE = 3.0 # Small value for initialization
 
 # Labyrinth layout
 labyrinth = [
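q_init() itself is not shown in this diff, but from how q is used elsewhere (q[s][a], max(q[s_new]), iteration over 4-tuple state keys) it presumably maps every (pacman_x, pacman_y, ghost_x, ghost_y) tuple to a list of NUM_ACTIONS values initialized to INITIAL_Q_VALUE. A rough sketch under that assumption (the function and parameter names here are placeholders):

def q_init_sketch(cols, rows, num_actions=4, initial_q=3.0):
    # One entry per (pacman, ghost) position pair, one value per action.
    q = {}
    for px in range(cols):
        for py in range(rows):
            for gx in range(cols):
                for gy in range(rows):
                    q[(px, py, gx, gy)] = [initial_q] * num_actions
    return q

A higher initial value is a more optimistic starting estimate, which generally biases greedy selection toward actions that have not been tried yet.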
@@ -145,26 +145,17 @@ def take_action(s, a, labyrinth):
 if a == 3: # down
 s_new[1] += 1
 
-# consider if there is a point on the field
-# r = 2.0 if labyrinth[s_new[1]][s_new[0]] == "." else -5.0
-r = -2
-
 # consider new distance between Pacman and Ghost using actual pathfinding
-pacman_pos = (s[0], s[1])
-ghost_pos = (s[2], s[3])
 pacman_pos_new = (s_new[0], s_new[1])
+ghost_pos = (s[2], s[3])
 
 distance_new = bfs_distance(pacman_pos_new, ghost_pos, labyrinth)
 
-# Reward based on distance from ghost (closer distance = worse reward)
-if distance_new >= 5:
-r -= 2.0 # Good reward for being far away
-elif distance_new >= 3:
-r -= 1.0 # Small reward for being moderately far
-elif distance_new <= 2:
-r += 5.0 # Large penalty for being adjacent to ghost
-elif distance_new == 1:
-r += 10.0 # Large penalty for being adjacent to ghost
+# Reward inversely proportional to distance from ghost (asymptotes to 0)
+r = 1.0 / (1.0 + distance_new) if distance_new != float('inf') else 0.0
+# Reward for eating cookies
+r += 5.0 if labyrinth[s_new[1]][s_new[0]] == "." else -2.0
 
 # Ensure reward doesn't drop below 0.01
 r = max(r, 0.01)
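To make the new reward concrete: with the ghost 3 steps away and a cookie (".") on the target cell, r = 1/(1 + 3) + 5.0 = 5.25; with no cookie, r = 0.25 - 2.0 = -1.75, which the final clamp lifts to 0.01. A standalone restatement of the same arithmetic (this assumes bfs_distance returns the shortest-path length, or float('inf') when the ghost is unreachable):

def shaped_reward(distance_new, target_cell):
    # Distance term: largest when the ghost is close, tending to 0 as it gets farther away.
    r = 1.0 / (1.0 + distance_new) if distance_new != float('inf') else 0.0
    # Cookie term: bonus for stepping onto a "." cell, penalty otherwise.
    r += 5.0 if target_cell == "." else -2.0
    # Clamp so the reward never drops below 0.01.
    return max(r, 0.01)

# shaped_reward(3, ".")  -> 5.25
# shaped_reward(3, " ")  -> 0.01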