debugging reward system

parent 8bd97eb9ef
commit 1453fd930a
@@ -120,6 +120,7 @@ def move_pacman(pacman, a):
 # Main game function
 def main():
     global labyrinth
+    clock = pygame.time.Clock()

     # Initialize Pacman and Ghost positions

@@ -129,8 +130,8 @@ def main():
     s = (pacman.x, pacman.y, ghost.x, ghost.y)  # as a tuple so the state becomes hashable
     q = rl.q_init()
     a_opposite_direction = {0: 1, 1: 0, 2: 3, 3: 2}
-    gamma = 0.9
-    alpha = 0.8
+    gamma = 0.90
+    alpha = 0.2

     # Game loop
     running = True
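Context for the hyperparameter change: the learning rate alpha drops from 0.8 to 0.2, so each update moves Q-values more conservatively, while gamma is unchanged at 0.9. rl.q_init() itself is not part of this diff; below is a minimal sketch of one plausible implementation, assuming a dict keyed by the (pacman.x, pacman.y, ghost.x, ghost.y) state tuple with one Q-value per action, small positive defaults for legal moves, and 0 for blocked ones (the epsilon_greedy fragment later in this commit filters out actions with Q = 0):

from collections import defaultdict

def q_init():
    # Hypothetical sketch only -- the repo's rl.q_init() may differ.
    # Four actions: 0 = Left, 1 = Right, 2 = Up, 3 = Down.
    # 0.01 marks a legal-but-unexplored action; blocked directions can
    # be overwritten with 0 so epsilon_greedy skips them.
    return defaultdict(lambda: [0.01, 0.01, 0.01, 0.01])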
@@ -163,8 +164,8 @@ def main():

         # Start of my code
         s_not_terminal = True
-        labyrinth_copy = labyrinth.copy()
-        a = 0
+        labyrinth_copy = [list(row) for row in labyrinth]  # Create proper deep copy
+        a = None
         while s_not_terminal:
             print("s: " + str(s))
             print("q[s] before action: " + str(q[s]))
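The copy fix above addresses the classic shallow-copy pitfall: list.copy() duplicates only the outer list, so every row object is still shared with the original labyrinth, and eating cookies in the copy would corrupt the master grid across episodes. A standalone illustration (toy grid, not repo code):

grid = [["#", "."], [".", "."]]

shallow = grid.copy()               # new outer list, same inner row objects
shallow[0][1] = " "                 # mutates grid[0][1] too!
print(grid[0][1])                   # " "  -- the original grid changed

grid = [["#", "."], [".", "."]]     # reset
deep = [list(row) for row in grid]  # fresh row lists, as in the fix
deep[0][1] = " "
print(grid[0][1])                   # "."  -- the original grid is intact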
@@ -173,7 +174,7 @@ def main():
             s_new, r, labyrinth_copy = rl.take_action(s, a, labyrinth_copy)

             q[s][a] += round(alpha * (r + gamma * max(q[s_new]) - q[s][a]), 2)
-            q[s_new][a_opposite_direction[a]] += round(alpha * (r + gamma * max(q[s]) - q[s_new][a_opposite_direction[a]]), 2)
+            # q[s_new][a_opposite_direction[a]] += round(alpha * (r + gamma * max(q[s]) - q[s_new][a_opposite_direction[a]]), 2)

             s = s_new

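The surviving line is the standard one-step Q-learning update, q[s][a] += alpha * (r + gamma * max(q[s_new]) - q[s][a]); the now-commented second line had additionally pushed a bootstrapped value into the opposite action of s_new, which is not part of the textbook rule. A standalone arithmetic check with the new alpha = 0.2 and gamma = 0.90 (all Q-values hypothetical):

alpha, gamma = 0.2, 0.90
q_s   = [0.01, 0.30, 0.01, 0.01]   # Q-values for state s
q_new = [0.01, 0.50, 0.20, 0.01]   # Q-values for s_new
a, r = 1, 0.5

q_s[a] += round(alpha * (r + gamma * max(q_new) - q_s[a]), 2)
print(q_s[a])   # 0.43 up to float rounding: 0.30 + 0.2 * (0.5 + 0.45 - 0.30)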
@@ -184,11 +185,12 @@ def main():
             if s[0] == s[2] and s[1] == s[3]:
                 s_not_terminal = False

-            time.sleep(0.2)
+            # time.sleep(0.05)

-        labyrinth_copy = []
-        print("NEW LOOP")
+        s = (pacman.x, pacman.y, ghost.x, ghost.y)  # as a tuple so the state becomes hashable
+        a = rl.epsilon_greedy(q, s)  # 0 = Left; 1 = Right; 2 = Up; 3 = Down
+        move_pacman(pacman, a)
         print("NEW LOOP")

         # Draw the labyrinth, pacman, and ghost
         draw_labyrinth()
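Taken together, the main() hunks run one tabular Q-learning episode per pass: start from a fresh state and labyrinth copy, step until Pacman and the ghost share a cell (s[0] == s[2] and s[1] == s[3]), then redraw. A self-contained toy version of the same loop shape on a 1-D corridor (names and dynamics invented for illustration; this is not the repo's code):

import random

alpha, gamma, epsilon = 0.2, 0.90, 0.2
q = {}                                    # state -> [Q(left), Q(right)]

def q_values(s):
    return q.setdefault(s, [0.01, 0.01])

TARGET = 9                                # "ghost" cell that ends the episode
for episode in range(200):
    s = 0
    s_not_terminal = True
    while s_not_terminal:
        qs = q_values(s)
        if random.random() < epsilon:
            a = random.randrange(2)       # explore
        else:
            a = qs.index(max(qs))         # exploit
        s_new = max(0, min(9, s + (1 if a == 1 else -1)))
        r = 1.0 if s_new == TARGET else 0.01   # reward floor, as in take_action
        qs[a] += alpha * (r + gamma * max(q_values(s_new)) - qs[a])
        s = s_new
        if s == TARGET:                   # terminal check, like s[0..1] == s[2..3]
            s_not_terminal = False

print(q_values(0))   # after training, the "right" action should dominate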
@@ -80,8 +80,8 @@ def epsilon_greedy(q, s, epsilon=0.2):
     a = q[s].index(q_max)

     return a
-
+    """

     if np.random.random() < epsilon:
         # Explore: choose random action (excluding blocked actions with Q=0)
         valid_actions = [i for i in range(len(q[s])) if q[s][i] > 0]
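The epsilon_greedy fragments visible here show a greedy branch (a = q[s].index(q_max); return a) and an exploration branch guarded by np.random.random() < epsilon that samples only actions with a nonzero Q-value, with return 0 as a fallback. A sketch that reassembles those pieces; the lines between the visible ones are guessed, not taken from the repo:

import numpy as np

def epsilon_greedy(q, s, epsilon=0.2):
    if np.random.random() < epsilon:
        # Explore: choose random action (excluding blocked actions with Q=0)
        valid_actions = [i for i in range(len(q[s])) if q[s][i] > 0]
        if valid_actions:
            return int(np.random.choice(valid_actions))
        return 0
    # Exploit: pick the action with the highest Q-value
    q_max = max(q[s])
    a = q[s].index(q_max)
    return a

Usage example: with q = {(1, 1, 5, 5): [0.01, 0.43, 0.0, 0.01]}, epsilon_greedy(q, (1, 1, 5, 5)) returns 1 about 80% of the time and never picks the blocked action 2.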
@@ -100,7 +100,6 @@ def epsilon_greedy(q, s, epsilon=0.2):
         return 0
     """

-
 def bfs_distance(start, end, labyrinth):
     """
     Calculate shortest path distance between two points using BFS.
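bfs_distance is documented as a shortest-path distance via breadth-first search. A self-contained sketch of that documented behaviour, assuming (x, y) coordinates and "#" for walls; returning float('inf') for unreachable pairs matches the float('inf') checks in take_action below, but the repo's implementation may differ in details:

from collections import deque

def bfs_distance(start, end, labyrinth):
    # Explore cells outward from start, one ring of neighbours at a time.
    if start == end:
        return 0
    seen, queue = {start}, deque([(start, 0)])
    while queue:
        (x, y), dist = queue.popleft()
        for nx, ny in ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)):
            if (nx, ny) in seen:
                continue
            if not (0 <= ny < len(labyrinth) and 0 <= nx < len(labyrinth[ny])):
                continue
            if labyrinth[ny][nx] == "#":
                continue
            if (nx, ny) == end:
                return dist + 1
            seen.add((nx, ny))
            queue.append(((nx, ny), dist + 1))
    return float('inf')      # end not reachable from start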
@@ -150,12 +149,12 @@ def take_action(s, a, labyrinth):
     ghost_pos = (s[2], s[3])

     distance_new = bfs_distance(pacman_pos_new, ghost_pos, labyrinth)
+    distance_old = bfs_distance((s[0], s[1]), ghost_pos, labyrinth)

-    # Reward inversely proportional to distance from ghost (asymptotes to 0)
-    r = 1.0 / (2.0 + distance_new) if distance_new != float('inf') else 0.0
+    r = 0.05 * distance_new if distance_new != float('inf') else 0.0

     # Reward for eating cookies
-    r += 0.5 if labyrinth[s_new[1]][s_new[0]] == "." else -0.5
+    r += 1.0 if labyrinth[s_new[1]][s_new[0]] == "." else -1.5

     # Ensure reward doesn't drop below 0.01
     r = max(r, 0.01)
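Note what the reward rewrite changes: the old distance term 1.0 / (2.0 + d) was largest when Pacman was close to the ghost, while the new 0.05 * d grows with distance, so the shaping now pays for staying away; the cookie bonus/penalty widens from +0.5/-0.5 to +1.0/-1.5; and the max(r, 0.01) floor keeps every step's reward positive, so a heavy penalty can only flatten the signal to 0.01, never make it negative. A standalone arithmetic check of the new shaping (not repo code):

def shaped_reward(distance_new, ate_cookie):
    r = 0.05 * distance_new if distance_new != float('inf') else 0.0
    r += 1.0 if ate_cookie else -1.5
    return max(r, 0.01)           # reward floor, as in take_action

print(shaped_reward(10, True))    # 0.05*10 + 1.0 = 1.5
print(shaped_reward(10, False))   # 0.05*10 - 1.5 = -1.0 -> clamped to 0.01
print(shaped_reward(40, False))   # 0.05*40 - 1.5 = 0.5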