added max iterations

parent 1453fd930a
commit a76d2c41d3
@@ -166,14 +166,17 @@ def main():
     s_not_terminal = True
     labyrinth_copy = [list(row) for row in labyrinth] # Create proper deep copy
     a = None
-    while s_not_terminal:
+    iteration = 0
+    max_iterations = 50 # Prevent infinite loops
+    while s_not_terminal and iteration < max_iterations:
+        iteration += 1
         print("s: " + str(s))
         print("q[s] before action: " + str(q[s]))
 
         a = rl.epsilon_greedy(q, s) # 0 = Left; 1 = Right ; 2 = Up ; 3 = Down
         s_new, r, labyrinth_copy = rl.take_action(s, a, labyrinth_copy)
 
-        q[s][a] += round(alpha * (r + gamma * max(q[s_new]) - q[s][a]), 2)
+        q[s][a] += round(alpha * (r + gamma * rl.max_q(q, s_new, labyrinth) - q[s][a]), 2)
         # q[s_new][a_opposite_direction[a]] += round(alpha * (r + gamma * max(q[s]) - q[s_new][a_opposite_direction[a]]), 2)
 
         s = s_new
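
Note on the Q-update change: the target now bootstraps from rl.max_q(q, s_new, labyrinth), which recomputes one-step rewards around s_new, rather than from max(q[s_new]), the stored action values of the next state. For comparison, a minimal sketch of the textbook tabular Q-learning backup (illustrative names, not this repo's API):

    def q_update(q, s, a, r, s_new, alpha=0.1, gamma=0.9):
        # One TD backup: move Q(s, a) toward r + gamma * max_a' Q(s_new, a')
        td_target = r + gamma * max(q[s_new])
        q[s][a] += alpha * (td_target - q[s][a])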
@@ -184,8 +187,14 @@ def main():
         # Check for collisions (game over if ghost catches pacman)
         if s[0] == s[2] and s[1] == s[3]:
             s_not_terminal = False
+            q[s][a] = 0.01
+            print("There was just a collision!!!")
+            print("s: " + str(s))
 
-        # time.sleep(0.05)
+        time.sleep(0.025)
 
+    if iteration >= max_iterations:
+        print(f"Max iterations reached ({max_iterations}), breaking out of loop")
+
     s = (pacman.x, pacman.y, ghost.x, ghost.y) # as a tuple so the state becomes hashable
     a = rl.epsilon_greedy(q, s) # 0 = Left; 1 = Right ; 2 = Up ; 3 = Down
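
Note: the iteration cap turns episodes that never reach a terminal state into truncated ones; unlike the collision branch, truncation does not adjust any Q-value. A self-contained sketch of the same loop pattern (the episode body is a stub that terminates at step 10):

    s_not_terminal = True
    iteration = 0
    max_iterations = 50  # hard cap so a stuck policy cannot spin forever
    while s_not_terminal and iteration < max_iterations:
        iteration += 1
        s_not_terminal = iteration < 10  # stub for the real step/terminal check
    if iteration >= max_iterations:
        print(f"Max iterations reached ({max_iterations}), breaking out of loop")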
@@ -80,8 +80,8 @@ def epsilon_greedy(q, s, epsilon=0.2):
         a = q[s].index(q_max)
 
     return a
-    """
 
+    """
     if np.random.random() < epsilon:
         # Explore: choose random action (excluding blocked actions with Q=0)
         valid_actions = [i for i in range(len(q[s])) if q[s][i] > 0]
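
Note: only the greedy path of epsilon_greedy is live; the exploration branch below return a sits inside a docstring (dead code), and this edit merely moves the opening quotes past a blank line. For reference, a minimal epsilon-greedy sketch under the same assumption that q[s] is a list of action values (the name epsilon_greedy_sketch is illustrative):

    import numpy as np

    def epsilon_greedy_sketch(q, s, epsilon=0.2):
        # Explore with probability epsilon, otherwise exploit the best-known action.
        if np.random.random() < epsilon:
            return np.random.randint(len(q[s]))
        return q[s].index(max(q[s]))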
@@ -130,6 +130,52 @@ def bfs_distance(start, end, labyrinth):
 
     return float('inf') # No path found
 
+
+def max_q(q, s_new, labyrinth):
+    """Calculate the maximum reward for all possible actions in state s_new"""
+    max_reward = float('-inf')
+    for a in range(4):
+        if q[s_new][a] > 0: # Only consider valid (non-blocked) actions
+            s_test = list(s_new)
+            if a == 0: # left
+                s_test[0] -= 1
+            elif a == 1: # right
+                s_test[0] += 1
+            elif a == 2: # up
+                s_test[1] -= 1
+            elif a == 3: # down
+                s_test[1] += 1
+
+            reward = calc_reward(tuple(s_test), labyrinth)
+            max_reward = max(max_reward, reward)
+
+    return max_reward if max_reward != float('-inf') else 0.0
+
+
+def calc_reward(s_new, labyrinth):
+    # consider new distance between Pacman and Ghost using actual pathfinding
+    pacman_pos_new = (s_new[0], s_new[1])
+    ghost_pos = (s_new[2], s_new[3])
+
+    # distance_old = bfs_distance((s[0], s[1]), ghost_pos, labyrinth)
+    distance_new = bfs_distance(pacman_pos_new, ghost_pos, labyrinth)
+
+    r = 0
+
+    if distance_new < 3:
+        r = -2
+    elif distance_new == 4:
+        r = 0.5
+    elif distance_new > 4:
+        r = 1
+
+    # Reward for cookies
+    r += 1.0 if labyrinth[s_new[1]][s_new[0]] == "." else -1.5
+
+    # efficiency experiment
+    r -= 0.1
+
+    r = max(r, 0.01)
+
+    return r
+
+
 def take_action(s, a, labyrinth):
     labyrinth_copy = [list(row) for row in labyrinth]
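
Note on calc_reward: a BFS distance of exactly 3 falls through all three branches (only < 3, == 4, and > 4 are handled), leaving the distance term at 0. The shaping logic in isolation, with the distance passed in directly (shaped_reward is an illustrative helper, not part of the repo):

    def shaped_reward(distance_new, on_cookie):
        r = 0
        if distance_new < 3:      # dangerously close to the ghost
            r = -2
        elif distance_new == 4:
            r = 0.5
        elif distance_new > 4:    # comfortably far away
            r = 1
        # distance_new == 3 keeps r = 0
        r += 1.0 if on_cookie else -1.5  # cookie bonus vs. empty-cell penalty
        r -= 0.1                         # per-step efficiency penalty
        return max(r, 0.01)              # final clamp keeps every reward positive

    print(shaped_reward(5, True))   # 1.9
    print(shaped_reward(2, False))  # 0.01 (clamped from -3.6)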
@@ -144,19 +190,6 @@ def take_action(s, a, labyrinth):
     if a == 3: # down
         s_new[1] += 1
 
-    # consider new distance between Pacman and Ghost using actual pathfinding
-    pacman_pos_new = (s_new[0], s_new[1])
-    ghost_pos = (s[2], s[3])
-
-    distance_new = bfs_distance(pacman_pos_new, ghost_pos, labyrinth)
-    distance_old = bfs_distance((s[0], s[1]), ghost_pos, labyrinth)
-
-    r = 0.05 * distance_new if distance_new != float('inf') else 0.0
-
-    # Reward for eating cookies
-    r += 1.0 if labyrinth[s_new[1]][s_new[0]] == "." else -1.5
-
-    # Ensure reward doesn't drop below 0.01
-    r = max(r, 0.01)
-
+    r = calc_reward(s_new, labyrinth)
+
     return tuple(s_new), r, labyrinth_copy