Example 2 rewritten in object-oriented style, with an environment added!
However, updating the environment uses a clear-screen command, so play() is a bit awkward to watch; during learn() you can just about follow it :P (a possible in-place redraw workaround is sketched after the full listing).
0. Demo
1. Complete code
Changes relative to Example 1:
Agent, five places: states, actions, rewards, get_valid_actions(), get_next_state()
Env, two places: __init__(), update()
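For orientation, here is a minimal sketch (not part of the original listing) of how the four states map onto the display string '--\n#-' used by Env below; the index shift skips the '\n' sitting at position 2:

# States 0..3 cover the 2x2 maze row by row:
#   0 1    (0 = entrance)
#   2 3    (2 = trap, 3 = exit)
# The display string '--\n#-' has '\n' at index 2, so states 2 and 3
# are shifted by one before drawing -- the same trick Env.update() uses.
layout = list('--\n#-')
for s in range(4):
    frame = layout[:]
    frame[s if s <= 1 else s + 1] = 'o'
    print(''.join(frame), end='\n\n')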
import pandas as pd
import random
import time
import pickle
import pathlib
import os

'''
Four-cell maze:
-------------------
| entrance |      |
-------------------
| trap     | exit |
-------------------
'''

class Env(object):
    '''Environment class'''

    def __init__(self):
        '''Initialize'''
        self.env = list('--\n#-')

    def update(self, state, delay=0.1):
        '''Update the environment and print it'''
        env = self.env[:]
        if state > 1:       # skip the '\n' at index 2 of the display string
            state += 1
        env[state] = 'o'    # mark the agent's current position
        print('\r{}'.format(''.join(env)), end='')
        time.sleep(delay)
        os.system('cls')    # clear the screen (Windows command)


class Agent(object):
    '''Agent class'''

    def __init__(self, alpha=0.01, gamma=0.9):
        '''Initialize'''
        self.states = range(4)          # state space: the 4 states 0, 1, 2, 3
        self.actions = list('udlr')     # action space: up, down, left, right
        self.rewards = [0, 0, -10, 10]  # rewards: +10 for state 3 (exit), -10 for state 2 (trap), 0 otherwise
        self.alpha = alpha
        self.gamma = gamma
        self.q_table = pd.DataFrame(data=[[0 for _ in self.actions] for _ in self.states],
                                    index=self.states,
                                    columns=self.actions)

    def save_policy(self):
        '''Save the Q table'''
        with open('q_table.pickle', 'wb') as f:
            # Pickle the Q table using the highest protocol available.
            pickle.dump(self.q_table, f, pickle.HIGHEST_PROTOCOL)

    def load_policy(self):
        '''Load the Q table'''
        with open('q_table.pickle', 'rb') as f:
            self.q_table = pickle.load(f)

    def choose_action(self, state, epsilon=0.8):
        '''Choose an action for the current state, randomly or greedily, according to epsilon'''
        if (random.uniform(0, 1) > epsilon) or ((self.q_table.loc[state] == 0).all()):  # explore
            action = random.choice(self.get_valid_actions(state))
        else:                                                                           # exploit (greedy)
            action = self.q_table.loc[state].idxmax()
        return action

    def get_q_values(self, state):
        '''Get the Q values of all valid actions in state'''
        q_values = self.q_table.loc[state, self.get_valid_actions(state)]
        return q_values

    def update_q_value(self, state, action, next_state_reward, next_state_q_values):
        '''Update a Q value according to the Bellman equation'''
        self.q_table.loc[state, action] += self.alpha * (next_state_reward
                                                         + self.gamma * next_state_q_values.max()
                                                         - self.q_table.loc[state, action])

    def get_valid_actions(self, state):
        '''Get all valid actions in the current state'''
        valid_actions = set(self.actions)
        if state % 2 == 1:                  # last column: no moving right
            valid_actions -= set(['r'])
        if state % 2 == 0:                  # first column: no moving left
            valid_actions -= set(['l'])
        if state // 2 == 1:                 # last row: no moving down
            valid_actions -= set(['d'])
        if state // 2 == 0:                 # first row: no moving up
            valid_actions -= set(['u'])
        return list(valid_actions)

    def get_next_state(self, state, action):
        '''Get the next state after taking an action in a state'''
        # u, d, l, r, n = -2, +2, -1, +1, 0
        if state % 2 != 1 and action == 'r':        # every column but the last can move right (+1)
            next_state = state + 1
        elif state % 2 != 0 and action == 'l':      # every column but the first can move left (-1)
            next_state = state - 1
        elif state // 2 != 1 and action == 'd':     # every row but the last can move down (+2)
            next_state = state + 2
        elif state // 2 != 0 and action == 'u':     # every row but the first can move up (-2)
            next_state = state - 2
        else:
            next_state = state
        return next_state

    def learn(self, env=None, episode=1000, epsilon=0.8):
        '''Q-learning algorithm'''
        print('Agent is learning...')
        for _ in range(episode):
            current_state = self.states[0]
            if env is not None:     # if an environment was provided, update it!
                env.update(current_state)
            while current_state != self.states[-1]:
                current_action = self.choose_action(current_state, epsilon)  # random or greedy, according to epsilon
                next_state = self.get_next_state(current_state, current_action)
                next_state_reward = self.rewards[next_state]
                next_state_q_values = self.get_q_values(next_state)
                self.update_q_value(current_state, current_action, next_state_reward, next_state_q_values)
                current_state = next_state
                if env is not None:  # if an environment was provided, update it!
                    env.update(current_state)
        print('\nok')

    def play(self, env=None, delay=0.5):
        '''Play the game, using the learned policy'''
        assert env is not None, 'Env must not be None!'
        if pathlib.Path("q_table.pickle").exists():
            self.load_policy()
        else:
            print("I need to learn before playing this game.")
            self.learn(env, 13)
            self.save_policy()
        print('Agent is playing...')
        current_state = self.states[0]
        env.update(current_state, delay)
        while current_state != self.states[-1]:
            current_action = self.choose_action(current_state, 1.)  # epsilon = 1.0: never random
            next_state = self.get_next_state(current_state, current_action)
            current_state = next_state
            env.update(current_state, delay)
        print('\nCongratulations, Agent got it!')


if __name__ == '__main__':
    env = Env()                                 # the environment
    agent = Agent()                             # the agent
    agent.learn(env, episode=25, epsilon=0.6)   # learn first
    #agent.save_policy()                        # save what was learned
    #agent.load_policy()                        # load what was learned
    #agent.play(env)                            # then play
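As noted at the top, the os.system('cls') call makes play() hard to watch. One possible workaround (not part of the original code; the function name update_no_clear is hypothetical) is to redraw the maze in place with an ANSI cursor-up escape instead of clearing the screen:

import time

def update_no_clear(env_chars, state, delay=0.1):
    '''Sketch of an Env.update() variant: redraw the maze in place by moving
    the cursor back up with an ANSI escape sequence instead of calling
    os.system('cls'). Assumes a terminal that understands ANSI codes.'''
    frame = env_chars[:]
    if state > 1:
        state += 1                              # skip the '\n' at index 2
    frame[state] = 'o'
    text = ''.join(frame)
    n_lines = text.count('\n') + 1              # the maze occupies two rows
    print(text, flush=True)
    time.sleep(delay)
    print('\x1b[{}A'.format(n_lines), end='')   # move the cursor back up over the frame

To try it, the env.update(...) calls in play() would be replaced with update_no_clear(env.env, current_state, delay).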