Reinforcement Learning: DQN in a Maze

This post walks through the Deep Q-Network (DQN) maze example from Morvan Zhou's reinforcement learning tutorial. The code is split into three files: maze_env.py (the Tkinter maze environment), RL_brain.py (the DQN agent), and run_this.py (the training loop).

maze_env.py:

"""
Reinforcement learning maze example.

Red rectangle:          explorer.
Black rectangles:       hells       [reward = -1].
Yellow bin circle:      paradise    [reward = +1].
All other states:       ground      [reward = 0].

This script is the environment part of this example.
The RL is in RL_brain.py.

View more on my tutorial page: https://morvanzhou.github.io/tutorials/
"""
import numpy as np
import time
import sys
if sys.version_info.major == 2:
    import Tkinter as tk
else:
    import tkinter as tk

UNIT = 40   # pixels
MAZE_H = 4  # grid height
MAZE_W = 4  # grid width


class Maze(tk.Tk, object):
    def __init__(self):
        super(Maze, self).__init__()
        self.action_space = ['u', 'd', 'l', 'r']
        self.n_actions = len(self.action_space)
        self.n_features = 2
        self.title('maze')
        self.geometry('{0}x{1}'.format(MAZE_W * UNIT, MAZE_H * UNIT))  # width x height
        self._build_maze()

    def _build_maze(self):
        self.canvas = tk.Canvas(self, bg='white',
                           height=MAZE_H * UNIT,
                           width=MAZE_W * UNIT)

        # create grids
        for c in range(0, MAZE_W * UNIT, UNIT):
            x0, y0, x1, y1 = c, 0, c, MAZE_H * UNIT
            self.canvas.create_line(x0, y0, x1, y1)
        for r in range(0, MAZE_H * UNIT, UNIT):
            x0, y0, x1, y1 = 0, r, MAZE_W * UNIT, r
            self.canvas.create_line(x0, y0, x1, y1)

        # create origin
        origin = np.array([20, 20])

        # hell
        hell1_center = origin + np.array([UNIT * 2, UNIT])
        self.hell1 = self.canvas.create_rectangle(
            hell1_center[0] - 15, hell1_center[1] - 15,
            hell1_center[0] + 15, hell1_center[1] + 15,
            fill='black')
        # hell
        # hell2_center = origin + np.array([UNIT, UNIT * 2])
        # self.hell2 = self.canvas.create_rectangle(
        #     hell2_center[0] - 15, hell2_center[1] - 15,
        #     hell2_center[0] + 15, hell2_center[1] + 15,
        #     fill='black')

        # create oval
        oval_center = origin + UNIT * 2
        self.oval = self.canvas.create_oval(
            oval_center[0] - 15, oval_center[1] - 15,
            oval_center[0] + 15, oval_center[1] + 15,
            fill='yellow')

        # create red rect
        self.rect = self.canvas.create_rectangle(
            origin[0] - 15, origin[1] - 15,
            origin[0] + 15, origin[1] + 15,
            fill='red')

        # pack all
        self.canvas.pack()

    def reset(self):
        self.update()
        time.sleep(0.1)
        self.canvas.delete(self.rect)
        origin = np.array([20, 20])
        self.rect = self.canvas.create_rectangle(
            origin[0] - 15, origin[1] - 15,
            origin[0] + 15, origin[1] + 15,
            fill='red')
        # return observation
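        # the observation is the (x, y) offset of the agent from the goal circle, normalized by the maze size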
        return (np.array(self.canvas.coords(self.rect)[:2]) - np.array(self.canvas.coords(self.oval)[:2]))/(MAZE_H*UNIT)

    def step(self, action):
        s = self.canvas.coords(self.rect)
        base_action = np.array([0, 0])
        if action == 0:   # up
            if s[1] > UNIT:
                base_action[1] -= UNIT
        elif action == 1:   # down
            if s[1] < (MAZE_H - 1) * UNIT:
                base_action[1] += UNIT
        elif action == 2:   # right
            if s[0] < (MAZE_W - 1) * UNIT:
                base_action[0] += UNIT
        elif action == 3:   # left
            if s[0] > UNIT:
                base_action[0] -= UNIT

        self.canvas.move(self.rect, base_action[0], base_action[1])  # move agent

        next_coords = self.canvas.coords(self.rect)  # next state

        # reward function
        if next_coords == self.canvas.coords(self.oval):
            reward = 1
            done = True
        elif next_coords in [self.canvas.coords(self.hell1)]:
            reward = -1
            done = True
        else:
            reward = 0
            done = False
        s_ = (np.array(next_coords[:2]) - np.array(self.canvas.coords(self.oval)[:2]))/(MAZE_H*UNIT)
        return s_, reward, done

    def render(self):
        #time.sleep(0.01)
        self.update()
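
Before plugging in the network, the environment can be sanity-checked on its own with a random agent. This is an illustrative sketch (the random_walk helper is my own, not part of the original tutorial); it only exercises reset(), step() and render():

import numpy as np
from maze_env import Maze

def random_walk(env, episodes=5):
    # drive the maze with uniformly random actions just to verify the environment API
    for _ in range(episodes):
        s = env.reset()
        done = False
        while not done:
            env.render()
            a = np.random.randint(env.n_actions)  # random action index in {0, 1, 2, 3}
            s, r, done = env.step(a)
    env.destroy()

if __name__ == '__main__':
    env = Maze()
    env.after(100, random_walk, env)  # let Tkinter build the window first, same pattern as run_this.py
    env.mainloop()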

RL_brain.py:

import numpy as np
import tensorflow as tf

np.random.seed(0)

tf.set_random_seed(0)


class DeepQNetwork:
    def __init__(
            self,
            n_actions,
            n_feature,
            learning_rate=0.01,
            reward_decay=0.9,
            e_greedy=0.9,
            replace_target_iter=300,
            memory_size=500,
            batch_size=32,
            e_greedy_increment=None,
            graph_output=False,
            begin_learn_step=200
    ):
        self.n_actions = n_actions
        self.n_feature = n_feature
        self.lr = learning_rate
        self.gamma = reward_decay
        self.epsilon_max = e_greedy
        self.replace_target_iter = replace_target_iter  # how many learning steps between target_net parameter updates
        self.memory_size = memory_size  # replay memory capacity
        self.batch_size = batch_size
        self.begin_learn_step = begin_learn_step
        self.learn_step_counter = 0  # how many times eval_net has been trained
        self.epsilon_increment = e_greedy_increment
        self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max
        # initialize the replay memory: one row per transition [s, a, r, s_]
        self.memory = np.zeros((self.memory_size, n_feature * 2 + 2))  # + 2 for action and reward

        self._built_net()
        # op that copies the eval_net parameters into target_net
        t_params = tf.get_collection('target_net_params')
        e_params = tf.get_collection('eval_net_params')
        self.replace_target_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]
        self.sess = tf.Session()
        if graph_output:
            tf.summary.FileWriter('logs/', self.sess.graph)

        self.sess.run(tf.global_variables_initializer())
        self.cost_his = []

    def _built_net(self):
        # -----------------------------evaluate_net---------------------------------
        self.s = tf.placeholder(tf.float32, [None, self.n_feature], name='s')  # state(t)
        self.s_ = tf.placeholder(tf.float32, [None, self.n_feature], name='s_')  # state(t+1)
        self.target = tf.placeholder(tf.float32, [None, self.n_actions])  # used to build the loss
        with tf.variable_scope('eval_net'):
            c_name, n_l1, w_initializer, b_initializer = ['eval_net_params',
                                                          tf.GraphKeys.GLOBAL_VARIABLES], 10, \
                                                         tf.random_normal_initializer(0., 0.3), \
                                                         tf.constant_initializer(0.1)
            # layer 1
            with tf.variable_scope('l1'):
                w1 = tf.get_variable('w1', [self.n_feature, n_l1], initializer=w_initializer, collections=c_name)
                b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_name)

                l1 = tf.nn.relu(tf.matmul(self.s, w1) + b1)
            # output layer
            with tf.variable_scope('l2'):
                w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_name)
                b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_name)
                self.q_eval = tf.matmul(l1, w2) + b2
            with tf.variable_scope('loss'):
                self.loss = tf.reduce_mean(tf.squared_difference(self.target, self.q_eval))
            with tf.variable_scope('train'):
                self.train_opt = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)

        # -----------------------------target_net---------------------------------
        with tf.variable_scope('target_net'):
            c_name = ['target_net_params', tf.GraphKeys.GLOBAL_VARIABLES]
            # layer 1
            with tf.variable_scope('l1'):
                w1 = tf.get_variable('w1', [self.n_feature, n_l1], initializer=w_initializer, collections=c_name)
                b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_name)

                l1 = tf.nn.relu(tf.matmul(self.s_, w1) + b1)
            # output layer
            with tf.variable_scope('l2'):
                w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_name)
                b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_name)
                self.q_target = tf.matmul(l1, w2) + b2

    def choose_action(self, observation):

        observation = observation[np.newaxis, :]  # (2, ) -> (1, 2)

        if np.random.uniform() < self.epsilon:
            actions_value = self.sess.run(self.q_eval, feed_dict={self.s: observation})
            action = np.argmax(actions_value)
        else:
            action = np.random.randint(0, self.n_actions)
        return action

    def learn(self, s, a, r, s_, step):
        # store transition
        transition = np.hstack((s, [a, r], s_))
        # add or replace the old memory with the new transition
        index = step % self.memory_size
        self.memory[index, :] = transition

        if (step > self.begin_learn_step) and (step % 5 == 0):
            if self.learn_step_counter % self.replace_target_iter == 0:
                self.sess.run(self.replace_target_op)
                print('\ntarget_params_replaced\n')
            # if the memory is already full, sample from the whole buffer; otherwise only from the filled part
            if step > self.memory_size:
                sample_index = np.random.choice(self.memory_size, size=self.batch_size)
            else:
                sample_index = np.random.choice(step, size=self.batch_size)
            batch_memory = self.memory[sample_index, :]

            q_target, q_eval = self.sess.run([self.q_target, self.q_eval],
                                             feed_dict={self.s: batch_memory[:, :self.n_feature],
                                                        self.s_: batch_memory[:, -self.n_feature:]})
            # the following makes sure that in [a1, a2, a_chosen, a4, ...] only the chosen action is updated
            target = q_eval.copy()
            batch_index = np.arange(self.batch_size, dtype=np.int32)
            action_index = self.n_feature
            # actions are stored as 0, 1, 2, 3, so they double as column indices into q_eval
            eval_act_index = batch_memory[:, action_index].astype(int)
            reward_index = self.n_feature + 1
            reward = batch_memory[:, reward_index]
            # only the taken action gets the reward-based target; every other entry stays unchanged
            target[batch_index, eval_act_index] = reward + self.gamma * np.max(q_target, axis=1)  # target[(0, 1, ..., batch_size - 1), (2, 2, 3, 0, 1, ...)]
            # train eval_net: in (target - q_eval) only the taken-action positions are non-zero;
            # the other positions are zero because target was copied from q_eval
            self.cost,_ = self.sess.run([self.loss, self.train_opt],
                                      feed_dict={self.s: batch_memory[:, : self.n_feature],
                                                 self.target: target})
            self.cost_his.append(self.cost)
            # increasing epsilon
            self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max
            self.learn_step_counter += 1

    def plot_cost(self):
        import matplotlib.pyplot as plt
        plt.plot(np.arange(len(self.cost_his)), self.cost_his)
        plt.ylabel('Cost')
        plt.xlabel('training steps')
        plt.show()
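
The heart of learn() is the target-masking step: only the column of the action that was actually taken is overwritten with r + gamma * max(Q_target(s', .)), so every other column contributes zero to the squared-error loss. A small standalone numpy sketch of that step, using made-up numbers rather than values from a real run:

import numpy as np

q_eval = np.array([[0.1, 0.5, 0.2, 0.0],   # Q(s, .) from eval_net for two sampled transitions
                   [0.3, 0.1, 0.4, 0.2]])
q_next = np.array([[0.0, 0.9, 0.1, 0.2],   # Q(s', .) from target_net
                   [0.5, 0.2, 0.6, 0.1]])
actions = np.array([1, 2])                 # actions actually taken in the batch
rewards = np.array([0.0, 1.0])
gamma = 0.9

target = q_eval.copy()
batch_index = np.arange(len(actions))
# overwrite only the taken action's entry; the rest stays equal to q_eval,
# so (target - q_eval) is zero everywhere except at the chosen actions
target[batch_index, actions] = rewards + gamma * q_next.max(axis=1)
print(target)  # row 0: 0.0 + 0.9*0.9 = 0.81 at column 1; row 1: 1.0 + 0.9*0.6 = 1.54 at column 2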

run_this.py:

from maze_env import Maze
from RL_brain import DeepQNetwork

Max_Episode = 300


def run_maze():
    step = 0
    for episode in range(Max_Episode):
        s = env.reset()
        while True:
            env.render()

            a = RL.choose_action(s)
            s_, r, done = env.step(a)
            RL.learn(s, a, r, s_, step)
            s = s_
            if done:
                break
            step += 1
    print('game over')
    env.destroy()


if __name__ == "__main__":
    # maze game
    env = Maze()
    RL = DeepQNetwork(env.n_actions, env.n_features,
                      learning_rate=0.01,
                      reward_decay=0.9,
                      e_greedy=0.9,
                      replace_target_iter=200,
                      memory_size=2000,
                      begin_learn_step=200
                      # graph_output=True
                      )
    env.after(100, run_maze)
    env.mainloop()
    RL.plot_cost()
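
One practical note: RL_brain.py is written against the TensorFlow 1.x API (tf.placeholder, tf.get_variable, tf.Session, tf.set_random_seed). If only TensorFlow 2.x is installed, a common workaround (my own suggestion, not something the original post covers) is to swap the import at the top of RL_brain.py for the v1 compatibility module:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

The rest of the code can then stay as it is.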

Source code:

https://github.com/MorvanZhou/Reinforcement-learning-with-tensorflow/tree/master/contents/5_Deep_Q_Network
