From here on we switch to a different game for the demo: CartPole.

Deep Q Network
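
DQN combines Q-learning with a neural network that approximates Q(s, a), an experience replay buffer, and a periodically synchronized target network. As a minimal sketch (not part of the original example; dqn_targets is just an illustrative helper name), the regression target that the train_model method in the listing below builds looks like this:

import numpy as np

def dqn_targets(q_online, q_target_next, actions, rewards, dones, gamma=0.99):
    """q_online: (batch, n_actions) predictions of the online net for states s.
    q_target_next: (batch, n_actions) predictions of the target net for next states s'.
    Returns the (batch, n_actions) array that is passed to model.fit() as the label."""
    targets = q_online.copy()
    for i in range(len(actions)):
        if dones[i]:
            # terminal transition: the target is just the reward
            targets[i, actions[i]] = rewards[i]
        else:
            # non-terminal transition: r + gamma * max_a' Q_target(s', a')
            targets[i, actions[i]] = rewards[i] + gamma * np.max(q_target_next[i])
    return targets

For example, with two actions, gamma = 0.99, reward 1.0 and target-network values [0.5, 2.0] for s', a non-terminal transition gets the target 1.0 + 0.99 * 2.0 = 2.98 for the action that was taken; all other entries keep the online network's own predictions, so only the chosen action's Q value is pushed toward the Bellman target.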

Example code

import sys
import gym
import pylab
import random
import numpy as np
from collections import deque
from keras.layers import Dense
from keras.optimizers import Adam
from keras.models import Sequential

EPISODES = 300


# DQN Agent for the Cartpole
# it uses a Neural Network to approximate the Q function of Q-learning,
# plus an experience replay memory and a fixed target Q network
class DQNAgent:
    def __init__(self, state_size, action_size):
        # if you want to see Cartpole learning, then change to True
        self.render = True
        self.load_model = False

        # get size of state and action
        self.state_size = state_size
        self.action_size = action_size

        # These are hyper parameters for the DQN
        self.discount_factor = 0.99
        self.learning_rate = 0.001
        self.epsilon = 1.0
        self.epsilon_decay = 0.999
        self.epsilon_min = 0.01
        self.batch_size = 64
        self.train_start = 1000
        # create replay memory using deque
        self.memory = deque(maxlen=2000)

        # create main model and target model
        self.model = self.build_model()
        self.target_model = self.build_model()

        # initialize target model
        self.update_target_model()

        if self.load_model:
            self.model.load_weights("./save_model/cartpole_dqn.h5")

    # approximate the Q function using a Neural Network:
    # the state is the input and the Q value of each action is the output of the network
    def build_model(self):
        model = Sequential()
        model.add(Dense(24, input_dim=self.state_size, activation='relu',
                        kernel_initializer='he_uniform'))
        model.add(Dense(24, activation='relu',
                        kernel_initializer='he_uniform'))
        model.add(Dense(self.action_size, activation='linear',
                        kernel_initializer='he_uniform'))
        model.summary()
        model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
        return model

    # after some time interval, update the target model to be the same as the model
    def update_target_model(self):
        self.target_model.set_weights(self.model.get_weights())

    # get an action from the model using an epsilon-greedy policy
    def get_action(self, state):
        if np.random.rand() <= self.epsilon:
            return random.randrange(self.action_size)
        else:
            # predict Q(s, a) for every action and act greedily on the largest value
            q_value = self.model.predict(state)
            return np.argmax(q_value[0])

    # save sample <s, a, r, s'> to the replay memory
    def append_sample(self, state, action, reward, next_state, done):
        self.memory.append((state, action, reward, next_state, done))
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay

    # pick samples randomly from the replay memory (with batch_size)
    def train_model(self):
        if len(self.memory) < self.train_start:
            return
        # import pdb; pdb.set_trace()  # optional: break here to inspect one training step
        batch_size = min(self.batch_size, len(self.memory))
        mini_batch = random.sample(self.memory, batch_size)  # list of batch_size transitions
        # each sample looks like:
        # (array([[-0.04263461, -0.00657423, 0.00506589, -0.00200269]]), 0, 1.0,
        #  array([[-0.04276609, -0.20176846, 0.00502584, 0.29227427]]), False)

        update_input = np.zeros((batch_size, self.state_size))
        update_target = np.zeros((batch_size, self.state_size))
        action, reward, done = [], [], []

        for i in range(self.batch_size):
            update_input[i] = mini_batch[i][0]
            action.append(mini_batch[i][1])
            reward.append(mini_batch[i][2])
            update_target[i] = mini_batch[i][3]
            done.append(mini_batch[i][4])

        target = self.model.predict(update_input)              # shape (64, 2)
        target_val = self.target_model.predict(update_target)  # shape (64, 2)

        for i in range(self.batch_size):
            # Q Learning: get the maximum Q value at s' from the target model
            if done[i]:
                target[i][action[i]] = reward[i]
            else:
                target[i][action[i]] = reward[i] + self.discount_factor * (
                    np.amax(target_val[i]))  # off-policy update

        # and do the model fit!
        self.model.fit(update_input, target, batch_size=self.batch_size,
                       epochs=1, verbose=0)


if __name__ == "__main__":
    # In case of CartPole-v1, the maximum length of an episode is 500
    env = gym.make('CartPole-v1')
    # get the size of the state and action from the environment
    state_size = env.observation_space.shape[0]  # 4 for CartPole
    action_size = env.action_space.n             # 2 for CartPole

    agent = DQNAgent(state_size, action_size)

    scores, episodes = [], []

    for e in range(EPISODES):
        done = False
        score = 0
        state = env.reset()
        state = np.reshape(state, [1, state_size])

        while not done:
            if agent.render:
                env.render()

            # get the action for the current state and go one step in the environment
            action = agent.get_action(state)
            next_state, reward, done, info = env.step(action)
            next_state = np.reshape(next_state, [1, state_size])
            # if an action makes the episode end before step 500, give a penalty of -100
            reward = reward if not done or score == 499 else -100

            # save the sample <s, a, r, s'> to the replay memory
            agent.append_sample(state, action, reward, next_state, done)
            # do the training at every time step
            agent.train_model()
            score += reward
            state = next_state

            if done:
                # at every episode, update the target model to be the same as the model
                agent.update_target_model()

                # every episode, plot the play time
                score = score if score == 500 else score + 100
                scores.append(score)
                episodes.append(e)
                pylab.plot(episodes, scores, 'b')
                pylab.savefig("./save_graph/cartpole_dqn.png")
                print("episode:", e, "  score:", score, "  memory length:",
                      len(agent.memory), "  epsilon:", agent.epsilon)

                # if the mean score of the last 10 episodes is bigger than 490,
                # stop training
                if np.mean(scores[-min(10, len(scores)):]) > 490:
                    sys.exit()

        # save the model every 50 episodes
        if e % 50 == 0:
            agent.model.save_weights("./save_model/cartpole_dqn.h5")
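
The training loop above writes its weights to ./save_model/cartpole_dqn.h5 every 50 episodes. As a possible follow-up (this snippet is not part of the original post, just a usage sketch that reuses the DQNAgent class defined above and the same old gym step API), the saved model can be reloaded and run greedily with exploration turned off:

# hypothetical evaluation script: greedy rollout with the trained weights
import gym
import numpy as np

env = gym.make('CartPole-v1')
state_size = env.observation_space.shape[0]
action_size = env.action_space.n

agent = DQNAgent(state_size, action_size)                # class from the listing above
agent.model.load_weights("./save_model/cartpole_dqn.h5")
agent.epsilon = 0.0                                      # act greedily, no exploration

state = np.reshape(env.reset(), [1, state_size])
done, score = False, 0
while not done:
    env.render()
    action = agent.get_action(state)
    state, reward, done, _ = env.step(action)
    state = np.reshape(state, [1, state_size])
    score += reward
print("evaluation score:", score)

Because the -100 penalty and the +100 correction are only used during training, the printed evaluation score here is simply the raw episode length, up to 500 for CartPole-v1.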
