Reinforcement Learning: DQN
The code is based on Morvan Python's (莫凡Python) DQN tutorial.
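For reference, the update rule the code below implements is the standard DQN target: for a stored transition (s, a, r, s_),

    q_target(s, a) = r + γ · max_a' Q_target(s_, a')

where Q_target is a periodically frozen copy of the evaluation network, re-copied every replace_target_iter learning steps.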
import numpy as np
import tensorflow as tf  # TensorFlow 1.x API (under TF 2.x, use tf.compat.v1 and disable eager execution)

class DeepQNetwork:
    def __init__(
            self,
            n_actions,
            n_features,              # number of observation features (e.g. length/width/height), used to predict action values
            learning_rate=0.01,
            reward_decay=0.9,
            e_greedy=0.9,
            replace_target_iter=300,
            memory_size=500,
            batch_size=32,
            e_greedy_increment=None, # per-step increment of epsilon (gradually narrows exploration)
            output_graph=False,
    ):
        self.n_actions = n_actions
        self.n_features = n_features
        self.lr = learning_rate
        self.gamma = reward_decay
        self.epsilon_max = e_greedy                     # maximum greediness (probability of choosing the greedy action)
        self.replace_target_iter = replace_target_iter  # number of steps between target_net parameter swaps
        self.memory_size = memory_size                  # capacity of the replay memory
        self.batch_size = batch_size                    # how many transitions to sample from memory per update
        self.epsilon_increment = e_greedy_increment     # increment of epsilon per learning step
        self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max  # start fully exploratory if epsilon is annealed, otherwise use the fixed maximum
        self.learn_step_counter = 0                     # counts learning steps (used to decide when to swap target_net's parameters)
        self.memory = np.zeros((self.memory_size, n_features * 2 + 2))  # zero-initialized memory; each row stores (s, a, r, s_): two observations (n_features * 2) plus a and r
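        # example row layout with n_features = 2: [s[0], s[1], a, r, s_[0], s_[1]],
        # i.e. n_features * 2 + 2 = 6 columns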
        self._build_net()  # build the two neural networks (target_net, eval_net)
        t_params = tf.get_collection('target_net_params')  # collect target_net's parameters
        e_params = tf.get_collection('eval_net_params')    # collect eval_net's parameters
        self.replace_target_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]  # ops that overwrite target_net's parameters with eval_net's
        # zip() pairs corresponding elements of its argument iterables into tuples and returns them as one iterable
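        # e.g. zip([t_w1, t_b1], [e_w1, e_b1]) -> [(t_w1, e_w1), (t_b1, e_b1)],
        # so each tf.assign copies one eval_net variable into its target_net twin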
        self.sess = tf.Session()
        if output_graph:  # write a TensorBoard log
            # $ tensorboard --logdir=logs
            # tf.train.SummaryWriter will soon be deprecated; use the following instead
            tf.summary.FileWriter("logs/", self.sess.graph)
        self.sess.run(tf.global_variables_initializer())  # initialize all variables in the session
        self.cost_his = []  # record every cost (loss) value so it can be plotted after training
    def _build_net(self):  # build the neural networks
        # ------------------ build eval_net, whose parameters are updated every training step ------------------
        # eval_net's input s, analogous to an image input, is a 2-D matrix with one sample per row
        # tf.placeholder(dtype=tf.float32, shape=[rows, cols], name='s')
        # the None dimension matches the minibatch size
        self.s = tf.placeholder(tf.float32, [None, self.n_features], name='s')  # input state s
        # the "real" Q value used to compute the loss; it is not an output of this graph but is fed in from outside
        self.q_target = tf.placeholder(tf.float32, [None, self.n_actions], name='Q_target')  # input for the loss function
        # tf.variable_scope groups eval_net's variables
        with tf.variable_scope('eval_net'):
            # c_names is a list of collection names; the hidden layer has 10 neurons,
            # weights w are drawn from a normal distribution and biases b start at the constant 0.1
            # c_names (collections_names) is used when target_net's parameters are replaced:
            # all of eval_net's parameters go into this collection so they can later be copied into target_net
            c_names, n_l1, w_initializer, b_initializer = \
                ['eval_net_params', tf.GraphKeys.GLOBAL_VARIABLES], 10, \
                tf.random_normal_initializer(0., 0.3), tf.constant_initializer(0.1)  # config of layers
            # first layer of eval_net; the collection is used when target_net's parameters are replaced
            # layer 1: w1, b1 and output l1 = activation(s @ w1 + b1)
            with tf.variable_scope('l1'):
                # tf.get_variable('name', [shape], initializer=..., collections=...)
                # weight matrix: n_features rows x n_l1 columns
                # bias matrix: 1 row x n_l1 columns
                w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names)
                b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)
                l1 = tf.nn.relu(tf.matmul(self.s, w1) + b1)
            # the previous layer's output feeds this layer; this output layer has one unit per action
            # layer 2: w2, b2 and output q_eval = l1 @ w2 + b2
            # second layer of eval_net; the collection is used when target_net's parameters are replaced
            with tf.variable_scope('l2'):
                # weight matrix: n_l1 rows x n_actions columns
                # bias matrix: 1 row x n_actions columns
                w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)
                b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)
                self.q_eval = tf.matmul(l1, w2) + b2  # one Q value per action
        # the loss function is what drives eval_net's updates
        # reduce_mean averages over all elements; squared_difference is the element-wise squared error
        # squared_difference yields a batch_size x n_actions tensor; reduce_mean collapses it to a scalar
        with tf.variable_scope('loss'):
            self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.q_eval))
        with tf.variable_scope('train'):  # eval_net's training method
            self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)
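        # note: q_target enters as a placeholder, so gradients flow only through
        # q_eval; _train_op therefore updates eval_net's weights and nothing else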
        # ------------------ build target_net, which provides the target Q values ------------------
        # target_net is an older snapshot of eval_net; it supplies q_next, from which q_target is computed
        # its input is s_, the state at the next time step; the architecture is identical to eval_net's
        self.s_ = tf.placeholder(tf.float32, [None, self.n_features], name='s_')  # receives the next observation (next state)
        with tf.variable_scope('target_net'):
            # c_names (collections_names) is used when target_net's parameters are replaced
            c_names = ['target_net_params', tf.GraphKeys.GLOBAL_VARIABLES]
            # first layer of target_net; the collection is used when its parameters are replaced
            with tf.variable_scope('l1'):
                w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names)
                b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)
                l1 = tf.nn.relu(tf.matmul(self.s_, w1) + b1)
            # second layer of target_net; the collection is used when its parameters are replaced
            with tf.variable_scope('l2'):
                w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)
                b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)
                self.q_next = tf.matmul(l1, w2) + b2
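        # note: target_net has no optimizer attached; its weights change only when
        # learn() runs self.replace_target_op, every replace_target_iter steps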
    # the replay memory
    def store_transition(self, s, a, r, s_):
        if not hasattr(self, 'memory_counter'):  # hasattr(object, name) checks whether the object has the given attribute
            self.memory_counter = 0
        # np.hstack stacks arrays horizontally
        transition = np.hstack((s, [a, r], s_))  # one record of [s, [a, r], s_]
        # total memory size is fixed; once it is full, old records are overwritten by new ones
        # the memory stores one transition per row
        # index cycles through the rows, marking which row gets overwritten next
        index = self.memory_counter % self.memory_size
        self.memory[index, :] = transition  # overwrite
        self.memory_counter += 1
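        # usage sketch (hypothetical numbers): with n_features = 2,
        #   store_transition(s=[0.1, 0.2], a=1, r=-1.0, s_=[0.3, 0.4])
        # writes the row [0.1, 0.2, 1.0, -1.0, 0.3, 0.4] into self.memory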
    def choose_action(self, observation):
        # add a batch dimension so observation has shape (1, n_features)
        observation = observation[np.newaxis, :]
        if np.random.uniform() < self.epsilon:
            # feed_dict supplies values for the placeholders
            # let eval_net produce the values of all actions, then pick the best one
            actions_value = self.sess.run(self.q_eval, feed_dict={self.s: observation})
            action = np.argmax(actions_value)  # np.argmax returns the index of the maximum value
        else:
            action = np.random.randint(0, self.n_actions)  # pick a random action
        return action
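        # with the default epsilon_max = 0.9, about 90% of calls exploit
        # (argmax of q_eval) and about 10% explore (uniformly random action)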
    def learn(self):
        if self.learn_step_counter % self.replace_target_iter == 0:  # check whether it is time to replace target_net's parameters
            self.sess.run(self.replace_target_op)
            print('\ntarget_params_replaced\n')
        # sample a random minibatch
        # if the memory is not yet full, sample among the records written so far; otherwise sample the whole memory
        if self.memory_counter > self.memory_size:
            # numpy.random.choice(a, size=None, replace=True, p=None)
            # draws size samples from a with probabilities p; replace controls sampling with replacement
            # draw batch_size transitions at random from the memory
            sample_index = np.random.choice(self.memory_size, size=self.batch_size)
        else:
            sample_index = np.random.choice(self.memory_counter, size=self.batch_size)
        # gather the sampled transitions into batch_memory
        batch_memory = self.memory[sample_index, :]
        # compute q_next (produced by target_net) and q_eval (produced by eval_net)
        q_next, q_eval = self.sess.run(
            [self.q_next, self.q_eval],
            feed_dict={
                # each batch_memory row is laid out as s (n_features), a, r, s_ (n_features)
                # slice out s_ and s as the network inputs
                # numpy slicing a[:, a:b:c] takes columns a to b with step c
                self.s_: batch_memory[:, -self.n_features:],  # next state s_ (last n_features columns) -> target_net (fixed, older params)
                self.s: batch_memory[:, :self.n_features],    # current state s (first n_features columns) -> eval_net (newest params)
            })
        # overwrite q_target at the actions actually taken, w.r.t. q_eval
        # the reward plus the discounted max future value becomes the "real" Q
        q_target = q_eval.copy()
        batch_index = np.arange(self.batch_size, dtype=np.int32)
        # numpy reminders: ndim is the number of dimensions, shape the dimension tuple,
        # dtype the element type (these are attributes); astype converts the element type
        # slice batch_memory to pull out the action and reward columns
        eval_act_index = batch_memory[:, self.n_features].astype(int)
        reward = batch_memory[:, self.n_features + 1]
        q_target[batch_index, eval_act_index] = reward + self.gamma * np.max(q_next, axis=1)
"""
For example in this batch I have 2 samples and 3 actions:
q_eval =
[[1, 2, 3],
[4, 5, 6]]
q_target = q_eval =
[[1, 2, 3],
[4, 5, 6]]
Then change q_target with the real q_target value w.r.t the q_eval's action.
For example in:
sample 0, I took action 0, and the max q_target value is -1;
sample 1, I took action 2, and the max q_target value is -2:
q_target =
[[-1, 2, 3],
[4, 5, -2]]
So the (q_target - q_eval) becomes:
[[(-1)-(1), 0, 0],
[0, 0, (-2)-(6)]]
We then backpropagate this error w.r.t the corresponding action to network,
leave other action as error=0 cause we didn't choose it.
"""
        # train eval_net
        _, self.cost = self.sess.run([self._train_op, self.loss],
                                     feed_dict={self.s: batch_memory[:, :self.n_features],
                                                self.q_target: q_target})
        self.cost_his.append(self.cost)  # record the cost of every update so it can be plotted after training
        # raise epsilon: once the agent is confident enough, stop exploring and exploit the best-known actions (reduce randomness)
        self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max
        self.learn_step_counter += 1
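        # example: with e_greedy_increment = 0.001, epsilon climbs from 0 to the
        # 0.9 ceiling over roughly 900 calls to learn(), then stays there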
    def plot_cost(self):
        import matplotlib.pyplot as plt
        plt.plot(np.arange(len(self.cost_his)), self.cost_his)
        plt.ylabel('Cost')
        plt.xlabel('training steps')
        plt.show()
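To show how the pieces fit together, here is a minimal sketch of a training loop driving this class. The environment object env and its reset/step API are hypothetical Gym-style stand-ins, and the hyperparameter values are illustrative only, not ones prescribed by this post:

    # hypothetical Gym-style environment: env.reset() -> s, env.step(a) -> (s_, r, done, info)
    RL = DeepQNetwork(n_actions=4, n_features=2,        # illustrative sizes
                      learning_rate=0.01, reward_decay=0.9, e_greedy=0.9,
                      replace_target_iter=200, memory_size=2000,
                      e_greedy_increment=0.001)
    step = 0
    for episode in range(300):
        observation = env.reset()
        while True:
            action = RL.choose_action(observation)
            observation_, reward, done, _ = env.step(action)
            RL.store_transition(observation, action, reward, observation_)
            if step > 200 and step % 5 == 0:  # fill the memory first, then learn every 5 steps
                RL.learn()
            observation = observation_
            step += 1
            if done:
                break
    RL.plot_cost()  # plot how the cost evolved over training

The warm-up check (step > 200) simply lets the replay memory accumulate some transitions before the first minibatch is sampled.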
Since I am still in the middle of learning reinforcement learning, omissions and mistakes are inevitable. Criticism and corrections are welcome; let's improve together!