How to solve Acrobot and CartPole with reinforcement learning
I want to use the same agent to solve both CartPole and Acrobot. I wrote this code with the help of a book, but I noticed that it does not work on Acrobot.
I used this neural network:
import gym
import numpy as np
import tensorflow as tf
from tensorflow import keras

keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)

# the second gym.make() overwrites the first; comment one out to pick the environment
env = gym.make("Acrobot-v1")
env = gym.make("CartPole-v1")

input_shape = env.observation_space.shape
n_outputs = env.action_space.n
max_episode_steps = 500

model = keras.models.Sequential([
    keras.layers.Dense(32, activation="elu", input_shape=input_shape),
    keras.layers.Dense(32, activation="elu"),
    keras.layers.Dense(n_outputs)
])
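Just to sanity-check that the same network definition adapts to both environments, here is a quick sketch (not from the book): CartPole-v1 has a 4-dimensional observation and 2 actions, while Acrobot-v1 has 6 dimensions and 3 actions, so input_shape and n_outputs only depend on which env is created above.

# minimal check of the two environments' spaces (assumes the classic-control envs are installed)
for name in ("CartPole-v1", "Acrobot-v1"):
    e = gym.make(name)
    print(name, e.observation_space.shape, e.action_space.n)
# CartPole-v1 (4,) 2
# Acrobot-v1  (6,) 3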
And this ε-greedy policy:
# We pick the action with the largest predicted Q-value.
# To ensure that the agent explores the environment, we use an ε-greedy policy.
def epsilon_greedy_policy(state, epsilon=0):
    if np.random.rand() < epsilon:
        return np.random.randint(env.action_space.n)
        # return env.action_space.sample()
    else:
        Q_values = model.predict(state[np.newaxis])
        return np.argmax(Q_values[0])
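To make sure I understand the policy: with epsilon=0 it always picks the greedy action (what I would use at evaluation time), and a nonzero epsilon forces exploration. A quick usage sketch (assuming the classic Gym API used above, where env.reset() returns just the observation):

obs = env.reset()
print(epsilon_greedy_policy(obs))               # epsilon=0: always the greedy action
print(epsilon_greedy_policy(obs, epsilon=0.5))  # ~50% chance of a random action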
I used a replay buffer:
# We will store all experiences in a replay buffer (or replay memory).
from collections import deque

replay_memory = deque(maxlen=1000)

# Each experience has five elements: a state, the action the agent took,
# the resulting reward, the next state it reached, and finally a Boolean indicating
# whether the episode ended at that point (done).
def sample_experiences(batch_size):
    indices = np.random.randint(len(replay_memory), size=batch_size)
    batch = [replay_memory[index] for index in indices]
    states, actions, rewards, next_states, dones = [
        np.array([experience[field_index] for experience in batch])
        for field_index in range(5)]
    return states, actions, rewards, next_states, dones
# Create a function that plays a single step using the ε-greedy policy, then
# stores the resulting experience in the replay buffer:
def play_one_step(env, state, epsilon):
    action = epsilon_greedy_policy(state, epsilon)
    next_state, reward, done, info = env.step(action)
    replay_memory.append((state, action, reward, next_state, done))
    return next_state, reward, done, info
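To illustrate what ends up in the buffer, here is a quick sketch of a single collection step (not part of the book's code):

obs = env.reset()
obs, reward, done, info = play_one_step(env, obs, epsilon=1.0)  # one fully random step
state, action, r, next_state, d = replay_memory[-1]             # the 5-tuple just stored
print(action, r, d)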
# Define some hyperparameters and create the optimizer and the loss function.
batch_size = 32
discount_rate = 0.95
optimizer = keras.optimizers.Adam(learning_rate=1e-3)
loss_fn = keras.losses.mean_squared_error
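For reference, the training step below implements the standard DQN target, Q_target(s, a) = r + (1 - done) * γ * max_a' Q(s', a'), with γ = discount_rate; the (1 - done) factor zeroes out the bootstrap term when the episode ends.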
def training_step(batch_size):
    experiences = sample_experiences(batch_size)
    states, actions, rewards, next_states, dones = experiences
    next_Q_values = model.predict(next_states)
    max_next_Q_values = np.max(next_Q_values, axis=1)
    target_Q_values = (rewards + (1 - dones) * discount_rate * max_next_Q_values)
    target_Q_values = target_Q_values.reshape(-1, 1)  # reshape into a column vector
    mask = tf.one_hot(actions, n_outputs)
    with tf.GradientTape() as tape:
        all_Q_values = model(states)
        Q_values = tf.reduce_sum(all_Q_values * mask, axis=1, keepdims=True)
        loss = tf.reduce_mean(loss_fn(target_Q_values, Q_values))
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
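The surrounding training loop (not shown above) follows the usual pattern; here is a minimal sketch, where the episode count, warm-up length and ε schedule are rough approximations of the book's example rather than fixed values:

for episode in range(600):
    obs = env.reset()
    for step in range(max_episode_steps):
        epsilon = max(1 - episode / 500, 0.01)  # linear ε decay (assumed schedule)
        obs, reward, done, info = play_one_step(env, obs, epsilon)
        if done:
            break
    if episode > 50:  # let the replay buffer fill up before training starts
        training_step(batch_size)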
I am fairly sure there is an error in the loss function, because with this setup I can only get positive rewards (CartPole gives +1 per step, whereas Acrobot gives -1 per step until the goal is reached).
Do you have any suggestions?