
Embedding a Transformer in Reinforcement Learning (Code in Practice)

Contents

  • ChatGPT's Answer
  • GPT-4.0

ChatGPT's Answer

import torch
import torch.nn as nn
import torch.nn.functional as F

# Define the Transformer module (encoder-only)
class Transformer(nn.Module):
    def __init__(self, input_dim, hidden_dim, num_heads, num_layers):
        super(Transformer, self).__init__()
        self.encoder_layer = nn.TransformerEncoderLayer(d_model=input_dim, nhead=num_heads)
        self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=num_layers)
        self.linear = nn.Linear(input_dim, hidden_dim)

    def forward(self, x):
        x = self.transformer_encoder(x)
        x = self.linear(x)
        return x


# Define the policy network
class PolicyNetwork(nn.Module):
    def __init__(self, input_dim, output_dim, hidden_dim, num_heads, num_layers):
        super(PolicyNetwork, self).__init__()
        self.transformer = Transformer(input_dim, hidden_dim, num_heads, num_layers)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        x = self.transformer(x)
        x = F.softmax(self.fc(x), dim=-1)
        return x

Q: Why is there only an Encoder and no Decoder?

In reinforcement learning tasks, especially with policy-gradient methods, a full Transformer with both an Encoder and a Decoder is usually unnecessary. These tasks typically only need a mapping from states to actions; they do not need to generate an output sequence.

We can therefore use just the Transformer's Encoder: the state is fed in, encoded into a vector representation, and that vector is passed to the policy network (or to a Critic network) for action selection or value estimation.

The Encoder can handle variable-length state inputs and can model the sequence itself, which is useful in many reinforcement learning tasks.

So in the PPO algorithm, only the Transformer's Encoder is needed, not the Decoder. In reinforcement learning, especially in tasks with complex state and action spaces, capturing the dependencies within a state sequence is crucial for predicting future rewards and making decisions. Through its self-attention mechanism, the Transformer handles these sequential dependencies effectively, which can improve policy performance.
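As a minimal illustration of this encoder-only setup (my own sketch, not part of ChatGPT's answer; it assumes the PolicyNetwork defined above and a hypothetical CartPole-like task with 4-dimensional states and 2 discrete actions), the snippet below encodes a single state and samples an action from the resulting distribution. Note that nn.TransformerEncoder expects input shaped (seq_len, batch, d_model) by default, so a single state is treated as a length-1 sequence:

import torch
from torch.distributions import Categorical

# Hypothetical dimensions for a CartPole-like task (illustration only).
state_dim, action_dim = 4, 2
policy = PolicyNetwork(input_dim=state_dim, output_dim=action_dim,
                       hidden_dim=32, num_heads=2, num_layers=1)

# A single state as a length-1 sequence with batch size 1: (seq_len=1, batch=1, state_dim).
state = torch.randn(1, 1, state_dim)
action_probs = policy(state)                 # shape (1, 1, action_dim)
action = Categorical(action_probs).sample()  # sampled action index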

GPT-4.0

# time: 2024/3/1 21:31
# author: YanJP
import gym
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from torch.distributions import Categorical
from torch.nn import TransformerEncoder, TransformerEncoderLayer
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler


# Transformer feature extractor
class TransformerFeatureExtractor(nn.Module):
    def __init__(self, input_dim, model_dim, num_heads, num_layers, dropout=0.1):
        super(TransformerFeatureExtractor, self).__init__()
        self.model_dim = model_dim
        encoder_layers = TransformerEncoderLayer(d_model=model_dim, nhead=num_heads, dropout=dropout)
        self.transformer_encoder = TransformerEncoder(encoder_layer=encoder_layers, num_layers=num_layers)
        self.input_projection = nn.Linear(input_dim, model_dim)

    def forward(self, x):
        x = self.input_projection(x)
        # Add a sequence-length dimension; x is assumed to arrive with shape (batch, model_dim)
        x = x.unsqueeze(0)  # x now has shape (1, batch, model_dim)
        output = self.transformer_encoder(x)
        return output.squeeze(0)  # remove the sequence-length dimension, back to (batch, model_dim)


# PPO network definition
class PPONetwork(nn.Module):
    def __init__(self, state_dim, action_dim, model_dim=64, num_heads=4, num_layers=4):
        super(PPONetwork, self).__init__()
        self.feature_extractor = TransformerFeatureExtractor(input_dim=state_dim, model_dim=model_dim,
                                                             num_heads=num_heads, num_layers=num_layers)
        self.policy_head = nn.Linear(model_dim, action_dim)
        self.value_head = nn.Linear(model_dim, 1)

    def forward(self, state):
        features = self.feature_extractor(state)
        # features = features[:, -1, :]  # use the last time step's features
        action_probs = torch.softmax(self.policy_head(features), dim=-1)
        state_values = self.value_head(features)
        return action_probs, state_values


# PPO Agent
class PPOAgent:
    def __init__(self, env):
        self.env = env
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.n
        self.network = PPONetwork(self.state_dim, self.action_dim)
        self.optimizer = optim.Adam(self.network.parameters(), lr=2.5e-4)
        self.gamma = 0.99
        self.lamda = 0.95
        self.eps_clip = 0.2
        self.K_epoch = 4
        self.buffer_capacity = 1000
        self.batch_size = 64
        self.buffer = {'states': [], 'actions': [], 'log_probs': [], 'rewards': [], 'is_terminals': []}

    def select_action(self, state):
        state = torch.FloatTensor(state).unsqueeze(0)
        with torch.no_grad():
            action_probs, _ = self.network(state)
        dist = Categorical(action_probs)
        action = dist.sample()
        return action.item(), dist.log_prob(action)

    def put_data(self, transition):
        self.buffer['states'].append(transition[0])
        self.buffer['actions'].append(transition[1])
        self.buffer['log_probs'].append(transition[2])
        self.buffer['rewards'].append(transition[3])
        self.buffer['is_terminals'].append(transition[4])

    def train_net(self):
        R = 0
        discounted_rewards = []
        for reward, is_terminal in zip(reversed(self.buffer['rewards']), reversed(self.buffer['is_terminals'])):
            if is_terminal:
                R = 0
            R = reward + (self.gamma * R)
            discounted_rewards.insert(0, R)
        discounted_rewards = torch.tensor(discounted_rewards, dtype=torch.float32)
        old_states = torch.tensor(np.array(self.buffer['states']), dtype=torch.float32)
        old_actions = torch.tensor(self.buffer['actions']).view(-1, 1)
        old_log_probs = torch.tensor(self.buffer['log_probs']).view(-1, 1)
        # Normalize the rewards
        discounted_rewards = (discounted_rewards - discounted_rewards.mean()) / (discounted_rewards.std() + 1e-5)
        for _ in range(self.K_epoch):
            for index in BatchSampler(SubsetRandomSampler(range(len(self.buffer['states']))), self.batch_size, False):
                # Extract batches
                state_sample = old_states[index]
                action_sample = old_actions[index]
                old_log_probs_sample = old_log_probs[index]
                returns_sample = discounted_rewards[index].view(-1, 1)
                # Get current policies
                action_probs, state_values = self.network(state_sample)
                dist = Categorical(action_probs)
                entropy = dist.entropy().mean()
                new_log_probs = dist.log_prob(action_sample.squeeze(-1))
                # Calculating the ratio (pi_theta / pi_theta__old):
                ratios = torch.exp(new_log_probs - old_log_probs_sample.detach())
                # Calculating Surrogate Loss:
                advantages = returns_sample - state_values.detach()
                surr1 = ratios * advantages
                surr2 = torch.clamp(ratios, 1 - self.eps_clip, 1 + self.eps_clip) * advantages
                loss = -torch.min(surr1, surr2) + 0.5 * (state_values - returns_sample).pow(2) - 0.01 * entropy
                # take gradient step
                self.optimizer.zero_grad()
                loss.mean().backward()
                self.optimizer.step()
        self.buffer = {'states': [], 'actions': [], 'log_probs': [], 'rewards': [], 'is_terminals': []}

    def train(self, max_episodes):
        for episode in range(max_episodes):
            state = self.env.reset()
            done = False
            rewards = 0
            while not done:
                action, log_prob = self.select_action(state)
                next_state, reward, done, _ = self.env.step(action)
                rewards += reward
                self.put_data((state, action, log_prob, reward, done))
                state = next_state
                if done:
                    self.train_net()
            if episode % 5 == 0:
                print("episode:", episode, '\t reward:', rewards)


# Main function
def main():
    env = gym.make('CartPole-v1')
    agent = PPOAgent(env)
    max_episodes = 300
    agent.train(max_episodes)


if __name__ == "__main__":
    main()
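A practical note of my own (not from the original post): the training loop assumes the classic Gym API, where env.reset() returns only the observation and env.step() returns a 4-tuple, which holds for gym versions before 0.26. With gym >= 0.26 or gymnasium, the calls would need to be adapted roughly as follows:

# Newer Gym/Gymnasium API (sketch, assuming gym >= 0.26):
state, _ = env.reset()                                            # reset() returns (obs, info)
next_state, reward, terminated, truncated, _ = env.step(action)   # step() returns a 5-tuple
done = terminated or truncated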

Note: the code runs, but it does not actually learn a usable policy!
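One plausible culprit, based on reading the code above rather than anything stated in the original post, is a shape mismatch in train_net(): new_log_probs has shape (batch,) while old_log_probs_sample has shape (batch, 1), so their difference broadcasts to a (batch, batch) matrix and the clipped surrogate loss mixes unrelated samples. Aligning the shapes is one change worth trying (a suggestion, not a verified fix):

# Inside train_net(): keep log-probs as (batch, 1) so the ratio matches the advantages.
new_log_probs = dist.log_prob(action_sample.squeeze(-1)).view(-1, 1)
ratios = torch.exp(new_log_probs - old_log_probs_sample.detach())   # (batch, 1)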
