当前位置: 首页 > news >正文

【深度学习】RNN的简单实现

目录

1.RNNCell

2.RNN

3.RNN_Embedding


1.RNNCell

import torch

# Hyper-parameters for the character-level RNNCell demo.
input_size = 4
hidden_size = 4
batch_size = 1

# Vocabulary and the training pair: learn to map "hello" -> "ohlol".
idx2char = ['e', 'h', 'l', 'o']
x_data = [1, 0, 2, 2, 3]  # character indices of "hello"
y_data = [3, 1, 2, 3, 2]  # character indices of "ohlol"

# One-hot encoding table for the 4-character vocabulary.
one_hot_lookup = [[1, 0, 0, 0],
                  [0, 1, 0, 0],
                  [0, 0, 1, 0],
                  [0, 0, 0, 1]]
x_one_hot = [one_hot_lookup[ch] for ch in x_data]

# inputs: (seqLen, batchSize, inputSize); labels: (seqLen, 1)
inputs = torch.Tensor(x_one_hot).view(-1, batch_size, input_size)
labels = torch.LongTensor(y_data).view(-1, 1)


class Model(torch.nn.Module):
    """Wraps a single RNNCell that is stepped manually over the sequence."""

    def __init__(self, input_size, hidden_size, batch_size):
        super(Model, self).__init__()
        self.batch_size = batch_size
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.rnncell = torch.nn.RNNCell(input_size=self.input_size,
                                        hidden_size=self.hidden_size)

    def forward(self, input, hidden):
        # input: (batch, input_size); hidden: (batch, hidden_size)
        return self.rnncell(input, hidden)

    def init_hidden(self):
        # Zero hidden state used at the start of every epoch.
        return torch.zeros(self.batch_size, self.hidden_size)


net = Model(input_size, hidden_size, batch_size)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.1)

for epoch in range(15):
    optimizer.zero_grad()
    hidden = net.init_hidden()
    loss = 0
    print('Predicted string: ', end='')
    # Step the cell through the sequence, accumulating the per-step loss.
    for input, label in zip(inputs, labels):
        hidden = net(input, hidden)
        loss = loss + criterion(hidden, label)
        idx = torch.argmax(hidden, dim=1)
        print(idx2char[idx.item()], end='')
    loss.backward()
    optimizer.step()
    print(', Epoch [%d/15] loss=%.4f' % (epoch + 1, loss.item()))

2.RNN

import torch

# Hyper-parameters: the whole sequence is fed to torch.nn.RNN in one call.
input_size = 4   # one-hot width of the 4-character vocabulary
hidden_size = 4  # hidden-state dimensionality
num_layers = 1   # number of stacked RNN layers
batch_size = 1
seq_len = 5

idx2char = ['e', 'h', 'l', 'o']
x_data = [1, 0, 2, 2, 3]  # character indices of "hello"
y_data = [3, 1, 2, 3, 2]  # character indices of "ohlol"

# One-hot encoding table.
one_hot_lookup = [[1, 0, 0, 0],
                  [0, 1, 0, 0],
                  [0, 0, 1, 0],
                  [0, 0, 0, 1]]
x_one_hot = [one_hot_lookup[ch] for ch in x_data]

inputs = torch.Tensor(x_one_hot).view(seq_len, batch_size, input_size)
labels = torch.LongTensor(y_data)  # (seqLen * batchSize,)


class Model(torch.nn.Module):
    """One-shot RNN: consumes a (seqLen, batch, input) tensor at once."""

    def __init__(self, input_size, hidden_size, batch_size, num_layers=1):
        super(Model, self).__init__()
        self.num_layers = num_layers
        self.batch_size = batch_size
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.rnn = torch.nn.RNN(input_size=self.input_size,
                                hidden_size=self.hidden_size,
                                num_layers=num_layers)

    def forward(self, input):
        # Fresh zero hidden state: (numLayers, batchSize, hiddenSize).
        hidden = torch.zeros(self.num_layers, self.batch_size, self.hidden_size)
        out, _ = self.rnn(input, hidden)
        # Flatten to (seqLen * batchSize, hiddenSize) for CrossEntropyLoss.
        return out.view(-1, self.hidden_size)


net = Model(input_size, hidden_size, batch_size, num_layers)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.05)

for epoch in range(15):
    optimizer.zero_grad()
    outputs = net(inputs)
    loss = criterion(outputs, labels)
    loss.backward()
    optimizer.step()

    idx = torch.argmax(outputs, dim=1)
    predicted = ''.join(idx2char[i.item()] for i in idx)
    print('Predicted string: ', predicted, end='')
    print(', Epoch [%d/15] loss = %.3f' % (epoch + 1, loss.item()))

3.RNN_Embedding

import torch

# Hyper-parameters for the Embedding + RNN + Linear variant.
num_class = 4        # output classes; the Linear head decouples hidden size from output size
input_size = 4       # vocabulary size for the embedding table
hidden_size = 8
embedding_size = 10
num_layers = 2
batch_size = 1
seq_len = 5

idx2char = ['e', 'h', 'l', 'o']
x_data = [[1, 0, 2, 2, 3]]  # (batch:1, seq_len:5) — indices of "hello"
y_data = [3, 1, 2, 3, 2]    # (batch * seq_len)    — indices of "ohlol"

inputs = torch.LongTensor(x_data)
labels = torch.LongTensor(y_data)


class Model(torch.nn.Module):
    """Embedding -> multi-layer RNN (batch_first) -> Linear classifier."""

    def __init__(self):
        super(Model, self).__init__()
        self.emb = torch.nn.Embedding(input_size, embedding_size)
        # batch_first=True: tensors are laid out (batch, seqLen, features).
        self.rnn = torch.nn.RNN(input_size=embedding_size,
                                hidden_size=hidden_size,
                                num_layers=num_layers,
                                batch_first=True)
        self.fc = torch.nn.Linear(hidden_size, num_class)

    def forward(self, x):
        hidden = torch.zeros(num_layers, x.size(0), hidden_size)
        x = self.emb(x)             # (batch, seqLen, embeddingSize)
        x, _ = self.rnn(x, hidden)  # (batch, seqLen, hiddenSize)
        x = self.fc(x)              # (batch, seqLen, numClass)
        return x.view(-1, num_class)


net = Model()
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.05)

for epoch in range(15):
    optimizer.zero_grad()
    outputs = net(inputs)
    loss = criterion(outputs, labels)
    loss.backward()
    optimizer.step()

    idx = torch.argmax(outputs, dim=1)
    print('Predicted string: ', ''.join(idx2char[i.item()] for i in idx), end='')
    print(', Epoch [%d/15] loss = %.3f' % (epoch + 1, loss.item()))

http://www.lryc.cn/news/464354.html

相关文章:

  • 每次请求时,检查 JWT Token的有效期并决定是否需要刷新
  • AI大模型开发架构设计(13)——LLM大模型的向量数据库应用实战
  • WPF中Grid、StackPanel、Canvas、WrapPanel常用属性
  • 芙丽芳丝净润洗面霜和雅漾舒护活泉喷雾
  • ubuntu更新Cmake
  • CMOS晶体管的串联与并联
  • 从IT高管到看门大爷:53岁我的职场华丽转身
  • Redis入门到精通(三):入门Redis看这一篇就够了
  • IP基本原理
  • 数据分析题面试题系列2
  • uniapp 单表、多级动态表单添加validateFunction自定义规则
  • FPGA高端图像处理培训第一期,提供工程源码+视频教程+FPGA开发板
  • 顺序表的实现(数据结构)——C语言
  • 【VUE】Vue中 computed计算属性和watch侦听器的区别
  • linux线程 | 同步与互斥 | 深度学习与理解同步
  • Tkinter Frame布局笔记--做一个简易的计算器
  • 算法专题八: 链表
  • MySQL中关于NULL值的六大坑!你被坑过吗?
  • 学生学习动机测试:激发潜能,引领未来
  • 基于SSM党务政务服务热线管理系统的设计
  • OSI参考模型详解:初学者指南与实践案例
  • S7-200 SMART 与 S7-1200 之间 TCP 通信— S7-200 SMART 作为服务器
  • Java @RequestPart注解:同时实现文件上传与JSON对象传参
  • 深度学习基础知识-02 数据预处理
  • 【CTF刷题9】2024.10.19
  • WPF中的Setter
  • RabbitMQ下载与配置
  • 【数据结构与算法】力扣 54. 螺旋矩阵
  • 速通不了的人工智能
  • 微信新功能上线,找工作也能“附近”搞定