
13. Deep Learning: MNIST Handwritten Digit Recognition

Part 1: Getting Started

import torch
from torchvision import datasets, transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

use_cuda = torch.cuda.is_available()
if use_cuda:
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
print(f"Using device {device}")
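As a side note, the same device selection is often written as a single line:

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")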

Part 2: Computing the Mean and Standard Deviation

transform = transforms.Compose([
    # Convert the data to Tensors
    transforms.ToTensor()
])
# Load the data
datasets1 = datasets.MNIST('./data', train=True, download=True, transform=transform)
datasets1_len = len(datasets1)
# Data loader whose batch size covers all images at once
train_loader = torch.utils.data.DataLoader(datasets1, batch_size=datasets1_len, shuffle=True)
# Iterate over the training DataLoader; 0 is the starting index for enumerate
for batch_idx, data in enumerate(train_loader, 0):
    inputs, targets = data
    # Reshape the images from (60000, 1, 28, 28) into a (60000*1, 28*28) 2-D array;
    # -1 is a placeholder that lets PyTorch infer that dimension
    x = inputs.view(-1, 28*28)
    # Mean: ~0.1307
    x_mean = x.mean().item()
    # Standard deviation: ~0.3081
    x_std = x.std().item()
    print(f"mean: {x_mean}, std: {x_std}")
#mean: 0.13066047430038452, std: 0.30810782313346863
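These statistics are what feed the transforms.Normalize call in the complete code below. As a quick sanity check (a minimal sketch, assuming the dataset has already been downloaded to ./data), re-loading the data with Normalize applied should yield a mean near 0 and a standard deviation near 1:

# Verify that Normalize with the measured statistics standardizes the data
norm_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])
check_set = datasets.MNIST('./data', train=True, download=True, transform=norm_transform)
check_loader = torch.utils.data.DataLoader(check_set, batch_size=len(check_set))
images, _ = next(iter(check_loader))
print(images.mean().item(), images.std().item())  # roughly 0.0 and 1.0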

Part 3: The Network Model

# Build the model as a custom class that inherits from torch.nn.Module
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # Linear layer (weighted sum): 784 inputs, 128 neurons in this layer
        self.fc1 = torch.nn.Linear(784, 128)
        self.dropout = torch.nn.Dropout(p=0.2)
        # Input = number of neurons in the previous layer; 10 outputs for the ten classes 0-9
        self.fc2 = torch.nn.Linear(128, 10)

    def forward(self, x):
        # Flatten each sample into a 1-D array of 28*28 = 784 values
        x = torch.flatten(x, 1)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.dropout(x)
        x = self.fc2(x)
        # softmax followed by log, which pairs with the NLL loss computed later
        output = F.log_softmax(x, dim=1)
        return output
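A quick way to confirm the wiring is to push a dummy batch through an untrained instance (a minimal sketch; the batch size of 2 and the random input are arbitrary). The output shape should be (2, 10), and exponentiating the log-probabilities should give rows that sum to 1:

# Shape check with a dummy batch of two 1x28x28 "images"
net = Net()
net.eval()                    # disable dropout for a deterministic pass
dummy = torch.randn(2, 1, 28, 28)
out = net(dummy)
print(out.shape)              # torch.Size([2, 10])
print(out.exp().sum(dim=1))   # each row sums to ~1.0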

Part 4: Training and Testing Strategies

# Create a model instance
model = Net().to(device)

# How to train on a single batch
def train_step(data, target, model, optimizer):
    optimizer.zero_grad()              # Zero out the gradients
    output = model(data)
    # NLL is the negative log-likelihood; output is y_hat, target is y_true
    loss = F.nll_loss(output, target)
    loss.backward()                    # Backpropagate to compute gradients
    optimizer.step()                   # Update the weights from the gradients
    return loss

# How to evaluate a single batch (named test_step, matching the call in the training loop)
def test_step(data, target, model, test_loss, correct):
    output = model(data)
    # Accumulate the loss across batches
    test_loss += F.nll_loss(output, target, reduction='sum').item()
    # Index of the largest log-probability; dim=1 picks the max of each row,
    # keepdim=True keeps the number of dimensions unchanged
    pred = output.argmax(dim=1, keepdim=True)
    # Count predictions that match the labels: eq compares and returns True/False,
    # sum adds them up, item extracts the Python number from the tensor
    correct += pred.eq(target.view_as(pred)).sum().item()
    return test_loss, correct
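The log_softmax + nll_loss pairing used here is exactly what F.cross_entropy computes from raw logits, which is why the network ends in log_softmax. A minimal sketch with made-up logits and labels shows the two agree:

# log_softmax followed by nll_loss equals cross_entropy on the same logits
logits = torch.randn(4, 10)           # made-up batch of 4 samples, 10 classes
labels = torch.tensor([3, 1, 7, 0])   # made-up labels
loss_a = F.nll_loss(F.log_softmax(logits, dim=1), labels)
loss_b = F.cross_entropy(logits, labels)
print(loss_a.item(), loss_b.item())   # identical values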

Part 5: Training the Model

# Train for several epochs
# Note: this loop uses the batch-size-128 train_loader and the test_loader
# defined in the complete code below, not the full-dataset loader from Part 2
EPOCHS = 5
# Adam optimizer; lr is the learning rate
optimizer = torch.optim.Adam(model.parameters(), lr=0.003)
for epoch in range(EPOCHS):
    model.train()  # Training mode: BN layers use batch statistics, Dropout is active
    for batch_index, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        loss = train_step(data, target, model, optimizer)
        # Print progress every 10 batches
        if batch_index % 10 == 0:
            print('Train Epoch:{epoch} [{batch}/{total_batch} {percent}%] train_loss:{loss:.3f}'.format(
                epoch=epoch + 1,                                  # Current epoch
                batch=batch_index * len(data),                    # Samples processed so far
                total_batch=len(train_loader.dataset),            # Total samples in this epoch
                percent=100.0 * batch_index / len(train_loader),  # Percentage of the epoch completed
                loss=loss.item()                                  # Loss is a tensor; convert to a number
            ))
    # Evaluation mode: BN layers use running averages, Dropout is disabled
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():  # No gradient tracking
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            test_loss, correct = test_step(data, target, model, test_loss, correct)
    test_loss = test_loss / len(test_loader.dataset)
    print('\n Average loss: {:.4f}, Accuracy: {}/{} ({:.3f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
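After training, the model can be used directly for prediction. A minimal sketch (assuming the test_loader from the complete code below) that classifies the first test image:

# Predict the first image of the test set with the trained model
model.eval()
with torch.no_grad():
    images, labels = next(iter(test_loader))
    pred = model(images[:1].to(device)).argmax(dim=1).item()
    print(f"predicted: {pred}, actual: {labels[0].item()}")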

Complete Code

import torch
from torchvision import datasets, transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

use_cuda = torch.cuda.is_available()
if use_cuda:
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
print(f"Using device {device}")

# Data preprocessing
transform = transforms.Compose([
    # Convert the data to Tensors
    transforms.ToTensor(),
    # Normalize the images: 0.1307 is the mean, 0.3081 the standard deviation;
    # the values are specific to this dataset
    transforms.Normalize((0.1307,), (0.3081,))
])
# Load the data
datasets1 = datasets.MNIST('./data', train=True, download=True, transform=transform)
datasets2 = datasets.MNIST('./data', train=False, download=True, transform=transform)
# Training loader: batch size 128, shuffled
train_loader = torch.utils.data.DataLoader(datasets1, batch_size=128, shuffle=True)
# Test batches can be larger, and the test set does not need shuffling
test_loader = torch.utils.data.DataLoader(datasets2, batch_size=1000, shuffle=False)

# Build the model as a custom class that inherits from torch.nn.Module
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # Linear layer (weighted sum): 784 inputs, 128 neurons in this layer
        self.fc1 = torch.nn.Linear(784, 128)
        self.dropout = torch.nn.Dropout(p=0.2)
        # Input = number of neurons in the previous layer; 10 outputs for the ten classes 0-9
        self.fc2 = torch.nn.Linear(128, 10)

    def forward(self, x):
        # Flatten each sample into a 1-D array of 28*28 = 784 values
        x = torch.flatten(x, 1)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.dropout(x)
        x = self.fc2(x)
        # softmax followed by log, which pairs with the NLL loss computed later
        output = F.log_softmax(x, dim=1)
        return output

# Create a model instance
model = Net().to(device)

# How to train on a single batch
def train_step(data, target, model, optimizer):
    optimizer.zero_grad()              # Zero out the gradients
    output = model(data)
    # NLL is the negative log-likelihood; output is y_hat, target is y_true
    loss = F.nll_loss(output, target)
    loss.backward()                    # Backpropagate to compute gradients
    optimizer.step()                   # Update the weights from the gradients
    return loss

# How to evaluate a single batch
def test_step(data, target, model, test_loss, correct):
    output = model(data)
    # Accumulate the loss across batches
    test_loss += F.nll_loss(output, target, reduction='sum').item()
    # Index of the largest log-probability per row; keepdim=True keeps the shape
    pred = output.argmax(dim=1, keepdim=True)
    # Count predictions that match the labels
    correct += pred.eq(target.view_as(pred)).sum().item()
    return test_loss, correct

# Train for several epochs
EPOCHS = 5
# Adam optimizer; lr is the learning rate
optimizer = torch.optim.Adam(model.parameters(), lr=0.003)
for epoch in range(EPOCHS):
    model.train()  # Training mode: BN layers use batch statistics, Dropout is active
    for batch_index, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        loss = train_step(data, target, model, optimizer)
        # Print progress every 10 batches
        if batch_index % 10 == 0:
            print('Train Epoch:{epoch} [{batch}/{total_batch} {percent}%] train_loss:{loss:.3f}'.format(
                epoch=epoch + 1,
                batch=batch_index * len(data),
                total_batch=len(train_loader.dataset),
                percent=100.0 * batch_index / len(train_loader),
                loss=loss.item()
            ))
    # Evaluation mode: BN layers use running averages, Dropout is disabled
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():  # No gradient tracking
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            test_loss, correct = test_step(data, target, model, test_loss, correct)
    test_loss = test_loss / len(test_loader.dataset)
    print('\n Average loss: {:.4f}, Accuracy: {}/{} ({:.3f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))