当前位置: 首页 > news >正文

Neural Network学习笔记4

完整的模型训练套路

train.py

import torch
import torchvision
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# Import the custom network model (class Zrf lives in model.py; keeping the
# network in its own file and importing it here is the usual layout).
from model import *

# Prepare the datasets (CIFAR10, converted to tensors).
train_data = torchvision.datasets.CIFAR10(root="dataset_transform", train=True,
                                          transform=torchvision.transforms.ToTensor(), download=True)
test_data = torchvision.datasets.CIFAR10(root="dataset_transform", train=False,
                                         transform=torchvision.transforms.ToTensor(), download=True)

# length: sizes of the datasets.
train_data_size = len(train_data)
test_data_size = len(test_data)
print("训练数据集的长度为:{}".format(train_data_size))
print("测试数据集的长度为:{}".format(test_data_size))

# Load the datasets with DataLoader.
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)

# Create the network model (defined in model.py, imported above).
zrf = Zrf()

# Loss function.
loss_fn = nn.CrossEntropyLoss()

# Optimizer.
learning_rate = 1e-2  # i.e. 0.01
optimizer = torch.optim.SGD(zrf.parameters(), lr=learning_rate)

# Training bookkeeping.
total_train_step = 0  # number of training iterations so far
total_test_step = 0   # number of completed evaluation passes
epoch = 10            # number of training epochs

# TensorBoard writer.
writer = SummaryWriter("../log_train")

for i in range(epoch):
    print("--------第 {} 轮训练开始--------".format(i + 1))

    # ---- training phase ----
    zrf.train()  # set training mode (optional for this model: no dropout/batchnorm)
    for data in train_dataloader:
        imgs, targets = data
        outputs = zrf(imgs)
        loss = loss_fn(outputs, targets)

        # Optimize: zero the gradients first so previous batches don't leak in.
        optimizer.zero_grad()
        loss.backward()   # compute gradients
        optimizer.step()  # update the parameters from the gradients

        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            print("训练次数:{},Loss:{}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # ---- evaluation phase ----
    # After each epoch, measure loss/accuracy on the test set to see whether
    # the model is actually improving; no gradients are needed here.
    zrf.eval()  # set eval mode (optional for this model)
    total_test_loss = 0
    total_accuracy = 0  # number of correctly classified test samples
    with torch.no_grad():
        for data in test_dataloader:
            imgs, targets = data
            outputs = zrf(imgs)
            loss = loss_fn(outputs, targets)
            accuracy = (outputs.argmax(1) == targets).sum()
            total_accuracy = total_accuracy + accuracy
            total_test_loss = total_test_loss + loss.item()

    # BUG FIX: the original wrote print("...{}",format(x)) — a comma, which
    # printed the raw "{}" template plus format(x) as a second argument.
    print("整体测试集上的Loss:{}".format(total_test_loss))
    print("整体测试集上的正确率:{}".format(total_accuracy / test_data_size))
    total_test_step = total_test_step + 1
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_accuracy", total_accuracy / test_data_size, total_test_step)

    # Save a checkpoint after every epoch.
    torch.save(zrf, "zrf_{}.pth".format(i))
    # Alternative: torch.save(zrf.state_dict(), "zrf_{}.pth".format(i))
    print("模型已保存")

writer.close()

model.py

import torch
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential


class Zrf(nn.Module):
    """CIFAR10 classifier: three conv+pool stages, then two linear layers."""

    def __init__(self):
        super(Zrf, self).__init__()
        # padding=2 keeps H and W unchanged for the 5x5 kernels (value derived
        # from the conv output-size formula for the desired input/output H, W).
        self.model = Sequential(
            Conv2d(3, 32, 5, 1, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, 1, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, 1, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10),
        )

    def forward(self, x):
        return self.model(x)


if __name__ == '__main__':
    # Quick sanity check of the network's output shape.
    net = Zrf()
    demo = torch.ones((64, 3, 32, 32))  # batch of 64, 3 channels, 32x32
    print(net(demo).shape)

关于正确率计算的一点说明

import torch

outputs = torch.tensor([[0.1, 0.2],
                        [0.3, 0.4]])

# The dim argument picks the direction: 1 looks across each row.
print(outputs.argmax(1))
# tensor([1, 1]) -- the row maxima are 0.2 and 0.4
# (FIX: the original comment said 0.3 and 0.4, which are the COLUMN maxima)

# dim=0 looks down each column.
print(outputs.argmax(0))
# tensor([1, 1]) -- the column maxima are 0.3 and 0.4
# (FIX: the original comment said 0.2 and 0.4)

# outputs = torch.tensor([[0.1, 0.2],
#                         [0.05, 0.4]])
# print(outputs.argmax(0))
# # tensor([0, 1]) -- the column maxima are 0.1 and 0.4

preds = outputs.argmax(1)
targets = torch.tensor([0, 1])
print((preds == targets).sum())  # count of correct predictions -> tensor(1)

利用GPU进行训练train_gpu

train_gpu.py

第一种GPU训练方法

# GPU method 1: call .cuda() on the model, the data (inputs and labels),
# and the loss function.
import torch
import torchvision
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import time

# Prepare the datasets.
train_data = torchvision.datasets.CIFAR10(root="dataset_transform", train=True,
                                          transform=torchvision.transforms.ToTensor(), download=True)
test_data = torchvision.datasets.CIFAR10(root="dataset_transform", train=False,
                                         transform=torchvision.transforms.ToTensor(), download=True)

# length: sizes of the datasets.
train_data_size = len(train_data)
test_data_size = len(test_data)
print("训练数据集的长度为:{}".format(train_data_size))
print("测试数据集的长度为:{}".format(test_data_size))

train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)


class Zrf(nn.Module):
    """CIFAR10 classifier: three conv+pool stages, then two linear layers."""

    def __init__(self):
        super(Zrf, self).__init__()
        # Sequential container; padding=2 keeps H, W for the 5x5 kernels.
        self.model = Sequential(
            Conv2d(3, 32, 5, 1, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, 1, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, 1, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.model(x)
        return x


# Create the network model.
zrf = Zrf()
# ------------------- GPU training -------------------#
if torch.cuda.is_available():
    zrf = zrf.cuda()

# Loss function.
loss_fn = nn.CrossEntropyLoss()
# ------------------- GPU training -------------------#
if torch.cuda.is_available():
    loss_fn = loss_fn.cuda()

# Optimizer.
learning_rate = 1e-2
optimizer = torch.optim.SGD(zrf.parameters(), lr=learning_rate)

# Training bookkeeping.
total_train_step = 0
total_test_step = 0
epoch = 10

# TensorBoard writer.
writer = SummaryWriter("../log_train")

start_time = time.time()
for i in range(epoch):
    print("--------第 {} 轮训练开始--------".format(i + 1))

    # ---- training phase ----
    zrf.train()
    for data in train_dataloader:
        imgs, targets = data
        # ------------------- GPU training -------------------#
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            targets = targets.cuda()
        outputs = zrf(imgs)
        loss = loss_fn(outputs, targets)

        # Optimize the model.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            end_time = time.time()
            print(end_time - start_time)  # elapsed seconds since training began
            print("训练次数:{},Loss:{}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # ---- evaluation phase ----
    zrf.eval()
    total_test_loss = 0
    total_accuracy = 0
    with torch.no_grad():
        for data in test_dataloader:
            imgs, targets = data
            # ------------------- GPU training -------------------#
            if torch.cuda.is_available():
                imgs = imgs.cuda()
                targets = targets.cuda()
            outputs = zrf(imgs)
            loss = loss_fn(outputs, targets)
            accuracy = (outputs.argmax(1) == targets).sum()
            total_accuracy = total_accuracy + accuracy
            total_test_loss = total_test_loss + loss.item()

    # BUG FIX: original used a comma ("...{}",format(x)) instead of .format.
    print("整体测试集上的Loss:{}".format(total_test_loss))
    print("整体测试集上的正确率:{}".format(total_accuracy / test_data_size))
    total_test_step = total_test_step + 1
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_accuracy", total_accuracy / test_data_size, total_test_step)

    torch.save(zrf, "zrf_{}.pth".format(i))
    print("模型已保存")

writer.close()

第二种GPU训练方法

# .to(device)
# device = torch.device("cpu")
# torch.device("cuda")
# torch.device("cuda:0")
# torch.device("cuda:1")import torch
import torchvision
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import time# 定义训练的设备
# device = torch.device("cpu")
# device = torch.device("cuda")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")# 准备数据集
train_data = torchvision.datasets.CIFAR10(root="dataset_transform", train=True, transform=torchvision.transforms.ToTensor(),download=True)
test_data = torchvision.datasets.CIFAR10(root="dataset_transform", train=False, transform=torchvision.transforms.ToTensor(),download=True)
# length 长度 获取数据集长度
train_data_size = len(train_data)
test_data_size = len(test_data)
print("训练数据集的长度为:{}".format(train_data_size))
print("测试数据集的长度为:{}".format(test_data_size))train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)class Zrf(nn.Module):def __init__(self):super(Zrf, self).__init__()# Sequential 序列self.model = Sequential(Conv2d(3, 32, 5, 1, padding=2),MaxPool2d(2),Conv2d(32, 32, 5, 1, padding=2),MaxPool2d(2),Conv2d(32, 64, 5, 1, padding=2),MaxPool2d(2),Flatten(),Linear(1024, 64),Linear(64, 10))def forward(self, x):x = self.model(x)return x# 创建网络模型
zrf = Zrf()
# -------------------利用GPU训练-------------------#
zrf.to(device)  # 可以不重新赋值
# zrf = zrf.to(device)# 损失函数
loss_fn = nn.CrossEntropyLoss()
# -------------------利用GPU训练-------------------#
loss_fn.to(device) # 可以不重新赋值
# loss_fn = loss_fn.to(device)# 优化器
learning_rate = 1e-2
optimizer = torch.optim.SGD(zrf.parameters(), lr=learning_rate)# 设置训练网络的一些参数
total_train_step = 0
total_test_step = 0
epoch = 10# 添加tensorboard
writer = SummaryWriter("../log_train")start_time = time.time()for i in range(epoch):print("--------第 {} 轮训练开始--------".format(i+1))# 训练步骤开始zrf.train()for data in train_dataloader:imgs, targets = data# -------------------利用GPU训练-------------------## 必须重新赋值imgs = imgs.to(device)targets = targets.to(device)outputs = zrf(imgs)loss = loss_fn(outputs, targets)# 优化器优化模型optimizer.zero_grad()loss.backward()optimizer.step()total_train_step = total_train_step + 1if total_train_step % 100 == 0:end_time = time.time()print(end_time - start_time)print("训练次数:{},Loss:{}".format(total_train_step, loss.item()))writer.add_scalar("train_loss", loss.item(), total_train_step)# 测试步骤开始zrf.eval()total_test_loss = 0total_accuracy = 0with torch.no_grad():for data in test_dataloader:imgs, targets = data# -------------------利用GPU训练-------------------#imgs = imgs.to(device)targets = targets.to(device)outputs = zrf(imgs)loss = loss_fn(outputs, targets)accuracy = (outputs.argmax(1) == targets).sum()total_accuracy = total_accuracy + accuracytotal_test_loss = total_test_loss + loss.item()print("整体测试集上的Loss:{}",format(total_test_loss))print("整体测试集上的正确率:{}".format(total_accuracy/test_data_size))total_test_step = total_test_step + 1writer.add_scalar("test_loss", total_test_loss, total_test_step)writer.add_scalar("test_accuracy", total_accuracy/test_data_size, total_test_step)torch.save(zrf, "zrf_{}.pth".format(i))print("模型已保存")
writer.close()

利用GPU训练前一百次的时间:  4.680064678192139

没有GPU: 6.723153114318848

完整的模型验证套路

(测试、demo)利用已经训练好的模型,然后给他提供输入

 

http://www.lryc.cn/news/106556.html

相关文章:

  • [转]关于cmake --build .的理解
  • 【Linux下6818开发板(ARM)】硬件空间挂载
  • 剑指offer 动态规划篇
  • 关于Linux中前端负载均衡之VIP(LVS+Keepalived)自动化部署的一些笔记
  • C++ 拷贝交换技术示例
  • 使用 Go 语言实现二叉搜索树
  • 系统接口自动化测试方案
  • 小研究 - JVM 垃圾回收方式性能研究(一)
  • [LeetCode]链表相关题目(c语言实现)
  • [深入理解NAND Flash (操作篇)] NAND 初始化常用命令:复位 (Reset) 和 Read ID 和 Read UID 操作和代码实现
  • RxJava 复刻简版之二,调用流程分析之案例实现
  • SpringMVC中Model和ModelAndView的区别
  • Tomcat安装与管理
  • React之路由
  • 机器学习深度学习——非NVIDIA显卡怎么做深度学习(坑点排查)
  • 2021 Robocom 决赛 第四题
  • 线程池-手写线程池Linux C简单版本(生产者-消费者模型)
  • 05-向量的意义_n维欧式空间
  • 交通运输安全大数据分析解决方案
  • vimrc 配置 (持续跟新中)
  • 【集成学习介绍】
  • 动画制作选择Blender还是Maya
  • 215. 数组中的第K个最大元素
  • NLP From Scratch: 生成名称与字符级RNN
  • Spring MVC程序开发
  • 医疗知识图谱问答——文本分类解析
  • JS关于多张图片上传显示报错不影响后面图片上传方法
  • MySQL踩坑之sql_mode的用法
  • 消息队列总结(4)- RabbitMQ Kafka RocketMQ高性能方案
  • websocket服务端大报文发送连接自动断开分析