Python day43
@浙大疏锦行
import torch
import numpy as np
import pandas as pd
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

train_dataset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
test_dataset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)

train_dataloader = DataLoader(train_dataset, batch_size=32, shuffle=True)
test_dataloader = DataLoader(test_dataset, batch_size=32, shuffle=False)
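As a quick check of the data pipeline (an assumed addition, not part of the original notes), one batch can be pulled from train_dataloader to confirm the tensor shapes and that Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) maps the pixel values into roughly [-1, 1]:

# Hypothetical sanity check: inspect one batch from the training DataLoader
images, labels = next(iter(train_dataloader))
print(images.shape)    # torch.Size([32, 3, 32, 32]) -- batch of 32 RGB 32x32 images
print(labels.shape)    # torch.Size([32]) -- one class index (0-9) per image
print(images.min().item(), images.max().item())    # roughly -1.0 and 1.0 after normalization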
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"Using device: {device}")class Net(nn.Module):def __init__(self):super(Net,self).__init__()# Convolutional Layer 1self.conv1 = nn.Conv2d(in_channels =3,out_channels=32,padding=2,kernel_size=3,stride=1)# Batch Normlizationself.bn1 = nn.BatchNorm2d(num_features=32)# ReLU Activationself.relu1 = nn.ReLU()# CNN Layer 2self.conv2 = nn.Conv2d(in_channels=32,out_channels=64,padding=2,kernel_size=3,stride=1)self.bn2 = nn.BatchNorm2d(num_features=64)self.relu2 = nn.ReLU()# MLPself.fc1 = nn.Linear(in_features=64*8*8, out_features=128)# Dropoutself.dropout = nn.Dropout(p=0.5)# Output Layerself.fc2 = nn.Linear(in_features=128, out_features=10)def forward(self,x):# CNN layer 1x = self.conv1(x)x = self.bn1(x)x = self.relu1(x)# CNN layer 2x = self.conv2(x)x = self.bn2(x)x = self.relu2(x)# MLPx = x.view(-1, 64*8*8)x = self.fc1(x) # MLPx = self.dropout(x) # Dropout 随机丢弃神经元x = self.fc2(x) # Output Layerreturn x # 这里的x是未经过softmax的结果model = Net()
model.to(device)
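Before training, a forward pass with a dummy CIFAR-10-sized batch (an assumed check, not in the original notes) confirms that the flatten step matches fc1's 64*8*8 input and that the model returns one logit per class:

# Hypothetical shape check: a random batch of 4 images of size 3x32x32
dummy = torch.randn(4, 3, 32, 32).to(device)
out = model(dummy)
print(out.shape)    # torch.Size([4, 10]) -- 10 raw logits per image; softmax is left to the loss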
print(model)

criterion = nn.CrossEntropyLoss()    # cross-entropy loss (applied to the raw logits)
optimizer = optim.Adam(model.parameters(), lr=0.001)    # Adam optimizer

# Learning-rate scheduler: adjust the learning rate dynamically during training -- a larger LR
# early on drives the loss down quickly, while a smaller LR later lets the loss converge more precisely.
# ReduceLROnPlateau is not updated automatically; call scheduler.step(metric) manually at the end of each epoch.
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
    optimizer,      # the optimizer whose learning rate is adjusted (Adam here)
    mode='min',     # the monitored metric should be minimized (e.g. the loss)
    patience=3,     # reduce the LR only after 3 consecutive epochs without improvement
    factor=0.5      # scale applied when reducing (new LR = old LR * 0.5)
)

def train():
    pass
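The train() function is left as a stub above. A minimal sketch of how it could be filled in, following the scheduler comments, is shown below; the epoch count and the test-set accuracy report are assumptions, and ReduceLROnPlateau is stepped once per epoch with the average training loss:

# Sketch of a training loop (assumed details: 20 epochs, accuracy evaluated on the test set)
def train(epochs=20):
    for epoch in range(epochs):
        model.train()
        running_loss = 0.0
        for images, labels in train_dataloader:
            images, labels = images.to(device), labels.to(device)
            optimizer.zero_grad()              # clear gradients from the previous step
            outputs = model(images)            # forward pass: raw logits
            loss = criterion(outputs, labels)
            loss.backward()                    # backpropagation
            optimizer.step()                   # update parameters
            running_loss += loss.item()
        avg_loss = running_loss / len(train_dataloader)

        # evaluate on the test set
        model.eval()
        correct, total = 0, 0
        with torch.no_grad():
            for images, labels in test_dataloader:
                images, labels = images.to(device), labels.to(device)
                outputs = model(images)
                _, predicted = torch.max(outputs, dim=1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

        # manually step the scheduler once per epoch with the monitored metric
        scheduler.step(avg_loss)
        print(f"Epoch {epoch+1}/{epochs} - loss: {avg_loss:.4f} - test acc: {100*correct/total:.2f}%")

train()    # run training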