1.1 Linear Regression from Scratch
import random
import torch
import matplotlib.pyplot as plt
def train_data_make(batch_size, X, y):
    """Yield shuffled mini-batches of (features, labels)."""
    num_examples = len(X)
    idx = list(range(num_examples))
    random.shuffle(idx)  # shuffle so each epoch visits examples in a new order
    for i in range(0, num_examples, batch_size):
        batch_idx = torch.tensor(idx[i:min(i + batch_size, num_examples)])
        yield X[batch_idx], y[batch_idx]
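As an illustrative smoke test (not part of the original script; X_toy and y_toy are placeholder names), the generator can be probed with throwaway tensors:

import random
import torch

X_toy = torch.randn(6, 2)
y_toy = torch.randn(6, 1)
for X_b, y_b in train_data_make(3, X_toy, y_toy):
    print(X_b.shape, y_b.shape)  # torch.Size([3, 2]) torch.Size([3, 1])
    break  # one batch is enough to confirm the shapes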
def linear_reg(X, w, b):
    """The linear model: y_hat = Xw + b."""
    return torch.matmul(X, w) + b

def mse_loss(y_pred, y):
    """Per-example squared loss: (y_hat - y)^2 / 2."""
    return (y_pred - y) ** 2 / 2
def sgd(params, lr, batch_size):
    """Mini-batch SGD: step with the batch-averaged gradient, then reset grads."""
    with torch.no_grad():
        for param in params:
            param -= lr * param.grad / batch_size
            param.grad.zero_()
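For reference, the update rule that sgd implements is the standard mini-batch step (a restatement of the code above, with $\eta$ = lr and $|\mathcal{B}|$ = batch_size):

$$\theta \leftarrow \theta - \frac{\eta}{|\mathcal{B}|} \sum_{i \in \mathcal{B}} \nabla_\theta\, \ell\big(f(x_i; \theta),\, y_i\big)$$

Dividing by $|\mathcal{B}|$ turns the summed gradient produced by loss.sum().backward() in the training loop below into an average, so the effective step size does not depend on the batch size.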
def make_data(w, b, num_examples):
    """Generate y = Xw + b + noise."""
    X = torch.normal(0, 1, (num_examples, len(w)))
    y = torch.matmul(X, w) + b
    y += torch.normal(0, 0.01, y.shape)  # small Gaussian noise
    return X, y.reshape((-1, 1))
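A minimal shape check on the synthetic data, assuming make_data as defined above (X_demo and y_demo are illustrative names, not part of the original script):

X_demo, y_demo = make_data(torch.tensor([2.0, -3.4]), torch.tensor([4.2]), 5)
print(X_demo.shape, y_demo.shape)  # torch.Size([5, 2]) torch.Size([5, 1])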
w1 = torch.tensor([2, -3.4])
b1 = torch.tensor([4.2])
X, y = make_data(w1, b1, 1000)
batch_size = 10
num_epochs = 300
lr = 0.05
w = torch.normal(0, 0.01, size=(2,1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)

for epoch in range(num_epochs):
    # bind the batch to fresh names so the full dataset (X, y) is not shadowed
    for X_batch, y_batch in train_data_make(batch_size, X, y):
        loss = mse_loss(linear_reg(X_batch, w, b), y_batch)
        loss.sum().backward()        # sum per-example losses, then backprop
        sgd([w, b], lr, batch_size)  # in-place update; grads averaged over the batch
    with torch.no_grad():
        train_loss = mse_loss(linear_reg(X, w, b), y)
        print(f"epoch {epoch+1}|Loss {float(train_loss.mean()):.4f}|"
              f"w:[{float(w[0]):.4f},{float(w[1]):.4f}]|b:[{float(b):.4f}]")

print(f'estimation error of w: {w1 - w.reshape(w1.shape)}')
print(f'estimation error of b: {b1 - b}')
print(f"Final linear regression expression: F(X1,X2) = {float(w[0]):.4f}*X1 {float(w[1]):+.4f}*X2 {float(b):+.4f}")
1.2 Concise Implementation of Linear Regression
import numpy as np
import torch
from torch.utils import data
from torch import nn  # needed below for nn.MSELoss and nn.Sequential
import matplotlib.pyplot as plt
def make_data(w, b, num_examples):
    """Generate y = Xw + b + noise."""
    X = torch.normal(0, 1, (num_examples, len(w)))
    y = torch.matmul(X, w) + b
    y += torch.normal(0, 0.01, y.shape)  # small Gaussian noise
    return X, y.reshape((-1, 1))
def load_data(data_arrays, batch_size, is_train=True):
    """Wrap tensors in a TensorDataset and return a DataLoader over it."""
    dataset = data.TensorDataset(*data_arrays)
    return data.DataLoader(dataset, batch_size, shuffle=is_train)
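A quick check that the loader batches correctly, assuming load_data as defined above (the toy tensors are illustrative only):

X_toy = torch.randn(20, 2)
y_toy = torch.randn(20, 1)
loader = load_data((X_toy, y_toy), batch_size=5)
X_b, y_b = next(iter(loader))  # pull a single batch
print(X_b.shape, y_b.shape)    # torch.Size([5, 2]) torch.Size([5, 1])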
batch_size = 10
num_epochs = 10
w1 = torch.tensor([2, -3.4])
b1 = torch.tensor([4.2])
X, y = make_data(w1, b1, 1000)
data_iter = load_data((X, y), batch_size, is_train=True)
loss = nn.MSELoss()  # defaults to reduction='mean', so no manual division by batch size
model = nn.Sequential(nn.Linear(2, 1))
model[0].weight.data.normal_(0, 0.01)  # initialize weights ~ N(0, 0.01^2)
model[0].bias.data.fill_(0)            # initialize bias to zero
optimizer = torch.optim.SGD(model.parameters(), lr=0.05)
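As a side check (illustrative, not in the original), the trainable parameters can be listed before training to confirm the layer layout:

for name, p in model.named_parameters():
    print(name, tuple(p.shape))  # expected: 0.weight (1, 2) and 0.bias (1,)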
for epoch in range(num_epochs):
    for X_train, y_train in data_iter:
        train_loss = loss(model(X_train), y_train)
        optimizer.zero_grad()  # clear gradients from the previous step
        train_loss.backward()
        optimizer.step()
    with torch.no_grad():
        all_loss = loss(model(X), y)  # loss over the full dataset
    print(f"epoch {epoch+1}|Loss {float(all_loss):.6f}")
wr0 = model[0].weight[0, 0].item()
wr1 = model[0].weight[0, 1].item()
b0 = model[0].bias[0].item()
print(f'estimation error of w: {w1 - model[0].weight.reshape(w1.shape)}')
print(f'estimation error of b: {b1 - model[0].bias}')
print(f"Final linear regression expression: F(X1,X2) = {wr0:.4f}*X1 {wr1:+.4f}*X2 {b0:+.4f}")