
Text classification with PyTorch and BERT

This walkthrough uses the public IMDB dataset (aclImdb) as an example; the BERT model can be any checkpoint you pick from the Hugging Face Hub.
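The later steps load the tokenizer and weights from a local ./bert_pretrain directory. As a minimal sketch (bert-base-uncased is only an example choice of checkpoint), you could download and save one like this:

from transformers import BertTokenizer, BertModel

# Any BERT checkpoint from the Hugging Face Hub works; bert-base-uncased is just an example
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')

# Save locally so the following steps can load from './bert_pretrain'
tokenizer.save_pretrained('./bert_pretrain')
model.save_pretrained('./bert_pretrain')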

1. Import the required libraries

import os
import torch
from torch.utils.data import DataLoader, TensorDataset, random_split
from transformers import BertTokenizer, BertModel, BertConfig
from torch import nn
from torch.optim import AdamW
import numpy as np
from sklearn.metrics import accuracy_score
import pandas as pd
from tqdm import tqdm

device = torch.device("cuda:0")
print(device)

2. Load and preprocess the data: read the reviews, convert them into a format BERT can consume, and map the original pos/neg labels onto three classes (negative reviews are randomly assigned to low or medium, positive reviews to high) to simulate a multi-class task.

import random
def load_imdb_dataset_and_create_multiclass_labels(path_to_data, split="train"):
    print(f"load start: {split}")
    reviews = []
    labels = []  # 0 for low, 1 for medium, 2 for high
    for label in ["pos", "neg"]:
        labeled_path = os.path.join(path_to_data, split, label)
        for file in os.listdir(labeled_path):
            if file.endswith('.txt'):
                with open(os.path.join(labeled_path, file), 'r', encoding='utf-8') as f:
                    reviews.append(f.read())
                if label == "neg":
                    # Randomly assign negative reviews to low or medium
                    labels.append(random.choice([0, 1]))
                else:
                    # Assign positive reviews to high
                    labels.append(2)
    # Shuffle before truncating; otherwise the first 1000 samples are all positive
    combined = list(zip(reviews, labels))
    random.shuffle(combined)
    reviews, labels = zip(*combined)
    return list(reviews)[:1000], list(labels)[:1000]
# Load the dataset
train_texts, train_labels = load_imdb_dataset_and_create_multiclass_labels("./data/aclImdb", split="train")
test_texts, test_labels = load_imdb_dataset_and_create_multiclass_labels("./data/aclImdb", split="test")
print("load ok")
# Number of samples
print("train_texts:", len(train_texts))
print("test_texts:", len(test_texts))

3. Convert the texts into BERT's input format

tokenizer = BertTokenizer.from_pretrained('./bert_pretrain')

def encode_texts(tokenizer, texts, max_len=512):
    input_ids = []
    attention_masks = []
    for text in texts:
        encoded = tokenizer.encode_plus(
            text,
            add_special_tokens=True,
            max_length=max_len,
            padding='max_length',   # pad_to_max_length is deprecated in recent transformers
            truncation=True,        # IMDB reviews can exceed 512 tokens
            return_attention_mask=True,
            return_tensors='pt',
        )
        input_ids.append(encoded['input_ids'])
        attention_masks.append(encoded['attention_mask'])
    return torch.cat(input_ids, dim=0), torch.cat(attention_masks, dim=0)

train_inputs, train_masks = encode_texts(tokenizer, train_texts)
test_inputs, test_masks = encode_texts(tokenizer, test_texts)
print("input encoding done")

4. Create the TensorDataset and DataLoader

train_labels = torch.tensor(train_labels)
test_labels = torch.tensor(test_labels)

train_dataset = TensorDataset(train_inputs, train_masks, train_labels)
test_dataset = TensorDataset(test_inputs, test_masks, test_labels)

# Split the dataset into train and validation sets
train_size = int(0.9 * len(train_dataset))
val_size = len(train_dataset) - train_size
train_dataset, val_dataset = random_split(train_dataset, [train_size, val_size])

train_dataloader = DataLoader(train_dataset, batch_size=128, shuffle=True)
val_dataloader = DataLoader(val_dataset, batch_size=128, shuffle=False)
test_dataloader = DataLoader(test_dataset, batch_size=128, shuffle=False)
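Pulling one batch is a cheap way to verify the whole input pipeline before training (note that batch_size=128 with 512-token sequences is memory-hungry on a single GPU, so you may need to lower it):

inputs, masks, labels = next(iter(train_dataloader))
print(inputs.shape, masks.shape, labels.shape)  # e.g. [128, 512], [128, 512], [128]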

5. Build the model: BERT with a three-way classification head

class BertForMultiLabelClassification(nn.Module):
    def __init__(self):
        super(BertForMultiLabelClassification, self).__init__()
        self.bert = BertModel.from_pretrained('./bert_pretrain')
        self.dropout = nn.Dropout(0.1)
        self.classifier = nn.Linear(self.bert.config.hidden_size, 3)  # 3 classes

    def forward(self, input_ids, attention_mask):
        # With return_dict=False, BertModel returns (sequence_output, pooled_output)
        _, pooled_output = self.bert(input_ids=input_ids, attention_mask=attention_mask, return_dict=False)
        pooled_output = self.dropout(pooled_output)
        return self.classifier(pooled_output)
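A minimal smoke test (assuming the pretrained weights in ./bert_pretrain are in place) to confirm the head produces one logit per class:

m = BertForMultiLabelClassification()
dummy_ids = torch.randint(0, 1000, (2, 16))       # batch of 2, sequence length 16, arbitrary token ids
dummy_mask = torch.ones(2, 16, dtype=torch.long)
with torch.no_grad():
    print(m(dummy_ids, dummy_mask).shape)         # expected: torch.Size([2, 3])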

6. Train and evaluate the model

# Initialize the model, optimizer, and loss function
model = BertForMultiLabelClassification()
# Optional: multi-GPU training
# if MULTI_GPU:
#     model = nn.DataParallel(model)
model.to(device)

optimizer = AdamW(model.parameters(), lr=2e-5)
loss_fn = nn.CrossEntropyLoss()

# Training function
def train(model, dataloader, optimizer, loss_fn, device):
    model.train()
    total_loss = 0
    for batch in dataloader:
        batch = tuple(b.to(device) for b in batch)
        inputs, masks, labels = batch
        optimizer.zero_grad()
        outputs = model(input_ids=inputs, attention_mask=masks)
        loss = loss_fn(outputs, labels)
        total_loss += loss.item()
        loss.backward()
        optimizer.step()
    average_loss = total_loss / len(dataloader)
    return average_loss

# Evaluation function
def evaluate(model, dataloader, loss_fn, device):
    model.eval()
    total_loss = 0
    predictions, true_labels = [], []
    with torch.no_grad():
        for batch in dataloader:
            batch = tuple(b.to(device) for b in batch)
            inputs, masks, labels = batch
            outputs = model(input_ids=inputs, attention_mask=masks)
            loss = loss_fn(outputs, labels)
            total_loss += loss.item()
            logits = outputs.detach().cpu().numpy()
            label_ids = labels.to('cpu').numpy()
            predictions.append(logits)
            true_labels.append(label_ids)
    average_loss = total_loss / len(dataloader)
    flat_predictions = np.concatenate(predictions, axis=0)
    flat_predictions = np.argmax(flat_predictions, axis=1).flatten()
    flat_true_labels = np.concatenate(true_labels, axis=0)
    accuracy = accuracy_score(flat_true_labels, flat_predictions)
    return average_loss, accuracy

# Training and evaluation loop (3 epochs as an example)
for epoch in range(3):
    train_loss = train(model, train_dataloader, optimizer, loss_fn, device)
    val_loss, val_accuracy = evaluate(model, val_dataloader, loss_fn, device)
    print(f"Epoch {epoch+1}")
    print(f"Train Loss: {train_loss:.3f}")
    print(f"Validation Loss: {val_loss:.3f}, Accuracy: {val_accuracy:.3f}")

# Evaluate on the test set
test_loss, test_accuracy = evaluate(model, test_dataloader, loss_fn, device)
print(f"Test Loss: {test_loss:.3f}, Accuracy: {test_accuracy:.3f}")

# Save the model (create the output directory first so torch.save does not fail)
os.makedirs("./model", exist_ok=True)
torch.save(model.state_dict(), "./model/bert_multiclass_imdb_model.pt")

7. Model prediction

def predict(texts, model, tokenizer, device, max_len=128):
    # Encode the texts into BERT's input format
    def encode_texts(tokenizer, texts, max_len):
        input_ids = []
        attention_masks = []
        for text in texts:
            encoded = tokenizer.encode_plus(
                text,
                add_special_tokens=True,
                max_length=max_len,
                padding='max_length',
                truncation=True,
                return_attention_mask=True,
                return_tensors='pt',
            )
            input_ids.append(encoded['input_ids'])
            attention_masks.append(encoded['attention_mask'])
        return torch.cat(input_ids, dim=0), torch.cat(attention_masks, dim=0)

    model.eval()  # Put the model in evaluation mode
    input_ids, attention_masks = encode_texts(tokenizer, texts, max_len)
    input_ids = input_ids.to(device)
    attention_masks = attention_masks.to(device)
    with torch.no_grad():
        outputs = model(input_ids, attention_mask=attention_masks)
    logits = outputs.detach().cpu().numpy()
    predictions = np.argmax(logits, axis=1)
    return predictions

# Example texts
texts = ["I really like the movie", "the movie is so bad"]

# Initialize the model and load the saved weights
device = torch.device("cuda:0")
model = BertForMultiLabelClassification()
model.to(device)
model.load_state_dict(torch.load('./model/bert_multiclass_imdb_model.pt', map_location=device))
model.eval()

# Load the tokenizer and run prediction
tokenizer = BertTokenizer.from_pretrained('./bert_pretrain')
predictions = predict(texts, model, tokenizer, device)
for text, pred in zip(texts, predictions):
    print(f"Text: {text}, Predicted category: {pred}")
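The predicted category is just a class index; mirroring the label scheme from step 2 (0 for low, 1 for medium, 2 for high), you can print a readable name instead:

label_names = {0: "low", 1: "medium", 2: "high"}  # matches the labels created in step 2
for text, pred in zip(texts, predictions):
    print(f"Text: {text}, Predicted category: {label_names[int(pred)]}")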