【python代码】一些小实验
目录
1. 测试Resnet50 ONNX模型的推理速度
1. 测试Resnet50 ONNX模型的推理速度
###############################
# Export a ResNet-50 model to ONNX, then
# compare ONNX Runtime inference speed on CPU vs. GPU.
###############################
import time

import numpy as np
import onnxruntime as ort
import torch
import torchvision.models as models


def export_onnx(onnx_path):
    """Build a ResNet-50, export it to *onnx_path*, and return the torch model.

    :param onnx_path: destination file name for the exported ONNX graph
    :return: the torchvision ResNet-50 module that was exported
    """
    # Random weights are sufficient for a pure speed benchmark.
    resnet50 = models.resnet50(pretrained=False)
    torch.onnx.export(
        resnet50,                           # model
        torch.randn(1, 3, 224, 224),        # example input
        onnx_path,                          # output file name
        opset_version=12,                   # opset version
        input_names=["input"],              # input tensor name
        output_names=["output"],            # output tensor name
        # Dynamic batch axis so the ONNX model accepts any batch size.
        dynamic_axes={"input": {0: "batch_x"}, "output": {0: "batch_y"}},
    )
    # BUG FIX: the original comment promised to return the torch model
    # but the function returned None — actually return it.
    return resnet50


class ONNXModel:
    """Thin wrapper around an onnxruntime InferenceSession."""

    def __init__(self, model_path, use_gpu=False):
        """
        Initialize the ONNX inference helper.

        :param model_path: path to the ONNX model file
        :param use_gpu: whether to request the CUDA execution provider
        """
        self.model_path = model_path
        self.use_gpu = use_gpu
        self.session = None
        self.load_model()
        # Cache the first graph input's name; run_inference feeds data under this key.
        self.input_name = self.session.get_inputs()[0].name

    def load_model(self):
        """Create the InferenceSession and report which provider is active."""
        try:
            print(f"Loading model from {self.model_path}...")
            providers = (
                ['CUDAExecutionProvider', 'CPUExecutionProvider']
                if self.use_gpu
                else ['CPUExecutionProvider']
            )
            self.session = ort.InferenceSession(self.model_path, providers=providers)
        except Exception as e:
            print(f"Failed to load model: {e}")
            raise
        # ORT silently falls back to CPU when CUDA is unavailable,
        # so report which provider the session actually got.
        if 'CUDAExecutionProvider' in self.session.get_providers():
            print("✅ ONNX Runtime 正在使用 GPU 推理")
        else:
            print("❌ ONNX Runtime 正在使用 CPU 推理")

    def run_inference(self, input_data):
        """
        Run one forward pass and return the result.

        :param input_data: numpy array matching the model's input shape
        :return: the first model output (numpy array)
        """
        # The session expects a {input_name: array} feed dict.
        outputs = self.session.run(None, {self.input_name: input_data})
        return outputs[0]


def eval(onnx_model, n):
    """Run *n* inferences on a fixed random batch and print total/average time.

    NOTE(review): the name shadows the builtin ``eval``; kept unchanged for
    backward compatibility with any external callers.

    :param onnx_model: an ONNXModel instance to benchmark
    :param n: number of inference iterations
    """
    batch_size = 32
    print("评估中....")
    # ONNX Runtime consumes CPU-resident numpy data.
    onnx_input = np.random.randn(batch_size, 3, 224, 224).astype(np.float32)
    t1 = time.time()
    for _ in range(n):
        onnx_model.run_inference(onnx_input)
    t = time.time() - t1
    print(f"推理{n}次时间对比:")
    print(f"模型推理时间: {t:.4f} 秒")
    print(f"模型推理平均时间: {t/n:.4f} 秒")


def test():
    """Benchmark the exported ResNet-50 on CPU, then on GPU."""
    onnx_path = "./resnet50.onnx"
    # export_onnx(onnx_path)  # uncomment on first run to create the file

    onnx_model_cpu = ONNXModel(onnx_path, use_gpu=False)
    eval(onnx_model_cpu, 50)

    onnx_model_gpu = ONNXModel(onnx_path, use_gpu=True)
    # BUG FIX: the original benchmarked the CPU model a second time here,
    # so the GPU session was never measured. Use the GPU model.
    eval(onnx_model_gpu, 50)


# Example usage
if __name__ == "__main__":
    test()