当前位置: 首页 > news >正文

mnist数据集

训练模型

import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
# from keras.optimizers import SGD
from tensorflow.keras.optimizers import Adam, Nadam, SGD
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt

print('tf', tf.__version__)
print('keras', keras.__version__)

# Batch size: one gradient update per `batch_size` samples.
batch_size = 64
# Number of target classes (digits 0-9).
num_classes = 10
# Number of training epochs.
epochs = 5

# Load the MNIST dataset from a local .npz archive.
f = np.load("mnist.npz")
x_train, y_train = f['x_train'], f['y_train']
x_test, y_test = f['x_test'], f['y_test']
f.close()
print(x_train.shape, "  ", y_train.shape)
print(x_test.shape, "  ", y_test.shape)
# im = plt.imshow(x_train[0], cmap="gray")
# plt.show()

# Flatten each 28x28 image into a 784-element vector (784 = 28*28).
x_train = x_train.reshape(60000, 784).astype('float32')
x_test = x_test.reshape(10000, 784).astype('float32')
# Normalize pixel values into [0, 1].
x_train /= 255
x_test /= 255
# print(x_train[0])

# One-hot encode the integer labels.
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)
# print(y_train[0])  # e.g. [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]
print(x_train.shape, "  ", y_train.shape)
print(x_test.shape, "  ", y_test.shape)

# Build the model: two ReLU hidden layers + softmax output over 10 classes.
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(784,)))
model.add(Dense(256, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.summary()

# Compile: categorical cross-entropy loss, Adadelta optimizer,
# accuracy as the evaluation metric.
model.compile(loss=tf.keras.losses.categorical_crossentropy,
              optimizer=tf.keras.optimizers.Adadelta(),
              metrics=['accuracy'])

# Train; validation_data is evaluated at the end of each epoch.
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))

# Evaluate on the test set (verbose=0 suppresses the progress log).
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# NOTE(review): in the mangled original this call was trapped inside a
# trailing comment, so the model was never actually saved — restored.
model.save('mnist_model_weights.h5')  # save the trained model

训练结果

Epoch 4/5
938/938 [==============================] - 7s 7ms/step - loss: 1.5926 - accuracy: 0.7292 - val_loss: 1.4802 - val_accuracy: 0.7653
Epoch 5/5
938/938 [==============================] - 6s 6ms/step - loss: 1.4047 - accuracy: 0.7686 - val_loss: 1.2988 - val_accuracy: 0.7918
Test loss: 1.2988097667694092
Test accuracy: 0.7918000221252441
Process finished with exit code 0

测试模型


import tensorflow as tf
from PIL import Image
import numpy as np
from keras.models import load_model

# Load the model saved by the training script.
model = load_model('mnist_model_weights.h5')
# model.summary()


def read_image(img_name):
    """Open an image, resize it to 28x28 (the training size) and grayscale it."""
    im = Image.open(img_name).resize((28, 28), Image.ANTIALIAS).convert('L')
    data = np.array(im)
    return data


images = []
images.append(read_image("test.png"))
# print(images)
X = np.array(images)
print(X.shape)
X = X.reshape(1, 784).astype('float32')
print(X.shape)
# Normalize into [0, 1], matching the training preprocessing.
X /= 255
# print(X[0:1])
# Prediction for the first image; for multiple images drop the trailing [0]
# to get one result row per image.
result = model.predict(X[0:1])[0]
# Pick the class with the highest probability (result has length 10).
# NOTE(review): the original compared result[i]*255 > result[num]*255;
# the *255 scaling is monotonic and therefore pointless — removed.
num = 0
for i in range(len(result)):
    if result[i] > result[num]:
        num = i
print("预测结果", num)

将数据集转换为图片


#coding: utf-8
import os
import tensorflow as tf
import input_data
from PIL import Image

'''
Extract the images of the MNIST dataset as .bmp files, one sub-directory
per label.
Parameters:
    mnist_dir   directory holding the MNIST dataset
    save_dir    directory the extracted images are written to
'''


def extract_mnist(mnist_dir, save_dir):
    rows = 28
    cols = 28
    # Load MNIST; one_hot=False yields plain integer labels, e.g. [7 3 4 ... 5 6 8].
    mnist = input_data.read_data_sets(mnist_dir, one_hot=False)
    # Number of training images and pixels per image.
    shape = mnist.train.images.shape
    images_train_count = shape[0]
    pixels_count_per_image = shape[1]
    labels = mnist.train.labels
    # NOTE(review): the original had `print(labels); exit(0)` here, which made
    # everything below unreachable — removed as debug leftover.
    labels_train_count = labels.shape[0]
    if images_train_count == labels_train_count:
        print("训练集共包含%d张图片,%d个标签" % (images_train_count, labels_train_count))
        print("每张图片包含%d个像素" % (pixels_count_per_image))
        print("数据类型为", mnist.train.images.dtype)
        # MNIST pixel values are in [0, 1]; binarize non-zero pixels to 255.
        for current_image_id in range(images_train_count):
            for i in range(pixels_count_per_image):
                if mnist.train.images[current_image_id][i] != 0:
                    mnist.train.images[current_image_id][i] = 255
            if ((current_image_id + 1) % 50) == 0:
                print("已转换%d张,共需转换%d张" % (current_image_id + 1, images_train_count))
        # Create one output sub-directory per label (0-9).
        for i in range(10):
            dir = "%s/%s" % (save_dir, i)
            print(dir)
            if not os.path.exists(dir):
                os.mkdir(dir)
        # indices[label] counts how many images of each label were saved so far
        # and is used to name the files.
        indices = [0 for x in range(0, 10)]
        for i in range(images_train_count):
            new_image = Image.new("L", (cols, rows))
            for r in range(rows):
                for c in range(cols):
                    # Pillow's putpixel takes (x, y) = (column, row); the
                    # original passed (r, c), transposing every image — fixed.
                    new_image.putpixel((c, r), int(mnist.train.images[i][c + r * cols]))
            # Label of the i-th training image decides the sub-directory.
            label = labels[i]
            image_save_path = "%s/%s/%s.bmp" % (save_dir, label, indices[label])
            indices[label] += 1
            new_image.save(image_save_path)
            # Print saving progress every 50 images.
            if ((i + 1) % 50) == 0:
                print("图片保存进度: 已保存%d张,共需保存%d张" % (i + 1, images_train_count))
    else:
        print("图片数量与标签数量不一致!")


if __name__ == '__main__':
    mnist_dir = "Mnist_Data"
    save_dir = "Mnist_Data_TrainImages"
    extract_mnist(mnist_dir, save_dir)

利用图片制作mnist格式数据集

import os
from PIL import Image
from array import *
from random import shuffle

# Expected directory layout:
# ├── training-images
# │   ├── 0  (images of class 0)
# │   ├── 1  (images of class 1)
# │   ├── 2  (images of class 2)
# │   ├── 3  (images of class 3)
# │   └── 4  (images of class 4)
# ├── test-images
# │   ├── 0  (images of class 0)
# │   ├── 1  (images of class 1)
# │   ├── 2  (images of class 2)
# │   ├── 3  (images of class 3)
# │   └── 4  (images of class 4)
# └── this script

# [source directory, output file prefix] pairs.
Names = [['./training-images', 'train'], ['./test-images', 'test']]

for name in Names:
    data_image = array('B')
    data_label = array('B')
    print(os.listdir(name[0]))
    FileList = []
    for dirname in os.listdir(name[0])[0:]:  # use [1:] to exclude .DS_Store on macOS
        # print(dirname)
        path = os.path.join(name[0], dirname)
        # print(path)
        for filename in os.listdir(path):
            # print(filename)
            if filename.endswith(".png"):
                FileList.append(os.path.join(name[0] + '/', dirname + '/', filename))
    print(FileList)
    shuffle(FileList)  # useful for further segmenting a validation set

    for filename in FileList:
        # The class label is the sub-directory name (second path component).
        label = int(filename.split('/')[2])
        print(filename)
        Im = Image.open(filename)
        # print(Im)
        pixel = Im.load()
        width, height = Im.size
        for x in range(0, width):
            for y in range(0, height):
                data_image.append(pixel[y, x])
        data_label.append(label)  # labels: one unsigned byte each

    hexval = "{0:#0{1}x}".format(len(FileList), 6)  # number of files in HEX

    # IDX1 header for the label file: magic 0x00000801 + 32-bit item count.
    header = array('B')
    header.extend([0, 0, 8, 1, 0, 0])
    header.append(int('0x' + hexval[2:][:2], 16))
    header.append(int('0x' + hexval[2:][2:], 16))
    data_label = header + data_label

    # IDX3 header for the image file adds the row/column dimensions.
    # NOTE(review): the original checked `<= 256`, but a dimension of 256
    # cannot be stored in a single unsigned header byte — must be < 256.
    if max([width, height]) < 256:
        header.extend([0, 0, 0, width, 0, 0, 0, height])
    else:
        raise ValueError('Image exceeds maximum size: 256x256 pixels')
    header[3] = 3  # change MSB so the magic becomes 0x00000803 (image data)
    data_image = header + data_image

    output_file = open(name[1] + '-images-idx3-ubyte', 'wb')
    data_image.tofile(output_file)
    output_file.close()
    output_file = open(name[1] + '-labels-idx1-ubyte', 'wb')
    data_label.tofile(output_file)
    output_file.close()

# Running the script yields four files: test-images-idx3-ubyte, test-labels-idx1-ubyte,
# train-images-idx3-ubyte, train-labels-idx1-ubyte.
# Compress each of them, e.g. `gzip -c train-labels-idx1-ubyte > train-labels-idx1-ubyte.gz`,
# to obtain the final MNIST-format dataset.
http://www.lryc.cn/news/172076.html

相关文章:

  • Java之IO概述以及
  • Spring WebFlux—Reactive 核心
  • 由于找不到d3dx9_43.dll,无法继续执行代码要怎么解决
  • git安装配置教程
  • 要如何选择报修工单管理系统?需要注意哪些核心功能?
  • 面对大数据量渲染,前端工程师如何保证页面流畅性?
  • 2023年浙工商MBA新生奖学金名单公布,如何看待?
  • 关于时空数据的培训 GAN:实用指南(第 02/3 部分)
  • UNIAPP利用canvas绘制图片和文字,并跟随鼠标移动
  • 【智能电表数据接入物联网平台实践】
  • Docker--network命令的用法
  • 优维低代码实践:图片和搜索
  • [Qt]控件
  • GEE:快速实现时间序列线性趋势和变化敏感性计算(斜率、截距)以NDVI时间序列为例
  • LC1713. 得到子序列的最少操作次数(java - 动态规划)
  • vr飞机驾驶舱模拟流程3D仿真演示加大航飞安全法码
  • 一、八大排序(sort)
  • 【AWS】AI 代码生成器—Amazon CodeWhisperer初体验 | 开启开挂编程之旅
  • 【Mysql主从配置方法---单主从】
  • ⼀⽂读懂加密资产交易赛道的新锐⼒量Bitdu
  • 万里牛与金蝶云星空对接集成查询调拨单连通调拨单新增(万里牛调拨单-金蝶【直接调拨单】)
  • 虚拟DOM与diff算法
  • K8S:pod资源限制及探针
  • CSS中的定位
  • 二、链表(linked-list)
  • Android EditText筛选+选择功能开发
  • Linux 信号 alarm函数 setitimer函数
  • 自主设计,模拟实现 RabbitMQ - 实现发送方消息确认机制
  • 【数据结构】二叉树的·深度优先遍历(前中后序遍历)and·广度优先(层序遍历)
  • 优彩云采集器下载-免费优彩云采集器下载地址