
Logistic Regression with TensorFlow (Part 5)

Softmax Classification
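This listing trains a softmax (multinomial logistic regression) classifier on three synthetic 2-D Gaussian clusters. For reference, in the notation of the code below (feature matrix X, weights W, bias b, one-hot labels Y), the model and the cross-entropy cost it minimizes are:

$$\hat{Y} = \mathrm{softmax}(XW + b), \qquad \mathrm{softmax}(z)_j = \frac{e^{z_j}}{\sum_k e^{z_k}}, \qquad \mathrm{cost} = -\sum_i \sum_j Y_{ij}\,\log \hat{Y}_{ij}$$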

#List3-50
%matplotlib inline
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

# Generate three Gaussian clusters of 100 training points each,
# centered at (1, 1), (5, 4), and (8, 0).
x1_label0 = np.random.normal(1, 1, (100, 1))
x2_label0 = np.random.normal(1, 1, (100, 1))
x1_label1 = np.random.normal(5, 1, (100, 1))
x2_label1 = np.random.normal(4, 1, (100, 1))
x1_label2 = np.random.normal(8, 1, (100, 1))
x2_label2 = np.random.normal(0, 1, (100, 1))

# Visualize the three classes.
plt.scatter(x1_label0, x2_label0, c='r', marker='o', s=60)
plt.scatter(x1_label1, x2_label1, c='g', marker='x', s=60)
plt.scatter(x1_label2, x2_label2, c='b', marker='_', s=60)
plt.show()

# Stack the coordinates into an (N, 2) feature matrix and build one-hot labels.
xs_label0 = np.hstack((x1_label0, x2_label0))
xs_label1 = np.hstack((x1_label1, x2_label1))
xs_label2 = np.hstack((x1_label2, x2_label2))
xs = np.vstack((xs_label0, xs_label1, xs_label2))
labels = np.matrix([[1., 0., 0.]] * len(x1_label0)
                   + [[0., 1., 0.]] * len(x1_label1)
                   + [[0., 0., 1.]] * len(x1_label2))

# Shuffle features and labels with the same permutation.
arr = np.arange(xs.shape[0])
np.random.shuffle(arr)
xs = xs[arr, :]
labels = labels[arr, :]

# Generate a held-out test set of 10 points per class from the same distributions.
test_x1_label0 = np.random.normal(1, 1, (10, 1))
test_x2_label0 = np.random.normal(1, 1, (10, 1))
test_x1_label1 = np.random.normal(5, 1, (10, 1))
test_x2_label1 = np.random.normal(4, 1, (10, 1))
test_x1_label2 = np.random.normal(8, 1, (10, 1))
test_x2_label2 = np.random.normal(0, 1, (10, 1))
test_xs_label0 = np.hstack((test_x1_label0, test_x2_label0))
test_xs_label1 = np.hstack((test_x1_label1, test_x2_label1))
test_xs_label2 = np.hstack((test_x1_label2, test_x2_label2))
test_xs = np.vstack((test_xs_label0, test_xs_label1, test_xs_label2))
test_labels = np.matrix([[1., 0., 0.]] * 10 + [[0., 1., 0.]] * 10 + [[0., 0., 1.]] * 10)

# Model parameters: one weight column and one bias per class.
num_labels = 3
train_size, num_features = xs.shape
W = tf.Variable(tf.zeros([num_features, num_labels]))
b = tf.Variable(tf.zeros([num_labels]))

# Hyperparameters and optimizer.
learning_rate = 0.01
training_epochs = 1000
batch_size = 100
optimizer = tf.optimizers.SGD(learning_rate)

# Mini-batch gradient descent on the softmax cross-entropy cost.
for step in range(training_epochs * train_size // batch_size):
    offset = (step * batch_size) % train_size
    batch_xs = xs[offset:(offset + batch_size), :]
    batch_labels = labels[offset:(offset + batch_size)]
    X = tf.cast(batch_xs, tf.float32)
    Y = tf.cast(batch_labels, tf.float32)

    # Record the forward pass so the tape can compute gradients.
    with tf.GradientTape() as g:
        y_model = tf.nn.softmax(tf.matmul(X, W) + b)
        cost = -tf.reduce_sum(Y * tf.math.log(y_model))
    gradients = g.gradient(cost, [W, b])

    # Apply the gradients to update W and b.
    optimizer.apply_gradients(zip(gradients, [W, b]))

    if step % 100 == 0:
        print(step, cost.numpy())

# Learned parameters.
print('w', W.numpy())
print('b', b.numpy())

# Evaluate accuracy on the held-out test set.
test_X = tf.cast(test_xs, tf.float32)
test_Y = tf.cast(test_labels, tf.float32)
test_model = tf.nn.softmax(tf.matmul(test_X, W) + b)
correct_prediction = tf.equal(tf.argmax(test_model, 1), tf.argmax(test_Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print("accuracy", accuracy.numpy())
