
Python Self-Study Day 07 --- Getting Started with TensorFlow


Same setup as before.

First, here are some well-written summaries by more experienced authors:

Reposted from Doit_ --> Tensorflow基础知识与神经网络构建--step by step 入门TensorFlow(一)

drilistbox --> TensorFlow入门深度学习--01.基础知识

What follows are my own practice notes as a beginner.

import tensorflow as tf
import numpy as np
# "Your CPU supports instructions that this TensorFlow binary was not compiled to use":
# the CPU supports AVX extensions, but the installed TensorFlow build was not compiled to use them
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
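# setting TF_CPP_MIN_LOG_LEVEL to '2' hides TensorFlow's INFO and WARNING messages, including the warning above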
import matplotlib.pyplot as plt
# <editor-fold desc="TF处理结构,例子2">
# # create data
# x_data = np.random.rand(100).astype(np.float32)
# y_data = x_data*0.1 + 0.3
#
# # create tensorflow structure start ### build the structure
# # generate a random parameter: one-dimensional, initial value uniform between -1 and 1
# Weights = tf.Variable(tf.random_uniform([1],-1.0,1.0))
# # initial value 0 (shape [1])
# biases = tf.Variable(tf.zeros([1]))
# # y: the predicted value
# y = Weights*x_data+biases
# # loss (the error between prediction and data)
# loss = tf.reduce_mean(tf.square(y-y_data))
# # build an optimizer to reduce the loss
# # back-propagating the error is handed to the optimizer; the method used here is gradient descent
# # (Gradient Descent); the optimizer is then used to update the parameters.
# optimizer = tf.train.GradientDescentOptimizer(0.5)      # learning rate: a number smaller than 1
# train = optimizer.minimize(loss)
# # initialize so the variables become active
# init = tf.global_variables_initializer()     # initialize_all_variables() is deprecated
# # create tensorflow structure end ###
#
# # activate the structure and initialize
# sess = tf.Session()
# sess.run(init)              # Very important
#
# # train for 201 steps
# for step in range(201):
#     sess.run(train)         # 开始训练
#     # print the current result every 20 training steps
#     if step % 20 == 0:
#         print(step,sess.run(Weights),sess.run(biases))
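# # as training proceeds, Weights should approach 0.1 and biases should approach 0.3 (the values used to generate y_data)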
# </editor-fold>
# <editor-fold desc="Session control">
# matrix1 = tf.constant([[3,3]])
# matrix2 = tf.constant([[2],
#                        [2]])
# # matrix multiply; the numpy equivalent is np.dot(n1, n2)
# product = tf.matmul(matrix1,matrix2)
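# # [[3, 3]] x [[2], [2]] gives a 1x1 matrix: [[12]]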
#
# # method 1
# sess = tf.Session()
# result = sess.run(product)
# print(result)
# sess.close()
#
# # method 2
# # the session is closed automatically when the with-block ends
# with tf.Session() as sess:
#     result2 = sess.run(product)
#     print(result2)
# </editor-fold>
# <editor-fold desc="Variables">
# state = tf.Variable(0,name='counter')
# # print(state.name)
# one = tf.constant(1)
#
# new_value = tf.add(state, one)
# update = tf.assign(state, new_value)
# # initialize all variables
# # whenever variables are defined, they must be initialized this way
# init = tf.global_variables_initializer()
#
# with tf.Session() as sess:
#     sess.run(init)
#     for _ in range(3):
#         sess.run(update)
#         print(sess.run(state))
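# # each iteration adds 1 to state, so the loop prints 1, 2, 3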
# </editor-fold>
# <editor-fold desc="Placeholders: feeding in values">
# # specify the dtype when creating a placeholder; float32 is used most of the time
# input1 = tf.placeholder(tf.float32)
# input2 = tf.placeholder(tf.float32)
#
# output = tf.multiply(input1,input2)
#
# # when placeholders are used, values are passed in through feed_dict
# with tf.Session() as sess:
#     print(sess.run(output,feed_dict={input1:[7.], input2:[2.]}))
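# # the printed result is [14.] (7. * 2.)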
# </editor-fold>
# <editor-fold desc="Activation functions">
# Recommended activation functions:
# in the convolutional layers of convolutional neural networks, relu is the recommended activation;
# in recurrent neural networks, tanh or relu is recommended
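# A minimal sketch (added here as an illustration, not part of the original notes) of what these two activations do to a tensor:
# act_x = tf.constant([-1.0, 0.0, 1.0])
# with tf.Session() as sess:
#     print(sess.run(tf.nn.relu(act_x)))   # -> [0. 0. 1.]
#     print(sess.run(tf.nn.tanh(act_x)))   # -> approximately [-0.76  0.  0.76]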
# </editor-fold>
# <editor-fold desc="Building a neural network">
# # <editor-fold desc="Add a layer">
# # add a neural-network layer
# def add_layer(inputs, in_size, out_size, activation_function = None):
#     # create a random weight matrix
#     Weights = tf.Variable(tf.random_normal([in_size, out_size]))
#     # 1 row, out_size columns, all initialized to 0.1
#     biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
#     Wx_plus_b = tf.matmul(inputs, Weights) + biases
#     if activation_function is None:
#         outputs = Wx_plus_b
#     else:
#         outputs = activation_function(Wx_plus_b)
#     return outputs
# # </editor-fold>
#
# # create the data source
# # 300 points from -1 to 1, as a column vector (300 rows, 1 column)
# x_data = np.linspace(-1,1,300)[:,np.newaxis]
# # add noise so the data looks more like real data
# noise = np.random.normal(0,0.05,x_data.shape)
# y_data = np.square(x_data) - 0.5 + noise
#
# #  float32 is the dtype; None means any number of samples is accepted; 1 because each x_data sample has a single feature
# xs = tf.placeholder(tf.float32, [None, 1])
# ys = tf.placeholder(tf.float32, [None, 1])
# # hidden layer with 10 neurons
# l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
# # output layer: input is l1 (size 10), output size 1 to match y_data
# prediction = add_layer(l1, 10, 1, activation_function=None)
#
# # sum the squared error for each sample, then average over all samples => mean error
# loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
#                                     reduction_indices=[1]))
# # the most commonly used optimizer; learning rate (< 1); minimizes the loss
# train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
# # initialize the variables
# init = tf.global_variables_initializer()
# # create a session
# sess = tf.Session()
# sess.run(init)
#
# # train for 1000 steps
# for i in range(1000):
#     # train with the full data set
#     sess.run(train_step, feed_dict={xs:x_data , ys:y_data})
#     if i % 50 == 0:
#         print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
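# # the printed loss should keep decreasing, roughly toward the noise variance (0.05**2 = 0.0025), though the exact value varies run to run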
# </editor-fold>
# <editor-fold desc="Visualizing the results">
# # import the plotting module
# # import matplotlib.pyplot as plt
# # add a neural-network layer
# def add_layer(inputs, in_size, out_size, activation_function = None):
#     # create a random weight matrix
#     Weights = tf.Variable(tf.random_normal([in_size, out_size]))
#     # 1 row, out_size columns, all initialized to 0.1
#     biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
#     Wx_plus_b = tf.matmul(inputs, Weights) + biases
#     if activation_function is None:
#         outputs = Wx_plus_b
#     else:
#         outputs = activation_function(Wx_plus_b)
#     return outputs
#
# # create the data source
# # 300 points from -1 to 1, as a column vector (300 rows, 1 column)
# x_data = np.linspace(-1,1,300)[:,np.newaxis]
# # add noise so the data looks more like real data
# noise = np.random.normal(0,0.05,x_data.shape)
# y_data = np.square(x_data) - 0.5 + noise
#
# #  float32 is the dtype; None means any number of samples is accepted; 1 because each x_data sample has a single feature
# xs = tf.placeholder(tf.float32, [None, 1])
# ys = tf.placeholder(tf.float32, [None, 1])
# # hidden layer with 10 neurons
# l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
# # output layer: input is l1 (size 10), output size 1 to match y_data
# prediction = add_layer(l1, 10, 1, activation_function=None)
#
# # sum the squared error for each sample, then average over all samples => mean error
# loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
#                                     reduction_indices=[1]))
# # the most commonly used optimizer; learning rate (< 1); minimizes the loss
# train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
#
# # important step!
# # initialize the variables
# init = tf.global_variables_initializer()
# # create a session
# sess = tf.Session()
# sess.run(init)
#
# # plot the real data
# fig = plt.figure()
# ax = fig.add_subplot(1,1,1)
# ax.scatter(x_data, y_data)
# # ion() keeps the program running after show()
# plt.ion()
# # by default, show() would block the program
# plt.show()
#
#
# # train for 1000 steps
# for i in range(1000):
#     # train with the full data set
#     sess.run(train_step, feed_dict={xs:x_data , ys:y_data})
#     if i % 50 == 0:
#         # print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
#         try:
#             # remove the previously drawn line (the first element of lines)
#             ax.lines.remove(lines[0])
#         except Exception:
#             pass
#         prediction_value = sess.run(prediction, feed_dict={xs: x_data})
#         lines = ax.plot(x_data, prediction_value, 'r-', lw=5)
#
#         # pause for 0.1 s before continuing
#         plt.pause(0.1)
# plt.pause(0)
# </editor-fold>
# <editor-fold desc="TensorBoard, a great visualization helper">
# # <editor-fold desc="Add a layer">
# # add a neural-network layer
# def add_layer(inputs, in_size, out_size, activation_function = None):
#     # create a random weight matrix
#     with tf.name_scope('layer'):
#         with tf.name_scope('Weights'):
#             Weights = tf.Variable(tf.random_normal([in_size, out_size]))
#         # 1 row, out_size columns, all initialized to 0.1
#         with tf.name_scope('biases'):
#             biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
#         with tf.name_scope('Wx_plus_b'):
#             Wx_plus_b = tf.matmul(inputs, Weights) + biases
#         if activation_function is None:
#             outputs = Wx_plus_b
#         else:
#             outputs = activation_function(Wx_plus_b)
#         return outputs
# # </editor-fold>
#
# # create the data source
# # 300 points from -1 to 1, as a column vector (300 rows, 1 column)
# x_data = np.linspace(-1,1,300)[:,np.newaxis]
# # add noise so the data looks more like real data
# noise = np.random.normal(0,0.05,x_data.shape)
# y_data = np.square(x_data) - 0.5 + noise
#
# #  float32 is the dtype; None means any number of samples is accepted; 1 because each x_data sample has a single feature
# with tf.name_scope('inputs'):
#     xs = tf.placeholder(tf.float32, [None, 1], name='x_input')
#     ys = tf.placeholder(tf.float32, [None, 1], name='y_input')
# # hidden layer with 10 neurons
# l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
# # output layer: input is l1 (size 10), output size 1 to match y_data
# prediction = add_layer(l1, 10, 1, activation_function=None)
#
# # sum the squared error for each sample, then average over all samples => mean error
# with tf.name_scope('loss'):
#     loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
#                                     reduction_indices=[1]))
# # the most commonly used optimizer; learning rate (< 1); minimizes the loss
# with tf.name_scope('train'):
#     train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
# # initialize the variables
# init = tf.global_variables_initializer()
# # create a session
# sess = tf.Session()
# writer = tf.summary.FileWriter("logs/", sess.graph)
# sess.run(init)
#
# # train for 1000 steps
# for i in range(1000):
#     # train with the full data set
#     sess.run(train_step, feed_dict={xs:x_data , ys:y_data})
#     if i % 50 == 0:
#         print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
# </editor-fold>
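
To inspect the graph written to the logs/ directory, start TensorBoard from a terminal (it is installed together with TensorFlow) and open the address it prints, usually http://localhost:6006:

tensorboard --logdir=logs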