import tensorflow as tf
import numpy as np
# create data
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.3 + 0.1
### create tensorflow structure start ###
Weights = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
biases = tf.Variable(tf.zeros([1]))
y = Weights * x_data + biases
loss = tf.reduce_mean(tf.square(y-y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)
init = tf.global_variables_initializer()
### create tensorflow structure end ###
with tf.Session() as sess:
    sess.run(init)
    print(sess.run(Weights))
    print(sess.run(biases))
    for step in range(200):
        # print("before training: ", step, sess.run(Weights), sess.run(biases))
        sess.run(train)
        if step % 20 == 0:
            print("after training: ", step, sess.run(Weights), sess.run(biases))
[-0.68305206]
[0.]
after training: 0 [-0.32642883] [0.5775757]
after training: 20 [0.10762579] [0.19961295]
after training: 40 [0.24628659] [0.12781325]
after training: 60 [0.2850025] [0.10776584]
after training: 80 [0.29581252] [0.10216832]
after training: 100 [0.29883078] [0.10060544]
after training: 120 [0.29967356] [0.10016903]
after training: 140 [0.29990885] [0.1000472]
after training: 160 [0.29997456] [0.10001317]
after training: 180 [0.29999295] [0.10000367]
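The code above uses the TensorFlow 1.x graph/Session API. For comparison, here is a minimal sketch of the same linear fit written with TensorFlow 2.x eager execution and tf.GradientTape; the variable names and the use of tf.keras.optimizers.SGD are my own choices, with the learning rate of 0.5 kept from the original:

import tensorflow as tf
import numpy as np

x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.3 + 0.1

# trainable parameters, initialized like the TF1 version above
W = tf.Variable(tf.random.uniform([1], -1.0, 1.0))
b = tf.Variable(tf.zeros([1]))
optimizer = tf.keras.optimizers.SGD(learning_rate=0.5)

for step in range(200):
    with tf.GradientTape() as tape:
        y = W * x_data + b
        loss = tf.reduce_mean(tf.square(y - y_data))
    grads = tape.gradient(loss, [W, b])
    optimizer.apply_gradients(zip(grads, [W, b]))
    if step % 20 == 0:
        print(step, W.numpy(), b.numpy())

The second example below builds a small two-layer network on a noisy quadratic dataset.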
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
def add_layer(inputs, in_size, out_size, activation_function=None):
    # one fully connected layer: outputs = activation(inputs * Weights + biases)
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs
# build a training set by hand: a quadratic curve plus noise
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise
xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])
# define two layers: a hidden layer and an output layer
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
prediction = add_layer(l1, 10, 1, activation_function=None)
# loss: mean squared error between prediction and ys (reduction_indices is the legacy name for axis)
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    # show the generated data as a scatter plot
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.scatter(x_data, y_data)
    plt.ion()   # interactive mode so the figure can be updated inside the loop
    plt.show()
    for i in range(1000):
        sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
        if i % 50 == 0:
            # print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
            # remove the previously drawn prediction line, if any
            try:
                ax.lines.remove(lines[0])
            except Exception:
                pass
            prediction_value = sess.run(prediction, feed_dict={xs: x_data})
            lines = ax.plot(x_data, prediction_value, 'r-', lw=1)
            plt.pause(0.1)
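As a point of comparison only, here is a minimal sketch of the same two-layer regression network written with the tf.keras API instead of the hand-rolled add_layer function; the layer sizes, ReLU activation, and learning rate of 0.1 mirror the graph above, while the Sequential/compile/fit structure is my assumed modern equivalent, not the author's code:

import numpy as np
import tensorflow as tf

x_data = np.linspace(-1, 1, 300)[:, np.newaxis].astype(np.float32)
y_data = (np.square(x_data) - 0.5 + np.random.normal(0, 0.05, x_data.shape)).astype(np.float32)

# hidden layer of 10 ReLU units, linear output layer, as in the add_layer calls above
model = tf.keras.Sequential([
    tf.keras.layers.Dense(10, activation='relu', input_shape=(1,)),
    tf.keras.layers.Dense(1),
])
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.1), loss='mse')
model.fit(x_data, y_data, epochs=1000, verbose=0)
prediction_value = model.predict(x_data)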