目的

使用Python构建一层的神经网络模型

代码

# coding:utf-8
"""Build and train a one-layer neural network with TensorFlow 1.x.

Fits the weight vector w1 of the linear model y = x @ w1 to a synthetic
dataset generated from y = x1 + x2 + noise, using mini-batch gradient
descent. The trained w1 should converge to approximately [[1], [1]].
"""
import tensorflow as tf
import numpy as np

BATCH_SIZE = 8   # samples fed to the model per training step
SEED = 23455     # fixed seed so the synthetic dataset is reproducible

# Synthetic dataset: 32 samples, 2 features each, values in [0, 1).
rdm = np.random.RandomState(SEED)
X = rdm.rand(32, 2)
# Targets: y = x1 + x2 + uniform noise; (rdm.rand()/10.0 - 0.05) lies in [-0.05, 0.05).
Y_ = [[x1 + x2 + (rdm.rand() / 10.0 - 0.05)] for (x1, x2) in X]

# Forward pass. x and y_ are placeholders filled with batch data at run time.
x = tf.placeholder(tf.float32, shape=(None, 2))
y_ = tf.placeholder(tf.float32, shape=(None, 1))
# w1: 2x1 weight matrix, initialized from a normal distribution
# (stddev=1) with its own fixed seed for reproducibility.
w1 = tf.Variable(tf.random_normal([2, 1], stddev=1, seed=1))
# Model to fit: y = x @ w1 (no bias, no activation — a single linear layer).
y = tf.matmul(x, w1)

# Loss and backward pass: mean squared error (y_ - y)^2,
# minimized with plain gradient descent at learning rate 0.01.
loss_mse = tf.reduce_mean(tf.square(y_ - y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss_mse)

with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    STEPS = 20000
    for i in range(STEPS):
        # Cycle through the 32-sample dataset in batches of BATCH_SIZE;
        # start is always a multiple of 8, so end never exceeds 32.
        start = (i * BATCH_SIZE) % 32
        end = (i * BATCH_SIZE) % 32 + BATCH_SIZE
        # Feed the batch and run one optimizer step to update w1.
        sess.run(train_step, feed_dict={x: X[start:end], y_: Y_[start:end]})
        if i % 500 == 0:
            print("After %d training steps,w1 is:" % (i))
            print(sess.run(w1), "\n")

    print("Final w1 is :\n", sess.run(w1))

运行结果

After 19500 training steps,w1 is:
[[1.0043712 ]
[0.99488556]]

Final w1 is :
[[1.0043175 ]
[0.99481463]]