Category: Python/Ruby
2019-06-15 16:34:10
import simple_and_naive_tensorflow as tf
import numpy as np

# Prepare train data
# w=2, b=10, err=0.33
train_X = np.linspace(-1, 1, 100)
train_Y = 2 * train_X + np.random.randn(*train_X.shape) * 0.33 + 10

# Define the model
X = tf.placeholder("float", "X")
Y = tf.placeholder("float", "Y")
w = tf.Variable(0.0, name="weight")
b = tf.Variable(0.0, name="bias")
loss = tf.square(Y - X * w - b)
train_op = tf.GradientDescentOptimizer(0.01).minimize(loss)

# Create session to run
with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())

    epoch = 1
    for i in range(10):
        for (x, y) in zip(train_X, train_Y):
            _, w_value, b_value = sess.run([train_op, w, b], feed_dict={X: x, Y: y})
        print("Epoch: {}, w: {}, b: {}".format(epoch, w_value, b_value))
        epoch += 1
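For readers who do not have the simple_and_naive_tensorflow module at hand, the following is a minimal plain-NumPy sketch of what the per-sample train_op above computes, assuming the standard squared-loss gradient and the same learning rate of 0.01; the names lr and err are introduced here only for illustration and are not part of the original code.

import numpy as np

# Same synthetic data: y = 2*x + 10 plus noise with std 0.33
train_X = np.linspace(-1, 1, 100)
train_Y = 2 * train_X + np.random.randn(*train_X.shape) * 0.33 + 10

w, b = 0.0, 0.0
lr = 0.01  # same learning rate as GradientDescentOptimizer(0.01)
for epoch in range(1, 11):
    for x, y in zip(train_X, train_Y):
        err = y - (x * w + b)      # residual for this sample
        w += lr * 2 * err * x      # d(loss)/dw = -2 * err * x
        b += lr * 2 * err          # d(loss)/db = -2 * err
    print("Epoch: {}, w: {}, b: {}".format(epoch, w, b))

As with the graph version, after 10 passes over the data the printed w and b should approach the true values 2 and 10 used to generate the training set.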