# Load the training data from the Excel workbook.
book = xlrd.open_workbook(DATA_FILE, encoding_override='utf-8')
sheet = book.sheet_by_index(0)
# Skip the header row (row 0); each remaining row is one (x, y) sample.
data = np.asarray([sheet.row_values(row) for row in range(1, sheet.nrows)])
n_samples = sheet.nrows - 1
# Graph inputs: scalar placeholders for one sample's feature X and label Y.
X = tf.placeholder(dtype=tf.float32, name='X')
Y = tf.placeholder(dtype=tf.float32, name='Y')

# Trainable parameters of the fitted line y = w*x + b.
# Both are rank-0 (scalar) variables, initialized to 0.0.
w = tf.Variable(0.0, name='weights')
b = tf.Variable(0.0, name='bias')
# Model: the linear prediction w*x + b.
Y_predicted = X * w + b
# Loss: squared error between the label and the prediction.
loss = tf.square(Y - Y_predicted, name='loss')
# Optimizer: plain SGD minimizing the squared-error loss.
# FIX: `optimizer` was fetched in sess.run() below but never defined
# anywhere in the script, which raises NameError on the first step.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)

with tf.Session() as sess:
    # Variables must be initialized inside a session before use.
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('./graphs/linear_reg', sess.graph)
    # Train for 50 epochs, feeding samples one at a time.
    # (A real pipeline would feed mini-batches instead.)
    for i in range(50):
        total_loss = 0
        for x, y in data:
            # Run one optimization step and fetch the current loss value.
            _, l = sess.run([optimizer, loss], feed_dict={X: x, Y: y})
            total_loss += l
        print("Epoch {0}: {1}".format(i, total_loss / n_samples))
    writer.close()
    # Materialize the trained parameters as plain Python floats,
    # rebinding the names `w` and `b` for the plotting code below.
    w, b = sess.run([w, b])
# Plot the raw samples alongside the fitted regression line.
X, Y = data[:, 0], data[:, 1]
plt.plot(X, Y, 'bo', label='Real data')
plt.plot(X, X * w + b, 'r', label='Predicted data')
plt.legend()
plt.show()
import tensorflow as tf from tensorflow.examples.tutorials.mnist import input_data from tensorflow.python.training import learning_rate_decay from tensorflow.contrib.factorization.examples.mnist import fill_feed_dict import time
# Placeholders for one mini-batch of MNIST images and labels.
# Each 28x28 image is flattened into a 784-dimensional vector;
# each label is a one-hot vector over the 10 digit classes.
# NOTE(review): `batch_size` is not defined in this chunk — it is
# assumed to be set elsewhere in the file; verify before running.
X = tf.placeholder(tf.float32, [batch_size, 784])
Y = tf.placeholder(tf.float32, [batch_size, 10])
# Trainable parameters: one weight per (input pixel, class) pair plus
# one bias per class. Weights start from a small random normal
# (stddev 0.01); biases start at zero.
w = tf.Variable(tf.random_normal(shape=[784, 10], stddev=0.01), name='weights')
b = tf.Variable(tf.zeros(shape=[1, 10]), name='bias')
# Unnormalized class scores (logits) for every image in the batch.
logits = tf.matmul(X, w) + b
# Multi-class loss: softmax cross-entropy between the logits and the
# one-hot labels, averaged over the batch.
entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y)
loss = tf.reduce_mean(entropy)