import numpy as np
import tensorflow as tf
import tqdm
from sklearn.model_selection import train_test_split
from tensorflow.python.framework import ops
ops.reset_default_graph()
x = np.linspace(0, 10, 1000, dtype='float32')
y = np.sin(x) + np.random.normal(size=len(x))
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=.3)
x_ = tf.placeholder(name="input", shape=None, dtype=np.float32)
y_ = tf.placeholder(name="output", shape=None, dtype=np.float32)
w = tf.Variable(tf.random_normal([]), name='w')
b = tf.Variable(tf.random_normal([]), name='bias')
model_output = tf.add(tf.multiply(x_, w), b)
loss = tf.reduce_mean(tf.pow(y_ - model_output, 2), name='loss')
train_step = tf.train.GradientDescentOptimizer(0.0025).minimize(loss)
summary_writer = tf.summary.FileWriter('linreg')
for value in [x_, model_output, w, loss]:
    tf.summary.scalar(value.op.name, value)
summaries = tf.summary.merge_all()
n_epochs = 100
train_errors = []
test_errors = []
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in tqdm.tqdm(range(n_epochs)):  # 100
        _, train_err = sess.run([train_step, loss],
                                feed_dict={x_: X_train, y_: y_train})
        train_errors.append(train_err)
        test_errors.append(
            sess.run(loss, feed_dict={x_: X_test, y_: y_test}))
        summary_writer.add_summary(sess.run(summaries), i)
I got this:
InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'input' with dtype float
[[Node: input = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
So, if I understand correctly, it is asking me for a feed_dict; OK, let's modify the last line:
summary_writer.add_summary(sess.run(summaries, feed_dict={x_: X_train, y_: y_train}), i)
Now we get:
InvalidArgumentError (see above for traceback): tags and values not the same shape: [] != [700] (tag 'input_1')
[[Node: input_1 = ScalarSummary[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/cpu:0"](input_1/tags, _recv_input_0)]]
So the weight wants to have the same shape as x; I can do that:
w = tf.Variable(tf.random_normal([700]), name='w')
But then what about X_test? It only has 300 rows:
InvalidArgumentError (see above for traceback): Incompatible shapes: [300] vs. [700]
[[Node: Mul = Mul[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/cpu:0"](_recv_input_0, w/read)]]
Should I change w's shape dynamically? Or keep a separate w1 and w2 for training and testing? And what about the tensors?
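(Side note, a sketch rather than anything from the original post: tf.summary.scalar only accepts rank-0 tensors, so resizing w is not the way out. Keeping w and b as scalars and logging the batched tensors with tf.summary.histogram would look roughly like this:)
# tf.summary.scalar wants rank-0 tensors; batched tensors go to histograms.
for value in [w, b, loss]:                    # rank-0 tensors
    tf.summary.scalar(value.op.name, value)
for value in [x_, model_output]:              # shape [batch_size]
    tf.summary.histogram(value.op.name, value)
summaries = tf.summary.merge_all()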
========================================================================
Shape time.
After specifying shapes for the variables and placeholders:
x_ = tf.placeholder(name="input", shape=[None, 1], dtype=np.float32)
y_ = tf.placeholder(name="output", shape=[None, 1], dtype=np.float32)
w = tf.Variable(tf.random_normal([1, 1]), name='w')
b = tf.Variable(tf.random_normal([1]), name='bias')
We can see that the data has to be shaped accordingly as well:
ValueError: Cannot feed value of shape (700,) for Tensor 'input:0', which has shape '(?, 1)'
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in tqdm.tqdm(range(n_epochs)):
        _, train_err, summ = sess.run([train_step, loss, summaries],
                                      feed_dict={x_: X_train.reshape(len(X_train), 1),
                                                 y_: y_train.reshape(len(y_train), 1)})
        summary_writer.add_summary(summ, i)
        train_errors.append(train_err)
        test_errors.append(
            sess.run(loss, feed_dict={x_: X_test.reshape(len(X_test), 1),
                                      y_: y_test.reshape(len(y_test), 1)}))
And the current error:
InvalidArgumentError (see above for traceback): tags and values not the same shape: [] != [1,1] (tag 'w_1')
[[Node: w_1 = ScalarSummary[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/cpu:0"](w_1/tags, w/read)]]
Now I don't even have a tensor with shape [] anymore.
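(A possible workaround, a sketch on my part and not the route the post ends up taking: squeeze the [1, 1] weight down to rank 0 before handing it to tf.summary.scalar.)
tf.summary.scalar('w', tf.squeeze(w))   # [1, 1] -> []
tf.summary.scalar('b', tf.squeeze(b))   # [1]    -> []
tf.summary.scalar('loss', loss)         # already rank-0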
========================================================================
Conclusion time.
tf.summary.scalar([value.op.name], value)
won't do it, because the first (name) argument of tf.summary.scalar expects a string or bytes and raises an error otherwise.
So, one way or another, the summarized value has to have shape []; let's accept that and change the code slightly:
w = tf.Variable(tf.random_normal([]), name='w')
b = tf.Variable(tf.random_normal([]), name='bias')
...
for value in [w, b, loss]:
    tf.summary.scalar(value.op.name, value)
And it finally works.
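Putting the pieces together, a rough end-to-end sketch of the version that finally works (TF 1.x graph mode; names follow the code above, and only the genuinely scalar tensors get scalar summaries):
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split

tf.reset_default_graph()

# data: noisy sine wave, 70/30 train/test split
x = np.linspace(0, 10, 1000, dtype='float32')
y = np.sin(x) + np.random.normal(size=len(x))
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=.3)

# graph: scalar weight and bias, fed with 1-D arrays
x_ = tf.placeholder(name="input", shape=None, dtype=tf.float32)
y_ = tf.placeholder(name="output", shape=None, dtype=tf.float32)
w = tf.Variable(tf.random_normal([]), name='w')
b = tf.Variable(tf.random_normal([]), name='bias')
model_output = tf.add(tf.multiply(x_, w), b)
loss = tf.reduce_mean(tf.pow(y_ - model_output, 2), name='loss')
train_step = tf.train.GradientDescentOptimizer(0.0025).minimize(loss)

# summaries: rank-0 tensors only
for value in [w, b, loss]:
    tf.summary.scalar(value.op.name, value)
summaries = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter('linreg')

train_errors, test_errors = [], []
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(100):
        # train step, loss and summaries computed in a single run
        _, train_err, summ = sess.run([train_step, loss, summaries],
                                      feed_dict={x_: X_train, y_: y_train})
        summary_writer.add_summary(summ, i)
        train_errors.append(train_err)
        test_errors.append(sess.run(loss, feed_dict={x_: X_test, y_: y_test}))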
Solution:
x_ is a placeholder that will hold your input values. It has no fixed value inside the graph; the only values it ever takes are the ones you feed it. So you simply need to use:
summary_writer.add_summary(sess.run(summaries, feed_dict={x_: X_train, y_: y_train}), i)
But doing it that way computes everything twice. What you should use instead is:
_, train_err, summ = sess.run([train_step, loss, summaries],
                              feed_dict={x_: X_train, y_: y_train})
summary_writer.add_summary(summ, i)
That way, your training step and the summary computation happen in a single run.
Edit:
It looks like you simply have shape problems, and only the TensorBoard summaries make them visible...
> Your placeholder x_ should be declared with shape [None, n_features] (here n_features = 1, so you could also just use [None]; I'm not really sure what a bare None does, so maybe your problem comes from that, maybe not...).
> y should have shape [None, n_outputs], so [None, 1] here. None or [None] probably work as well.
> w should have shape [n_features, n_outputs], so [1, 1] in your case. You cannot shape it according to the batch size; that makes no sense in machine-learning terms (at least if you are trying to learn sin(x) from x alone rather than from the rest of the batch, which would not make much sense).
> b should have shape [n_outputs], so [1] here.
Does it work once you specify all of these shapes?
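(With all of those shapes spelled out, the graph would look roughly like this; a sketch, not code from the original answer, with tf.matmul replacing the element-wise multiply so the [None, 1] x [1, 1] shapes compose:)
# all shapes explicit, with n_features = n_outputs = 1
x_ = tf.placeholder(name="input", shape=[None, 1], dtype=tf.float32)
y_ = tf.placeholder(name="output", shape=[None, 1], dtype=tf.float32)
w = tf.Variable(tf.random_normal([1, 1]), name='w')    # [n_features, n_outputs]
b = tf.Variable(tf.random_normal([1]), name='bias')    # [n_outputs]
model_output = tf.matmul(x_, w) + b                    # [None, 1]
loss = tf.reduce_mean(tf.square(y_ - model_output), name='loss')
# the data then has to be fed as column vectors, e.g.
# feed_dict={x_: X_train.reshape(-1, 1), y_: y_train.reshape(-1, 1)}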
Edit 2:
tf.summary.scalar(value.op.name, value)
is the same as
tf.summary.scalar([value.op.name], value)