I am trying to build a basic RNN, but I get errors when I try to use the network after training.
I keep the network architecture in a function called inference:
def inference(inp):
    with tf.name_scope("inference"):
        layer = SimpleRNN(1, activation='sigmoid', return_sequences=False)(inp)
        layer = Dense(1)(layer)
    return layer
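For completeness, the snippets assume roughly these imports (I am on TensorFlow 1.x, so the exact Keras import path may differ by version):

import tqdm
import tensorflow as tf
from tensorflow.keras.layers import SimpleRNN, Dense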
but every time I call it, another set of variables gets created, even though I use the same variable scope. Here is training:
def train(sess, seq_len=2, epochs=100):
    x_input, y_input = generate_data(seq_len)

    with tf.name_scope('train_input'):
        x = tf.placeholder(tf.float32, (None, seq_len, 1))
        y = tf.placeholder(tf.float32, (None, 1))

    with tf.variable_scope('RNN'):
        output = inference(x)

    with tf.name_scope('training'):
        loss = tf.losses.mean_squared_error(labels=y, predictions=output)
        train_op = tf.train.RMSPropOptimizer(learning_rate=0.1).minimize(loss=loss, global_step=tf.train.get_global_step())

    with sess.as_default():
        sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])

        for i in tqdm.trange(epochs):
            ls, res, _ = sess.run([loss, output, train_op], feed_dict={x: x_input, y: y_input})
            if i % 100 == 0:
                print(f'{ls}: {res[10]} - {y_input[10]}')
            x_input, y_input = generate_data(seq_len)
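generate_data is a small helper in my script; if it matters, a minimal stand-in that matches the placeholder shapes above ((batch, seq_len, 1) inputs, (batch, 1) targets) could look like this (not my actual data):

import numpy as np

def generate_data(seq_len, batch_size=32):
    # Random sequences; the target is just the sum of each sequence,
    # only so that there is something to regress against.
    x = np.random.rand(batch_size, seq_len, 1).astype(np.float32)
    y = x.sum(axis=1)  # shape (batch_size, 1)
    return x, y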
and prediction:
def predict_signal(sess, x, seq_len):
    # Preparing signal (omitted)

    # Predict
    inp = tf.convert_to_tensor(prepared_signal, tf.float32)
    with sess.as_default():
        with tf.variable_scope('RNN', reuse=True) as scope:
            output = inference(inp)
        result = output.eval()
    return result
I have spent a couple of hours reading about variable scopes by now, but when I run prediction I still get the error Attempting to use uninitialized value RNN_1/inference/simple_rnn_2/kernel, with the number after RNN_1 increasing on every call.
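For reference, my mental model comes from the plain tf.get_variable reuse example in the docs, where asking for the same name in a reused scope returns the existing variable instead of creating a new one:

import tensorflow as tf

# What I expected to happen, based on the tf.variable_scope docs
# (plain tf.get_variable here, no Keras layers involved):
with tf.variable_scope('demo'):
    v1 = tf.get_variable('w', shape=[1])

with tf.variable_scope('demo', reuse=True):
    v2 = tf.get_variable('w', shape=[1])

assert v1 is v2  # the same variable is returned, nothing new is created

That is the behaviour I expected the layers inside inference to follow as well.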