I'm trying to use TensorBoard to visualize a DNN image classifier. I'm fairly sure the directory path is correct, but no data shows up. When I run
tensorboard --inspect --logdir='PATH/'
it returns: No event files found within logdir 'PATH/'
I think there must be something wrong with my code.
Graph:
batch_size = 500

graph = tf.Graph()
with graph.as_default():
    # Input data. For the training data, we use a placeholder that will be fed
    # at run time with a training minibatch.
    with tf.name_scope('train_input'):
        tf_train_dataset = tf.placeholder(tf.float32,
                                          shape=(batch_size, image_size * image_size),
                                          name='train_x_input')
        tf_train_labels = tf.placeholder(tf.float32,
                                         shape=(batch_size, num_labels),
                                         name='train_y_input')
    with tf.name_scope('validation_input'):
        tf_valid_dataset = tf.constant(valid_dataset, name='valid_x_input')
        tf_test_dataset = tf.constant(test_dataset, name='valid_y_input')

    # Variables.
    with tf.name_scope('layer'):
        with tf.name_scope('weights'):
            weights = tf.Variable(
                tf.truncated_normal([image_size * image_size, num_labels]), name='W')
            variable_summaries(weights)
        with tf.name_scope('biases'):
            biases = tf.Variable(tf.zeros([num_labels]), name='B')
            variable_summaries(biases)

    # Training computation.
    with tf.name_scope('Wx_plus_b'):
        logits = tf.matmul(tf_train_dataset, weights) + biases
        tf.summary.histogram('logits', logits)
    with tf.name_scope('loss'):
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits),
            name='loss')
        tf.summary.histogram('loss', loss)
        tf.summary.scalar('loss_scalar', loss)

    # Optimizer.
    with tf.name_scope('optimizer'):
        optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

    # Predictions for the training, validation, and test data.
    train_prediction = tf.nn.softmax(logits)
    valid_prediction = tf.nn.softmax(tf.matmul(tf_valid_dataset, weights) + biases)
    test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
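The graph above calls a variable_summaries helper that isn't shown in the question. As a point of reference, a minimal sketch of what it presumably looks like, assuming it follows the standard TensorFlow summaries-tutorial helper (this helper body is an assumption, not code from the question):

def variable_summaries(var):
    # Attach mean/stddev/min/max scalars and a histogram to a tensor so it
    # shows up in TensorBoard (mirrors the TensorFlow summaries tutorial).
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.histogram('histogram', var)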
Run:
num_steps = 1001

t1 = time.time()
with tf.Session(graph=graph) as session:
    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter('C:/Users/Dr_Chenxy/Documents/pylogs', session.graph)
    tf.global_variables_initializer().run()
    print("Initialized")
    for step in range(num_steps):
        # Pick an offset within the training data, which has been randomized.
        # Note: we could use better randomization across epochs.
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)  # 1*128 % (200000 - 128)
        # Generate a minibatch.
        batch_data = train_dataset[offset:(offset + batch_size), :]    # choose training set for this iteration
        batch_labels = train_labels[offset:(offset + batch_size), :]   # choose labels for this iteration
        # Prepare a dictionary telling the session where to feed the minibatch.
        # The key of the dictionary is the placeholder node of the graph to be fed,
        # and the value is the numpy array to feed to it.
        feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels}
        _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)
        if (step % 100 == 0):
            print("Minibatch loss at step %d: %f" % (step, l))
            print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
            print("Validation accuracy: %.1f%%" % accuracy(valid_prediction.eval(), valid_labels))
    print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
t2 = time.time()
print('Running time', t2 - t1, 'seconds')
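One thing worth noting about the code above, separate from the path issue resolved below: merged = tf.summary.merge_all() is built but never passed to session.run, and writer.add_summary is never called, so apart from the graph itself no scalar or histogram data would be written to the event file. A minimal, self-contained sketch of the usual pattern (the names and logdir here are hypothetical, not from the question):

import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
    x = tf.placeholder(tf.float32, shape=(), name='x')
    tf.summary.scalar('x_value', x)          # something to log
    merged = tf.summary.merge_all()

with tf.Session(graph=graph) as session:
    writer = tf.summary.FileWriter('logs', session.graph)
    for step in range(10):
        summary = session.run(merged, feed_dict={x: float(step)})
        writer.add_summary(summary, step)    # this call actually writes the event data
    writer.close()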
Solved. For those who, like me, are not comfortable with the command line: the problem was that on the command line you should not wrap your directory in quotes (''). Suppose your data lives at 'X:\X\file.x'. First change to X:\ on the command line, then type:
tensorboard --logdir=X/
NOT
tensorboard --logdir='.X/'
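For example, with the logdir passed to the FileWriter in the question (C:/Users/Dr_Chenxy/Documents/pylogs), that would mean changing to the root of the C: drive first and then giving the path relative to it, without quotes:

cd C:\
tensorboard --logdir=Users/Dr_Chenxy/Documents/pylogs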