Python tensorflow module: argmax() example source code
The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.argmax().
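As a quick orientation before the excerpts: tf.argmax returns the index of the largest entry along the given axis (int64 by default). A minimal TF 1.x sketch, independent of the projects quoted below:

import tensorflow as tf

logits = tf.constant([[0.1, 2.0, 0.3],
                      [4.0, 0.5, 0.6]])
pred = tf.argmax(logits, axis=1)  # per-row index of the maximum -> [1, 0]

with tf.Session() as sess:
    print(sess.run(pred))  # [1 0]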
def main():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    # Placeholder that will be fed image data.
    x = tf.placeholder(tf.float32, [None, 784])
    # Placeholder that will be fed the correct labels.
    y_ = tf.placeholder(tf.float32, [None, 10])
    # Define weight and bias.
    W = weight_variable([784, 10])
    b = bias_variable([10])
    # Here we define our model, which uses softmax regression.
    y = tf.nn.softmax(tf.matmul(x, W) + b)
    # Define our loss.
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
    # Define our optimizer.
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
    # Define accuracy.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    correct_prediction = tf.cast(correct_prediction, tf.float32)
    accuracy = tf.reduce_mean(correct_prediction)
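The excerpt stops after building the accuracy op. A minimal continuation, assuming the same mnist object and the ops defined above (this loop is a sketch, not part of the original source):

    # Hypothetical continuation of main():
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(1000):
            batch_xs, batch_ys = mnist.train.next_batch(100)
            sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
        # accuracy compares tf.argmax(y, 1) with tf.argmax(y_, 1) per example
        print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))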
def sample_dtype(self):
    return tf.int32

# WRONG SECOND DERIVATIVES
# class CategoricalPd(Pd):
#     def __init__(self, logits):
#         self.logits = logits
#         self.ps = tf.nn.softmax(logits)
#     @classmethod
#     def fromflat(cls, flat):
#         return cls(flat)
#     def flatparam(self):
#         return self.logits
#     def mode(self):
#         return U.argmax(self.logits, axis=1)
#     def logp(self, x):
#         return -tf.nn.sparse_softmax_cross_entropy_with_logits(self.logits, x)
#     def kl(self, other):
#         return tf.nn.softmax_cross_entropy_with_logits(other.logits, self.ps) \
#             - tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
#     def entropy(self):
#         return tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
#     def sample(self):
#         u = tf.random_uniform(tf.shape(self.logits))
#         return U.argmax(self.logits - tf.log(-tf.log(u)), axis=1)
def get_loss(l_pred, seg_pred, label, seg, weight, end_points):
    per_instance_label_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=l_pred, labels=label)
    label_loss = tf.reduce_mean(per_instance_label_loss)
    # size of seg_pred is batch_size x point_num x part_cat_num
    # size of seg is batch_size x point_num
    per_instance_seg_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=seg_pred, labels=seg), axis=1)
    seg_loss = tf.reduce_mean(per_instance_seg_loss)
    per_instance_seg_pred_res = tf.argmax(seg_pred, 2)
    # Enforce the transformation as orthogonal matrix
    transform = end_points['transform']  # BxKxK
    K = transform.get_shape()[1].value
    mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0, 2, 1])) - tf.constant(np.eye(K), dtype=tf.float32)
    mat_diff_loss = tf.nn.l2_loss(mat_diff)
    total_loss = weight * seg_loss + (1 - weight) * label_loss + mat_diff_loss * 1e-3
    return total_loss, label_loss, per_instance_label_loss, seg_loss, per_instance_seg_loss, per_instance_seg_pred_res
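Here tf.argmax(seg_pred, 2) collapses the part-category axis into a per-point label. A toy shape check under assumed sizes (batch 4, 1024 points, 50 parts; the sizes are illustrative only):

import numpy as np
import tensorflow as tf

seg_pred = tf.constant(np.random.randn(4, 1024, 50), dtype=tf.float32)
per_point_labels = tf.argmax(seg_pred, 2)  # shape (4, 1024), dtype int64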
def _region_classification(self, fc7, is_training, initializer, initializer_bbox):
    cls_score = slim.fully_connected(fc7, self._num_classes,
                                     weights_initializer=initializer,
                                     trainable=is_training,
                                     activation_fn=None, scope='cls_score')
    cls_prob = self._softmax_layer(cls_score, "cls_prob")
    cls_pred = tf.argmax(cls_score, axis=1, name="cls_pred")
    bbox_pred = slim.fully_connected(fc7, self._num_classes * 4,
                                     weights_initializer=initializer_bbox,
                                     trainable=is_training,
                                     activation_fn=None, scope='bbox_pred')
    self._predictions["cls_score"] = cls_score
    self._predictions["cls_pred"] = cls_pred
    self._predictions["cls_prob"] = cls_prob
    self._predictions["bbox_pred"] = bbox_pred
    return cls_prob, bbox_pred
def test(self, input_path, output_path):
    if not self.load()[0]:
        raise Exception("No model is found, please train first")
    mean, std = self.sess.run([self.mean, self.std])
    images = np.empty((1, self.im_size[0], self.im_size[1], self.im_size[2], 1), dtype=np.float32)
    # labels = np.empty((1, self.im_size[0], self.im_size[1], self.im_size[2], self.nclass), dtype=np.float32)
    for f in input_path:
        images[0, ..., 0], read_info = read_testing_inputs(f, self.roi[0], self.im_size, output_path)
        probs = self.sess.run(self.probs, feed_dict={self.images: (images - mean) / std,
                                                     self.is_training: True,
                                                     self.keep_prob: 1})
        # print(self.roi[1] + os.path.basename(f) + ":" + str(dice))
        output_file = os.path.join(output_path, self.roi[1] + '_' + os.path.basename(f))
        f_h5 = h5py.File(output_file, 'w')
        if self.roi[0] < 0:
            f_h5['predictions'] = restore_labels(np.argmax(probs[0], 3), read_info)
        else:
            f_h5['probs'] = restore_labels(probs[0, ..., 1], read_info)
        f_h5.close()
def __init__(self, channels=3, n_class=2, cost="cross_entropy", cost_kwargs={}, **kwargs):
    tf.reset_default_graph()
    self.n_class = n_class
    self.summaries = kwargs.get("summaries", True)
    self.x = tf.placeholder("float", shape=[None, None, None, channels])
    self.y = tf.placeholder("float", shape=[None, None, None, n_class])
    self.keep_prob = tf.placeholder(tf.float32)  # dropout (keep probability)
    logits, self.variables, self.offset = create_conv_net(self.x, self.keep_prob, channels, n_class, **kwargs)
    self.cost = self._get_cost(logits, cost, cost_kwargs)
    self.gradients_node = tf.gradients(self.cost, self.variables)
    self.cross_entropy = tf.reduce_mean(cross_entropy(tf.reshape(self.y, [-1, n_class]),
                                                      tf.reshape(pixel_wise_softmax_2(logits), [-1, n_class])))
    self.predicter = pixel_wise_softmax_2(logits)
    self.correct_pred = tf.equal(tf.argmax(self.predicter, 3), tf.argmax(self.y, 3))
    self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))
def add_evaluation_step(graph, final_tensor_name, ground_truth_tensor_name):
    """Inserts the operations we need to evaluate the accuracy of our results.
    Args:
        graph: Container for the existing model's Graph.
        final_tensor_name: Name string for the new final node that produces results.
        ground_truth_tensor_name: Name string for the node we feed ground truth data
            into.
    Returns:
        The evaluation step.
    """
    result_tensor = graph.get_tensor_by_name(ensure_name_has_port(
        final_tensor_name))
    ground_truth_tensor = graph.get_tensor_by_name(ensure_name_has_port(
        ground_truth_tensor_name))
    correct_prediction = tf.equal(
        tf.argmax(result_tensor, 1), tf.argmax(ground_truth_tensor, 1))
    evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
    return evaluation_step
def extract_argmax_and_embed(embedding, output_projection=None):
    """
    Get a loop_function that extracts the previous symbol and embeds it. Used by the decoder.
    :param embedding: embedding tensor for symbols
    :param output_projection: None or a pair (W, B). If provided, each fed previous output
        will first be multiplied by W and have B added.
    :return: A loop function
    """
    def loop_function(prev, _):
        if output_projection is not None:
            prev = tf.matmul(prev, output_projection[0]) + output_projection[1]
        prev_symbol = tf.argmax(prev, 1)  # index of the most likely symbol
        emb_prev = tf.gather(embedding, prev_symbol)  # look up that symbol's embedding
        return emb_prev
    return loop_function

# Used for RNN decoding: at test time, step t's output is fed as step t+1's input.
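For context, such a loop_function is typically handed to a TF 1.x decoder such as tf.contrib.legacy_seq2seq.rnn_decoder; everything named below (embedding, W_proj, b_proj, decoder_inputs, initial_state, cell) is assumed to exist and is only illustrative:

# Hypothetical wiring: greedy decoding that feeds step t's argmax into step t+1.
loop_fn = extract_argmax_and_embed(embedding, output_projection=(W_proj, b_proj))
outputs, state = tf.contrib.legacy_seq2seq.rnn_decoder(
    decoder_inputs, initial_state, cell, loop_function=loop_fn)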
def inference(self):
    """Main computation graph here: 1. embedding layer, 2. convolutional layer, 3. max-pooling, 4. softmax layer."""
    # 1.=====>get embedding of words in the sentence
    self.embedded_words1 = tf.nn.embedding_lookup(self.Embedding, self.input_x)  # [None, sentence_length, embed_size]
    self.sentence_embeddings_expanded1 = tf.expand_dims(self.embedded_words1, -1)  # [None, sentence_length, embed_size, 1]. Expand dimension to meet the input requirement of 2d-conv.
    self.embedded_words2 = tf.nn.embedding_lookup(self.Embedding, self.input_x2)  # [None, sentence_length, embed_size]
    self.sentence_embeddings_expanded2 = tf.expand_dims(self.embedded_words2, -1)  # [None, sentence_length, embed_size, 1]. Expand dimension to meet the input requirement of 2d-conv.
    # 2.1 get features of sentence1
    h1 = self.conv_relu_pool_dropout(self.sentence_embeddings_expanded1, name_scope_prefix="s1")  # [None, num_filters_total]
    # 2.2 get features of sentence2
    h2 = self.conv_relu_pool_dropout(self.sentence_embeddings_expanded2, name_scope_prefix="s2")  # [None, num_filters_total]
    # 3. concat features
    h = tf.concat([h1, h2], axis=1)  # [None, num_filters_total*2]
    # 4. logits (use linear layer) and predictions (argmax)
    with tf.name_scope("output"):
        logits = tf.matmul(h, self.W_projection) + self.b_projection  # shape: [None, num_classes] == tf.matmul([None, num_filters_total*2], [num_filters_total*2, num_classes])
    return logits
def build_loss(self, inp, output):
    y_gt = inp['y_gt']
    y_out = output['y_out']
    ce = tfplus.nn.CE()({'y_gt': y_gt, 'y_out': y_out})
    num_ex_f = tf.to_float(tf.shape(inp['x'])[0])
    ce = tf.reduce_sum(ce) / num_ex_f
    self.add_loss(ce)
    total_loss = self.get_loss()
    self.register_var('loss', total_loss)
    ans = tf.argmax(y_gt, 1)
    correct = tf.equal(ans, tf.argmax(y_out, 1))
    top5_acc = tf.reduce_sum(tf.to_float(
        tf.nn.in_top_k(y_out, ans, 5))) / num_ex_f
    self.register_var('top5_acc', top5_acc)
    acc = tf.reduce_sum(tf.to_float(correct)) / num_ex_f
    self.register_var('acc', acc)
    return total_loss
def build_loss_grad(self, inp, output):
    y_gt = inp['y_gt']
    y_out = output['y_out']
    ce = tfplus.nn.CE()({'y_gt': y_gt, 'y_out': y_out})
    num_ex_f = tf.to_float(tf.shape(inp['x'])[0])
    ce = tf.reduce_sum(ce) / num_ex_f
    self.add_loss(ce)
    learn_rate = self.get_option('learn_rate')
    total_loss = self.get_loss()
    self.register_var('loss', total_loss)
    eps = self.get_option('adam_eps')
    optimizer = tf.train.AdamOptimizer(learn_rate, epsilon=eps)
    global_step = tf.Variable(0.0)
    self.register_var('step', global_step)
    train_step = optimizer.minimize(
        total_loss, global_step=global_step)
    self.register_var('train_step', train_step)
    correct = tf.equal(tf.argmax(y_gt, 1), tf.argmax(y_out, 1))
    acc = tf.reduce_sum(tf.to_float(correct)) / num_ex_f
    self.register_var('acc', acc)
def _build(self):
    # Build the linear layer.
    self._lin = photinia.Linear('LINEAR', self._input_size, self._num_classes).build()
    # Input placeholders.
    x = tf.placeholder(dtype=photinia.D_TYPE, shape=[None, self._input_size])
    y_ = tf.placeholder(dtype=photinia.D_TYPE, shape=[None, self._num_classes])
    # Wire up the layer --- setup.
    y = self._lin.setup(x)
    # Softmax cross-entropy loss.
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
    # Accuracy.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, photinia.D_TYPE))
    # Register the training and prediction slots.
    self._add_slot(
        'train',
        outputs=loss,
        inputs=(x, y_),
        updates=tf.train.GradientDescentOptimizer(0.5).minimize(loss)
    )
    self._add_slot(
        'predict',
        outputs=accuracy,
        inputs=(x, y_)
    )
def gumbel_softmax(logits, temperature, hard=False):
    """Sample from the Gumbel-softmax distribution and optionally discretize.
    Args:
        logits: [batch_size, n_class] unnormalized log-probs
        temperature: non-negative scalar
        hard: if True, take argmax, but differentiate w.r.t. soft sample y
    Returns:
        [batch_size, n_class] sample from the Gumbel-softmax distribution.
        If hard=True, then the returned sample will be one-hot, otherwise it will
        be a probability distribution that sums to 1 across classes.
    """
    y = gumbel_softmax_sample(logits, temperature)
    if hard:
        k = tf.shape(logits)[-1]
        # y_hard = tf.cast(tf.one_hot(tf.argmax(y, 1), k), y.dtype)
        y_hard = tf.cast(tf.equal(y, tf.reduce_max(y, 1, keep_dims=True)), y.dtype)
        y = tf.stop_gradient(y_hard - y) + y
    return y
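The helper gumbel_softmax_sample is not part of this excerpt; a sketch of the definition it is usually paired with (eps guards against log(0)):

def sample_gumbel(shape, eps=1e-20):
    # Gumbel(0, 1) noise via the inverse-CDF trick: -log(-log(Uniform)).
    u = tf.random_uniform(shape, minval=0, maxval=1)
    return -tf.log(-tf.log(u + eps) + eps)

def gumbel_softmax_sample(logits, temperature):
    # Softmax over Gumbel-perturbed logits; lower temperature -> closer to one-hot.
    return tf.nn.softmax((logits + sample_gumbel(tf.shape(logits))) / temperature)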
def __init__(self, lr, s_size, a_size):
    self.state_in = tf.placeholder(shape=[1], dtype=tf.int32)
    state_in_OH = slim.one_hot_encoding(self.state_in, s_size)
    output = slim.fully_connected(state_in_OH,
                                  a_size,
                                  biases_initializer=None,
                                  activation_fn=tf.nn.sigmoid,
                                  weights_initializer=tf.ones_initializer())
    self.output = tf.reshape(output, [-1])
    self.chosen_action = tf.argmax(self.output, 0)
    self.reward_holder = tf.placeholder(shape=[1], dtype=tf.float32)
    self.action_holder = tf.placeholder(shape=[1], dtype=tf.int32)
    self.responsible_weight = tf.slice(self.output, self.action_holder, [1])
    self.loss = -(tf.log(self.responsible_weight) * self.reward_holder)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=lr)
    self.update = optimizer.minimize(self.loss)
def calc_f1_score(predictions, labels):
    predictions = np.argmax(predictions, 1)
    labels = np.argmax(labels, 1)
    tp = fp = tn = fn = 0
    for a, b in zip(predictions, labels):
        if a == 1:
            if a == b:
                tp += 1
            else:
                fp += 1
        else:
            if a == b:
                tn += 1
            else:
                fn += 1
    precision = (tp / (tp + fp)) if (tp + fp) > 0 else 0
    recall = (tp / (tp + fn)) if (tp + fn) > 0 else 0
    f1_score = 2 * ((precision * recall) / (precision + recall)) if (precision + recall) > 0 else 0
    return f1_score

# Argument handling, copy-pasted from tflearn_rnn.py
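A small usage check with toy one-hot arrays (values chosen so the counts are easy to verify by hand):

import numpy as np

preds = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])  # argmax -> [1, 0, 1]
labels = np.array([[0, 1], [0, 1], [1, 0]])             # argmax -> [1, 1, 0]
print(calc_f1_score(preds, labels))  # tp=1, fp=1, fn=1 -> precision=recall=0.5 -> f1=0.5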
def categorical_accuracy_with_variable_timestep(y_true, y_pred):
    # Actually discarding is not needed if the dummy is an all-zeros array
    # (It is indeed encoded in an all-zeros array by
    # CaptionPreprocessing.preprocess_batch)
    y_true = y_true[:, :-1, :]  # discard the last timestep/word (dummy)
    y_pred = y_pred[:, :-1, :]  # discard the last timestep/word (dummy)
    # Flatten the timestep dimension
    shape = tf.shape(y_true)
    y_true = tf.reshape(y_true, [-1, shape[-1]])
    y_pred = tf.reshape(y_pred, [-1, shape[-1]])
    # Discard rows that are all zeros as they represent dummy or padding words.
    is_zero_y_true = tf.equal(y_true, 0)
    is_zero_row_y_true = tf.reduce_all(is_zero_y_true, axis=-1)
    y_true = tf.boolean_mask(y_true, ~is_zero_row_y_true)
    y_pred = tf.boolean_mask(y_pred, ~is_zero_row_y_true)
    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(y_true, axis=1),
                                               tf.argmax(y_pred, axis=1)),
                                      dtype=tf.float32))
    return accuracy

# As Keras stores a function's name as its metric's name
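As that trailing comment notes, Keras reports the metric under the function's name; a hypothetical compile call (model and the other arguments are placeholders):

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=[categorical_accuracy_with_variable_timestep])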
def _extract_argmax_and_embed(embedding, output_projection=None,
                              update_embedding=True):
    """Get a loop_function that extracts the previous symbol and embeds it.
    Args:
        embedding: embedding tensor for symbols.
        output_projection: None or a pair (W, B). If provided, each fed previous
            output will first be multiplied by W and have B added.
        update_embedding: Boolean; if False, the gradients will not propagate
            through the embeddings.
    Returns:
        A loop function.
    """
    def loop_function(prev, _):
        if output_projection is not None:
            prev = nn_ops.xw_plus_b(
                prev, output_projection[0], output_projection[1])
        prev_symbol = math_ops.argmax(prev, 1)
        # Note that gradients will not propagate through the second parameter of
        # embedding_lookup.
        emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)
        if not update_embedding:
            emb_prev = array_ops.stop_gradient(emb_prev)
        return emb_prev
    return loop_function
def model_argmax(sess, x, predictions, samples, feed=None):
    """
    Helper function that computes the current class prediction
    :param sess: TF session
    :param x: the input placeholder
    :param predictions: the model's symbolic output
    :param samples: numpy array with input samples (dims must match x)
    :param feed: An optional dictionary that is appended to the feeding
        dictionary before the session runs. Can be used to feed
        the learning phase of a Keras model, for instance.
    :return: the argmax output of predictions, i.e. the current predicted class
    """
    feed_dict = {x: samples}
    if feed is not None:
        feed_dict.update(feed)
    probabilities = sess.run(predictions, feed_dict)
    if samples.shape[0] == 1:
        return np.argmax(probabilities)
    else:
        return np.argmax(probabilities, axis=1)
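A usage sketch; model and test_images are hypothetical stand-ins for a network that outputs class probabilities and a batch of inputs:

x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
predictions = model(x)  # assumed symbolic model output
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    classes = model_argmax(sess, x, predictions, test_images)  # integer class ids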
def testTrainEvalWithReuse(self):
    train_batch_size = 5
    eval_batch_size = 2
    height, width = 150, 150
    num_classes = 1000
    train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
    inception.inception_v2(train_inputs, num_classes)
    eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
    logits, _ = inception.inception_v2(eval_inputs, num_classes, reuse=True)
    predictions = tf.argmax(logits, 1)
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        output = sess.run(predictions)
        self.assertEquals(output.shape, (eval_batch_size,))
def testTrainEvalWithReuse(self):
    train_batch_size = 5
    eval_batch_size = 2
    height, width = 150, 150
    num_classes = 1000
    with self.test_session() as sess:
        train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
        inception.inception_resnet_v2(train_inputs, num_classes)
        eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
        logits, _ = inception.inception_resnet_v2(eval_inputs,
                                                  num_classes,
                                                  is_training=False,
                                                  reuse=True)
        predictions = tf.argmax(logits, 1)
        sess.run(tf.global_variables_initializer())
        output = sess.run(predictions)
        self.assertEquals(output.shape, (eval_batch_size,))
def testTrainEvalWithReuse(self):
    train_batch_size = 5
    eval_batch_size = 2
    height, width = 150, 150
    num_classes = 1000
    train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
    inception.inception_v3(train_inputs, num_classes)
    eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
    logits, _ = inception.inception_v3(eval_inputs, num_classes, is_training=False, reuse=True)
    predictions = tf.argmax(logits, 1)
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        output = sess.run(predictions)
        self.assertEquals(output.shape, (eval_batch_size,))

def testTrainEvalWithReuse(self):
    train_batch_size = 5
    eval_batch_size = 2
    height, width = 150, 150
    num_classes = 1000
    train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
    inception.inception_v4(train_inputs, num_classes)
    eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
    logits, _ = inception.inception_v4(eval_inputs, num_classes, is_training=False, reuse=True)
    predictions = tf.argmax(logits, 1)
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        output = sess.run(predictions)
        self.assertEquals(output.shape, (eval_batch_size,))
def __init__(self, embedding_length):
    self._calculator_loom = CalculatorLoom(embedding_length)
    self._labels_placeholder = tf.placeholder(tf.float32)
    self._classifier_weights = tf.Variable(
        tf.truncated_normal([embedding_length, 3],
                            dtype=tf.float32,
                            stddev=1),
        name='classifier_weights')
    self._output_weights = tf.matmul(
        self._calculator_loom.output(), self._classifier_weights)
    self._loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
        logits=self._output_weights, labels=self._labels_placeholder))
    self._true_labels = tf.argmax(self._labels_placeholder, dimension=1)
    self._prediction = tf.argmax(self._output_weights, dimension=1)
    self._accuracy = tf.reduce_mean(tf.cast(
        tf.equal(self._true_labels, self._prediction),
        dtype=tf.float32))
def add_evaluation_step(result_tensor, ground_truth_tensor):
    """Inserts the operations we need to evaluate the accuracy of our results.
    Args:
        result_tensor: The new final node that produces results.
        ground_truth_tensor: The node we feed ground truth data into.
    Returns:
        Tuple of (evaluation step, prediction).
    """
    with tf.name_scope('accuracy'):
        with tf.name_scope('correct_prediction'):
            prediction = tf.argmax(result_tensor, 1)
            correct_prediction = tf.equal(
                prediction, tf.argmax(ground_truth_tensor, 1))
        with tf.name_scope('accuracy'):
            evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar('accuracy', evaluation_step)
    return evaluation_step, prediction
def _get_top_k(scores1, scores2, k, max_span_size, support2question):
    max_support_length = tf.shape(scores1)[1]
    doc_idx, pointer1, topk_scores1 = segment_top_k(scores1, support2question, k)
    # [num_questions * beam_size]
    doc_idx_flat = tf.reshape(doc_idx, [-1])
    pointer_flat1 = tf.reshape(pointer1, [-1])
    # [num_questions * beam_size, support_length]
    scores_gathered2 = tf.gather(scores2, doc_idx_flat)
    if max_span_size < 0:
        pointer_flat1, max_span_size = pointer_flat1 + max_span_size + 1, -max_span_size
    left_mask = misc.mask_for_lengths(tf.cast(pointer_flat1, tf.int32),
                                      max_support_length, mask_right=False)
    right_mask = misc.mask_for_lengths(tf.cast(pointer_flat1 + max_span_size, tf.int32),
                                       max_support_length)
    scores_gathered2 = scores_gathered2 + left_mask + right_mask
    pointer2 = tf.argmax(scores_gathered2, axis=1, output_type=tf.int32)
    topk_score2 = tf.gather_nd(scores2, tf.stack([doc_idx_flat, pointer2], 1))
    return doc_idx, tf.reshape(pointer2, [-1, k]), topk_scores1 + tf.reshape(topk_score2, [-1, k])
def train_neural_network(x):
    prediction = convolutional_neural_network(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    hm_epochs = 10
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        for epoch in range(hm_epochs):
            epoch_loss = 0
            for _ in range(int(mnist.train.num_examples / batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)
        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 256, 256
    num_classes = 1000
    with self.test_session():
        train_inputs = tf.random_uniform(
            (train_batch_size, train_height, train_width, 3))
        logits, _ = vgg.vgg_a(train_inputs)
        self.assertListEqual(logits.get_shape().as_list(),
                             [train_batch_size, num_classes])
        tf.get_variable_scope().reuse_variables()
        eval_inputs = tf.random_uniform(
            (eval_batch_size, eval_height, eval_width, 3))
        logits, _ = vgg.vgg_a(eval_inputs, is_training=False,
                              spatial_squeeze=False)
        self.assertListEqual(logits.get_shape().as_list(),
                             [eval_batch_size, 2, 2, num_classes])
        logits = tf.reduce_mean(logits, [1, 2])
        predictions = tf.argmax(logits, 1)
        self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
def testTrainEvalWithReuse(self):
    train_batch_size, eval_batch_size = 2, 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 256, 256
    num_classes = 1000
    with self.test_session():
        train_inputs = tf.random_uniform((train_batch_size, train_height, train_width, 3))
        logits, _ = vgg.vgg_16(train_inputs)
        self.assertListEqual(logits.get_shape().as_list(), [train_batch_size, num_classes])
        tf.get_variable_scope().reuse_variables()
        eval_inputs = tf.random_uniform((eval_batch_size, eval_height, eval_width, 3))
        logits, _ = vgg.vgg_16(eval_inputs, is_training=False, spatial_squeeze=False)
        predictions = tf.argmax(tf.reduce_mean(logits, [1, 2]), 1)
        self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
def testTrainEvalWithReuse(self):
    train_batch_size = 5
    eval_batch_size = 2
    height, width = 224, 224
    num_classes = 1000
    train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
    inception.inception_v1(train_inputs, num_classes)
    eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
    logits, _ = inception.inception_v1(eval_inputs, num_classes, reuse=True)
    predictions = tf.argmax(logits, 1)
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        output = sess.run(predictions)
        self.assertEquals(output.shape, (eval_batch_size,))
def add_evaluation_step(result_tensor, ground_truth_tensor):
    """Inserts the operations we need to evaluate the accuracy of our results.
    Args:
        result_tensor: The new final node that produces results.
        ground_truth_tensor: The node we feed ground truth data into.
    Returns:
        The evaluation step.
    """
    with tf.name_scope('accuracy'):
        with tf.name_scope('correct_prediction'):
            correct_prediction = tf.equal(tf.argmax(result_tensor, 1),
                                          tf.argmax(ground_truth_tensor, 1))
        with tf.name_scope('accuracy'):
            evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar('accuracy', evaluation_step)
    return evaluation_step
def cal_loss(self):
    one_hot_labels = tf.one_hot(
        self.labels, depth=self.conf.class_num,
        axis=self.channel_axis, name='labels/one_hot')
    losses = tf.losses.softmax_cross_entropy(
        one_hot_labels, self.predictions, scope='loss/losses')
    self.loss_op = tf.reduce_mean(losses, name='loss/loss_op')
    self.decoded_preds = tf.argmax(
        self.predictions, self.channel_axis, name='accuracy/decode_pred')
    correct_prediction = tf.equal(
        self.labels, self.decoded_preds,
        name='accuracy/correct_pred')
    self.accuracy_op = tf.reduce_mean(
        tf.cast(correct_prediction, tf.float32, name='accuracy/cast'),
        name='accuracy/accuracy_op')
    # weights = tf.cast(
    #     tf.greater(self.decoded_preds, 0, name='m_iou/greater'),
    #     tf.int32, name='m_iou/weights')
    weights = tf.cast(
        tf.less(self.labels, self.conf.channel, name='m_iou/greater'),
        tf.int64, name='m_iou/weights')
    labels = tf.multiply(self.labels, weights, name='m_iou/mul')
    self.m_iou, self.miou_op = tf.metrics.mean_iou(
        labels, self.decoded_preds, self.conf.class_num,
        weights, name='m_iou/m_ious')
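One caveat worth noting: tf.metrics.mean_iou accumulates a confusion matrix in local variables, so they must be initialized and the update op run once per batch before the metric is read. A sketch (model, batches, and the placeholder names are assumptions):

sess.run(tf.local_variables_initializer())
for images, labels in batches:  # hypothetical input iterator
    sess.run(model.miou_op, feed_dict={model.inputs: images, model.labels: labels})
print(sess.run(model.m_iou))  # mean IoU over all accumulated batches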
def _joint_positions(self):
    highest_activation = tf.reduce_max(self.sigm_network, [1, 2])
    x = tf.argmax(tf.reduce_max(self.smoothed_sigm_network, 1), 1)
    y = tf.argmax(tf.reduce_max(self.smoothed_sigm_network, 2), 1)
    x = tf.cast(x, tf.float32)
    y = tf.cast(y, tf.float32)
    a = tf.cast(highest_activation, tf.float32)
    scale_coef = (self.image_size / self.heatmap_size)
    x *= scale_coef
    y *= scale_coef
    out = tf.stack([y, x, a])
    return out
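The reduce-then-argmax pattern recovers a heatmap peak's coordinates, which a toy NumPy check makes concrete (a single 8x8 map here, whereas the code above keeps batch and joint axes):

import numpy as np

heatmap = np.zeros((8, 8))
heatmap[3, 5] = 1.0
x = np.argmax(heatmap.max(axis=0))  # column of the peak -> 5
y = np.argmax(heatmap.max(axis=1))  # row of the peak -> 3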
def create(config):
    batch_size = config["batch_size"]
    x = tf.placeholder(tf.float32, [batch_size, X_dims[0], X_dims[1]], name="x")
    y = tf.placeholder(tf.float32, [batch_size, Y_dims], name="y")
    hidden = hidden_layers(config, x)
    output = output_layer(config, hidden)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=y), name="loss")
    output = tf.nn.softmax(output)
    correct_prediction = tf.equal(tf.argmax(output, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    variables = tf.trainable_variables()
    optimizer = tf.train.GradientDescentOptimizer(config['learning_rate']).minimize(loss)
    set_tensor("x", x)
    set_tensor("y", y)
    set_tensor("loss", loss)
    set_tensor("optimizer", optimizer)
    set_tensor("accuracy", accuracy)
def build(self, inputData, ss, keepProb=1):
    self.conv1_1 = self._conv_layer(inputData, params=self._params["depth/conv1_1"])
    self.conv1_2 = self._conv_layer(self.conv1_1, params=self._params["depth/conv1_2"])
    self.pool1 = self._average_pool(self.conv1_2, 'depth/pool')
    self.conv2_1 = self._conv_layer(self.pool1, params=self._params["depth/conv2_1"])
    self.conv2_2 = self._conv_layer(self.conv2_1, params=self._params["depth/conv2_2"])
    self.conv2_3 = self._conv_layer(self.conv2_2, params=self._params["depth/conv2_3"])
    self.conv2_4 = self._conv_layer(self.conv2_3, params=self._params["depth/conv2_4"])
    self.pool2 = self._average_pool(self.conv2_4, 'depth/pool')
    self.fcn1 = self._conv_layer_dropout(self.pool2, params=self._params["depth/fcn1"], keepProb=keepProb)
    self.fcn2 = self._conv_layer_dropout(self.fcn1, params=self._params["depth/fcn2"], keepProb=keepProb)
    self.outputData = self._upscore_layer(self.fcn2, params=self._params["depth/upscore"],
                                          shape=tf.shape(inputData))
    self.outputDataArgMax = tf.argmax(input=self.outputData, dimension=3)
def add_evaluation_step(result_tensor, ground_truth_tensor):
    correct_prediction = tf.equal(
        tf.argmax(result_tensor, 1), tf.argmax(ground_truth_tensor, 1))
    evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.scalar_summary('accuracy', evaluation_step)
    return evaluation_step
def categorical_max(logits, d):
    value = tf.argmax(logits - tf.reduce_max(logits, [1], keep_dims=True), axis=1)
    return tf.one_hot(value, d)

def argmax(x, axis=None):
    return tf.argmax(x, axis=axis)

def categorical_sample_logits(X):
    # https://github.com/tensorflow/tensorflow/issues/456
    U = tf.random_uniform(tf.shape(X))
    return argmax(X - tf.log(-tf.log(U)), axis=1)

def sample(self):
    u = tf.random_uniform(tf.shape(self.logits))
    return tf.argmax(self.logits - tf.log(-tf.log(u)), axis=1)
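The expression argmax(logits - log(-log(u))) in the last two snippets is the Gumbel-max trick: adding Gumbel(0, 1) noise to the logits and taking the argmax draws an exact sample from the corresponding softmax distribution. A quick NumPy check of that claim:

import numpy as np

logits = np.array([1.0, 2.0, 0.5])
probs = np.exp(logits) / np.exp(logits).sum()
u = np.random.uniform(size=(100000, 3))
samples = np.argmax(logits - np.log(-np.log(u)), axis=1)
print(np.bincount(samples, minlength=3) / len(samples))  # ~= probs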
def _region_proposal(self, net_conv, is_training, initializer):
    rpn = slim.conv2d(net_conv, cfg.RPN_CHANNELS, [3, 3], trainable=is_training,
                      weights_initializer=initializer, scope="rpn_conv/3x3")
    self._act_summaries.append(rpn)
    rpn_cls_score = slim.conv2d(rpn, self._num_anchors * 2, [1, 1], trainable=is_training,
                                weights_initializer=initializer,
                                padding='VALID', activation_fn=None, scope='rpn_cls_score')
    # change it so that the score has 2 as its channel size
    rpn_cls_score_reshape = self._reshape_layer(rpn_cls_score, 2, 'rpn_cls_score_reshape')
    rpn_cls_prob_reshape = self._softmax_layer(rpn_cls_score_reshape, "rpn_cls_prob_reshape")
    rpn_cls_pred = tf.argmax(tf.reshape(rpn_cls_score_reshape, [-1, 2]), axis=1, name="rpn_cls_pred")
    rpn_cls_prob = self._reshape_layer(rpn_cls_prob_reshape, self._num_anchors * 2, "rpn_cls_prob")
    rpn_bbox_pred = slim.conv2d(rpn, self._num_anchors * 4, [1, 1], trainable=is_training,
                                weights_initializer=initializer,
                                padding='VALID', activation_fn=None, scope='rpn_bbox_pred')
    if is_training:
        rois, roi_scores = self._proposal_layer(rpn_cls_prob, rpn_bbox_pred, "rois")
        rpn_labels = self._anchor_target_layer(rpn_cls_score, "anchor")
        # Try to have a deterministic order for the computing graph, for reproducibility
        with tf.control_dependencies([rpn_labels]):
            rois, _ = self._proposal_target_layer(rois, roi_scores, "rpn_rois")
    else:
        if cfg.TEST.MODE == 'nms':
            rois, _ = self._proposal_layer(rpn_cls_prob, rpn_bbox_pred, "rois")
        elif cfg.TEST.MODE == 'top':
            rois, _ = self._proposal_top_layer(rpn_cls_prob, rpn_bbox_pred, "rois")
        else:
            raise NotImplementedError
    self._predictions["rpn_cls_score"] = rpn_cls_score
    self._predictions["rpn_cls_score_reshape"] = rpn_cls_score_reshape
    self._predictions["rpn_cls_prob"] = rpn_cls_prob
    self._predictions["rpn_cls_pred"] = rpn_cls_pred
    self._predictions["rpn_bbox_pred"] = rpn_bbox_pred
    self._predictions["rois"] = rois
    return rois