Python keras.backend module: greater() code examples
The following code examples, extracted from open-source Python projects, illustrate how to use keras.backend.greater().
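K.greater(x, y) performs an element-wise x > y comparison and returns a boolean tensor of the same shape. The snippets on this page come from several different projects and date from the Keras 1.x / TensorFlow 0.x era (hence K.lesser and tf.select); unless a snippet shows otherwise, assume imports along the following lines (the exact import style varies per project, so treat this as an assumption). A minimal sketch of greater():

from keras import backend as K
import tensorflow as tf  # the YOLO-style snippets below also use tf.select

x = K.variable([1.0, 2.0, 3.0])
mask = K.greater(x, 2.0)                # element-wise x > 2.0 -> boolean tensor
print(K.eval(K.cast(mask, 'float32')))  # [0. 0. 1.]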
def yoloclassloss(y_true, y_pred, t):
    # lamda_class is a module-level weight for the classification term
    lo = K.square(y_true - y_pred)
    value_if_true = lamda_class * lo
    value_if_false = K.zeros_like(y_true)
    loss1 = tf.select(t, value_if_true, value_if_false)
    # only extract predicted class values at obj locations
    cat = K.sum(tf.select(t, y_pred, K.zeros_like(y_pred)), axis=1)
    # check for valid class values
    objsum = K.sum(y_true, axis=1)
    # if objsum > 0.5, the cell contains at least one valid obj (maybe 1, 2, ... objs)
    isobj = K.greater(objsum, 0.5)
    # only extract class values at obj locations
    valid_cat = tf.select(isobj, cat, K.zeros_like(cat))
    # prevent division by 0
    ave_cat = tf.select(K.greater(K.sum(objsum), 0.5), K.sum(valid_cat) / K.sum(objsum), -1)
    return K.mean(loss1), ave_cat
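The yoloclassloss variants on this page use tf.select, which existed in TensorFlow 0.x and was removed in TensorFlow 1.0; tf.where takes the same (condition, x, y) arguments and is the drop-in replacement. A sketch of the port, assuming TensorFlow >= 1.0:

# tf.select(cond, a, b) in TF 0.x becomes tf.where(cond, a, b) in TF >= 1.0:
loss1 = tf.where(t, value_if_true, value_if_false)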
def get_split_averages(input_tensor, input_mask, indices):
    # Splits the input tensor into three parts based on the indices and
    # returns the average of values prior to the index, the values at the
    # index, and the average of values after the index.
    # input_tensor: (batch_size, input_length, input_dim)
    # input_mask: (batch_size, input_length)
    # indices: (batch_size, 1)
    length_range = K.expand_dims(K.arange(K.shape(input_tensor)[1]), dim=0)  # (1, input_length)
    batched_range = K.repeat_elements(length_range, K.shape(input_tensor)[0], 0)  # (batch_size, input_length)
    tiled_indices = K.repeat_elements(indices, K.shape(input_tensor)[1], 1)  # (batch_size, input_length)
    greater_mask = K.greater(batched_range, tiled_indices)  # (batch_size, input_length)
    lesser_mask = K.lesser(batched_range, tiled_indices)  # (batch_size, input_length)
    equal_mask = K.equal(batched_range, tiled_indices)  # (batch_size, input_length)
    # We also need to mask these masks using the input mask.
    if input_mask is not None:
        greater_mask = switch(input_mask, greater_mask, K.zeros_like(greater_mask))
        lesser_mask = switch(input_mask, lesser_mask, K.zeros_like(lesser_mask))
    post_sum = K.sum(switch(K.expand_dims(greater_mask), input_tensor, K.zeros_like(input_tensor)), axis=1)  # (batch_size, input_dim)
    pre_sum = K.sum(switch(K.expand_dims(lesser_mask), input_tensor, K.zeros_like(input_tensor)), axis=1)  # (batch_size, input_dim)
    values_at_indices = K.sum(switch(K.expand_dims(equal_mask), input_tensor, K.zeros_like(input_tensor)), axis=1)  # (batch_size, input_dim)
    post_normalizer = K.expand_dims(K.sum(greater_mask, axis=1) + K.epsilon(), dim=1)  # (batch_size, 1)
    pre_normalizer = K.expand_dims(K.sum(lesser_mask, axis=1) + K.epsilon(), dim=1)  # (batch_size, 1)
    return K.cast(pre_sum / pre_normalizer, 'float32'), values_at_indices, K.cast(post_sum / post_normalizer, 'float32')
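get_split_averages calls a helper named switch(cond, then_tensor, else_tensor) that is defined elsewhere in its project; K.switch in Keras 1.x expects a scalar condition, so an element-wise version has to be built by hand. A minimal sketch of what such a helper might look like (an assumption, not the original project's code):

def switch(cond, then_tensor, else_tensor):
    # element-wise select: blend the two (cast) tensors with the boolean mask;
    # casting both branches also handles the calls where the branches are
    # themselves boolean masks
    cond_float = K.cast(cond, 'float32')
    return (cond_float * K.cast(then_tensor, 'float32')
            + (1. - cond_float) * K.cast(else_tensor, 'float32'))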
def masked_categorical_accuracy(y_true, y_pred):
    # treat class index 0 as padding and mask those positions out
    mask = K.cast(K.expand_dims(K.greater(K.argmax(y_true, axis=-1), 0), -1), 'float32')
    accuracy = K.cast(K.equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)), 'float32')
    accuracy *= K.squeeze(mask, -1)
    ## Normalize by the number of real segments, using a small non-zero denominator
    ## in cases of padding characters, in order to avoid division by zero
    #accuracy /= (K.mean(mask) + (1e-10 * (1 - K.mean(mask))))
    return accuracy
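Since it keeps the standard (y_true, y_pred) signature, the masked metric can be passed straight to compile; a hypothetical usage (the model, optimizer, and loss are assumptions):

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=[masked_categorical_accuracy])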
def discriminator_loss(y_true, y_pred):
    # mean_squared_error comes from keras.losses;
    # _disc_train_thresh is a module-level threshold constant
    loss = mean_squared_error(y_true, y_pred)
    is_large = K.greater(loss, K.constant(_disc_train_thresh))  # threshold
    is_large = K.cast(is_large, K.floatx())
    # binary-threshold the loss to prevent overtraining the discriminator
    return loss * is_large
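The thresholded loss plugs into compile the same way as any custom loss; once the per-sample MSE drops below _disc_train_thresh, the loss (and hence the gradient) becomes zero and the discriminator stops updating. A hypothetical usage, assuming a Keras model named discriminator:

discriminator.compile(optimizer='adam', loss=discriminator_loss)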
def yoloclassloss(y_true, y_pred, t):
    # classes and lamda_class are module-level constants
    #real_y_true = tf.select(t, y_true, K.zeros_like(y_true))
    lo = K.square(y_true - y_pred)
    value_if_true = lamda_class * lo
    value_if_false = K.zeros_like(y_true)
    # tile the object mask across all class columns
    tlist = []
    for i in range(classes):
        tlist.append(t)
    tt = K.concatenate(tlist, 1)
    loss1 = tf.select(tt, value_if_true, value_if_false)
    ## only extract predicted class values at obj locations
    #nouse_cat = K.sum(tf.select(t, y_pred, K.zeros_like(y_pred)), axis=1)
    ## check for valid class values
    #nouse_objsum = K.sum(y_true, axis=1)
    ## if objsum > 0.5, it contains some valid obj (maybe 1, 2, ... objs)
    #nouse_isobj = K.greater(objsum, 0.5)
    ## only extract class values at obj locations
    #nouse_valid_cat = tf.select(isobj, cat, K.zeros_like(cat))
    ## prevent division by 0
    #nouse_ave_cat = tf.select(K.greater(K.sum(objsum), 0.5), K.sum(valid_cat) / K.sum(objsum), -1)
    t_y_true = K.greater(y_true, 0.5)
    cat = K.sum(tf.select(t_y_true, y_pred, K.zeros_like(y_pred)))
    objsum = K.sum(y_true)
    return K.sum(loss1) / (objsum + 0.0000001), cat / (objsum + 0.0000001), loss1, lo
def overlap(x1, w1, x2, w2):
    # 1-D overlap between two intervals given their centers (x) and widths (w)
    l1 = x1 - w1 / 2
    l2 = x2 - w2 / 2
    left = tf.select(K.greater(l1, l2), l1, l2)   # max of the left edges
    r1 = x1 + w1 / 2
    r2 = x2 + w2 / 2
    right = tf.select(K.greater(r1, r2), r2, r1)  # min of the right edges
    result = right - left                         # negative if the intervals are disjoint
    return result
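A quick numeric check with hypothetical values, on the same TF 0.x-era stack as the snippet: intervals centered at 2.0 and 3.0 with widths 4.0 and 2.0 span [0, 4] and [2, 4], so the overlap length is 2.0. The result goes negative when the intervals are disjoint, so IoU-style callers usually clamp it at zero:

x1, w1 = K.variable(2.0), K.variable(4.0)   # interval [0, 4]
x2, w2 = K.variable(3.0), K.variable(2.0)   # interval [2, 4]
print(K.eval(overlap(x1, w1, x2, w2)))      # -> 2.0
w = K.maximum(overlap(x1, w1, x2, w2), 0.)  # clamp for IoU-style usage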
def limit(x):
    # replace values above 1e5 with 1e6, and values below -1e5 with -1e6
    y = tf.select(K.greater(x, 100000), 1000000. * K.ones_like(x), x)
    z = tf.select(K.lesser(y, -100000), -1000000. * K.ones_like(x), y)
    return z
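The same intent is usually expressed with K.clip, though the two are not behavior-identical: K.clip caps values at the bounds, while limit() substitutes the sentinel values +/-1e6 for anything beyond +/-1e5. A sketch of the clipping alternative:

def limit_clip(x):
    # caps x into [-1e6, 1e6] instead of substituting sentinel values
    return K.clip(x, -1000000., 1000000.)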
def get_updates(self, params, constraints, loss):
    grads = self.get_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]

    lr = self.lr
    if self.inital_decay > 0:
        lr *= (1. / (1. + self.decay * self.iterations))

    t = self.iterations + 1
    # bias-corrected Adam learning rate
    lr_t = lr * K.sqrt(1. - K.pow(self.beta_2, t)) / (1. - K.pow(self.beta_1, t))

    shapes = [K.get_variable_shape(p) for p in params]
    ms = [K.zeros(shape) for shape in shapes]
    vs = [K.zeros(shape) for shape in shapes]
    f = K.variable(0)  # smoothed loss
    d = K.variable(1)  # feedback coefficient
    self.weights = [self.iterations] + ms + vs + [f, d]

    cond = K.greater(t, K.variable(1))
    small_delta_t = K.switch(K.greater(loss, f), self.small_k + 1, 1. / (self.big_K + 1))
    big_delta_t = K.switch(K.greater(loss, f), self.big_K + 1, 1. / (self.small_k + 1))
    # clip the relative change of the smoothed loss to [small_delta_t, big_delta_t]
    c_t = K.minimum(K.maximum(small_delta_t, loss / (f + self.epsilon)), big_delta_t)
    f_t = c_t * f
    r_t = K.abs(f_t - f) / (K.minimum(f_t, f))
    d_t = self.beta_3 * d + (1 - self.beta_3) * r_t
    # on the first iteration there is no previous loss to compare against
    f_t = K.switch(cond, f_t, loss)
    d_t = K.switch(cond, d_t, K.variable(1.))
    self.updates.append(K.update(f, f_t))
    self.updates.append(K.update(d, d_t))

    for p, g, m, v in zip(params, grads, ms, vs):
        m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
        v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
        # Adam step, with the denominator scaled by the feedback term d_t
        p_t = p - lr_t * m_t / (d_t * K.sqrt(v_t) + self.epsilon)

        self.updates.append(K.update(m, m_t))
        self.updates.append(K.update(v, v_t))

        new_p = p_t
        # apply constraints
        if p in constraints:
            c = constraints[p]
            new_p = c(new_p)
        self.updates.append(K.update(p, new_p))
    return self.updates
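Aside from the first-iteration switch, this is Adam with one extra ingredient: f tracks a clipped, smoothed loss and d accumulates its relative change, scaling the denominator of the parameter step. This appears to be the feedback scheme of the Eve optimizer (a reading of the code, not a label from the source). In the code's notation:

r_t = \frac{|f_t - f_{t-1}|}{\min(f_t, f_{t-1})}, \qquad
d_t = \beta_3 d_{t-1} + (1 - \beta_3)\, r_t, \qquad
p_t = p_{t-1} - \frac{\alpha_t\, m_t}{d_t \sqrt{v_t} + \epsilon}

where \alpha_t is the bias-corrected Adam learning rate lr_t. When training is stable, r_t is small, d_t tends toward 1, and the update reduces to plain Adam; when the loss is changing rapidly, d_t grows and the effective step size shrinks.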