
Python tensorflow module: round() example source code

The following code examples, extracted from open-source Python projects, illustrate how to use tensorflow.round().
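Note that tf.round rounds element-wise to the nearest integer and, per the TensorFlow documentation, rounds halfway values to the nearest even integer (banker's rounding). A minimal sketch in the TF 1.x session style used by the snippets below:

import tensorflow as tf

x = tf.constant([0.4, 0.5, 1.5, 2.5, -0.5])
y = tf.round(x)
with tf.Session() as sess:
    print(sess.run(y))  # [ 0.  0.  2.  2. -0.]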

Project: lung-cancer-detector    Author: YichenGong    | project source | file source
def resample(patient, new_spacing=[1,1,1]):
    scan = get_scan(patient)
    image = get_3D_data(patient)

    # Determine current pixel spacing
    spacing = np.array([scan[0].SliceThickness] + scan[0].PixelSpacing, dtype=np.float32)

    resize_factor = spacing / new_spacing
    new_real_shape = image.shape * resize_factor
    new_shape = np.round(new_real_shape)
    real_resize_factor = new_shape / image.shape
    new_spacing = spacing / real_resize_factor

    image = nd.interpolation.zoom(image, real_resize_factor, mode='nearest')

    return image

# For the sake of testing the network, we'll be using the sample dataset.
# For this, we'll use the maximum size of the image
# and pad any image smaller than that with -1000 values
# (see the padding sketch below).

# PS: only the first dimension differs in the sample dataset,
# which is not the case in the actual dataset.
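The padding itself is not shown in the snippet above; a minimal hedged sketch of padding a smaller volume up to a target shape with -1000 (air in Hounsfield units) might look like this (pad_to_shape and target_shape are illustrative names, not taken from the project):

import numpy as np

def pad_to_shape(image, target_shape, pad_value=-1000):
    # Pad each axis up to target_shape, splitting the padding between both ends.
    pads = []
    for dim, target in zip(image.shape, target_shape):
        total = max(target - dim, 0)
        pads.append((total // 2, total - total // 2))
    return np.pad(image, pads, mode='constant', constant_values=pad_value)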
Project: HyperGAN    Author: 255BITS    | project source | file source
def sample_output(self, val):
        vocabulary = self.get_vocabulary()
        if self.one_hot:
            vals = [ np.argmax(r) for r in val ]
            ox_val = [vocabulary[obj] for obj in list(vals)]
            string = "".join(ox_val)
            return string
        else:
            val = np.reshape(val, [-1])
            val *= len(vocabulary)/2.0
            val += len(vocabulary)/2.0
            val = np.round(val)

            val = np.maximum(0, val)
            val = np.minimum(len(vocabulary)-1, val)

            ox_val = [self.get_character(obj) for obj in list(val)]
            string = "".join(ox_val)
            return string
Project: hart    Author: akosiorek    | project source | file source
def _bBox_to_mask(yy, region_size, dtype):
    # trim bounding box exceeding region_size on top and left
    neg_part = tf.nn.relu(-yy[:2])
    core = tf.ones(tf.to_int32(tf.round(yy[2:] - neg_part)), dtype=dtype)

    y1 = tf.maximum(yy[0], 0.)
    x1 = tf.maximum(yy[1], 0.)

    y2 = tf.minimum(region_size[0], yy[0] + yy[2])
    x2 = tf.minimum(region_size[1], yy[1] + yy[3])

    padding = (y1, region_size[0] - y2, x1, region_size[1] - x2)
    padding = tf.reshape(tf.stack(padding), (-1, 2))
    padding = tf.to_int32(tf.round(padding))
    mask = tf.pad(core, padding)

    # trim bounding box exceeding region_size on bottom and right
    rs = tf.to_int32(tf.round(region_size))
    mask = mask[:rs[0], :rs[1]]
    mask.set_shape((None, None))
    return mask
Project: fast-neural-style    Author: coder-james    | project source | file source
def get_masks(origin_images, height, width, channels=3):
    """add horizon color lines and set empty"""
    quarty = tf.random_uniform([height/4, 1])
    prop = tf.scalar_mul(tf.convert_to_tensor(0.2), tf.ones([height/4, 1]))
    quarty = tf.round(tf.add(quarty, prop))
    y = tf.reshape(tf.stack([quarty, quarty, quarty, quarty], axis=1), [height, 1])
    mask = tf.matmul(y, tf.ones([1, width]))
    masks = tf.expand_dims(mask, 0)
    masks = tf.expand_dims(masks, -1)
    maskedimages = tf.mul(origin_images, masks)
    """add noise"""
    scale = tf.random_uniform([channels, 1])
    y = tf.subtract(tf.ones([height, 1]), y)
    y = tf.expand_dims(y, 0)
    y = tf.scalar_mul(tf.convert_to_tensor(255.), tf.multiply(scale, y))
    noise = tf.add(mask, tf.matmul(y, tf.ones([channels, 1, width])))
    noise = tf.pack(tf.split(value=noise, num_or_size_splits=noise.get_shape()[0], axis=0), axis=3)
    maskedimages = tf.add(maskedimages, noise)
    return maskedimages
Project: DMNN    Author: magnux    | project source | file source
def sim_occlusions(poses, dm_shape, batch_size, max_length, n_dims, body_splits, _int_type=tf.int32, _float_type=tf.float32):
    def occluded_poses():
        body_splits_tf = tf.constant(body_splits, dtype=_int_type)
        occ_idcs = tf.random_uniform([batch_size, 1], minval=0, maxval=len(body_splits), dtype=_int_type)
        occ_idcs = tf.gather_nd(body_splits_tf, occ_idcs)
        noise_mask = tf.tile(
            tf.reshape(
                tf.cast(tf.reduce_sum(tf.one_hot(occ_idcs, dm_shape[0]), axis=1), dtype=tf.bool),
                [batch_size, dm_shape[0], 1]),
            [1, 1, n_dims])
        noisy_poses = poses * tf.random_uniform([batch_size, 1, n_dims], minval=0.8, maxval=1.2, dtype=_float_type)
        return tf.where(noise_mask, noisy_poses, poses)

    occlude_rate = 0.5
    return tf.cond(tf.cast(tf.round(tf.random_uniform([], minval=-0.5, maxval=0.5) + occlude_rate), tf.bool),
                   occluded_poses, lambda: poses)
Project: skiprnn-2017-telecombcn    Author: imatge-upc    | project source | file source
def _binary_round(x):
    """
    Rounds a tensor whose values are in [0,1] to a tensor with values in {0,1},
    using the straight through estimator for the gradient.

    Based on http://r2rt.com/binary-stochastic-neurons-in-tensorflow.html

    :param x: input tensor
    :return: y=round(x) with gradients defined by the identity mapping (y=x)
    """
    g = tf.get_default_graph()

    with ops.name_scope("BinaryRound") as name:
        with g.gradient_override_map({"Round": "Identity"}):
            return tf.round(x, name=name)
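A small hedged sketch of how such a straight-through round is typically used (the snippet above also assumes that ops was imported via from tensorflow.python.framework import ops; the names below are illustrative, not from the skiprnn project): the forward pass yields hard 0/1 values while the backward pass treats the rounding as the identity.

x = tf.constant([0.2, 0.7, 0.5])
y = _binary_round(x)           # forward pass: [0., 1., 0.] (0.5 rounds to the nearest even integer)
dy_dx = tf.gradients(y, x)[0]  # backward pass: [1., 1., 1.] thanks to the Round -> Identity override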
Project: distributional_perspective_on_RL    Author: Kiwoo    | project source | file source
def mode(self):
        return tf.round(self.ps)
Project: lung-cancer-detector    Author: YichenGong    | project source | file source
def glimpseSensor(normalLocation, inputPlaceholder):
    location = tf.round(tf.multiply((normalLocation + 1)/2.0, InputimageSize))
    location = tf.cast(location, tf.int32)

    images = tf.reshape(inputPlaceholder, (batchSize, InputimageSize[0], 
                                          InputimageSize[1], 
                                          InputimageSize[2]))

    zooms = []
    for k in xrange(batchSize):
        imgZooms = []
        img = images[k]

        loc = location[k]

        for i in xrange(glimpseDepth):
            radius = int(glimpseRadius * (2 ** i))
            glimpse = getGlipmse(img, loc, radius)
            glimpse = tf.reshape(glimpse, (glimpseBandwidth, glimpseBandwidth, glimpseBandwidth))

            imgZooms.append(glimpse)

        zooms.append(tf.pack(imgZooms))

    zooms = tf.pack(zooms)

    return zooms
Project: TFCommon    Author: MU94W    | project source | file source
def binary_accuracy(y_true, y_pred, mask=1):
    round_y_pred = tf.round(y_pred)
    right_cnt = tf.cast(tf.equal(y_true, round_y_pred), tf.float32)
    return compute_weighted_loss(right_cnt, mask)
Project: keras    Author: GeekLiB    | project source | file source
def round(x):
    '''Element-wise rounding to the closest integer.
    '''
    return tf.round(x)
Project: HyperGAN    Author: 255BITS    | project source | file source
def modal(config, gan, net):
    net = tf.round(net*float(config.modes))/float(config.modes)
    return net
Project: HyperGAN    Author: 255BITS    | project source | file source
def __init__(self, args):
        with tf.device(args.device):
            def circle(x):
                spherenet = tf.square(x)
                spherenet = tf.reduce_sum(spherenet, 1)
                lam = tf.sqrt(spherenet)
                return x/tf.reshape(lam,[int(lam.get_shape()[0]), 1])

            def modes(x):
                return tf.round(x*2)/2.0

            if args.distribution == 'circle':
                x = tf.random_normal([args.batch_size, 2])
                x = circle(x)
            elif args.distribution == 'modes':
                x = tf.random_uniform([args.batch_size, 2], -1, 1)
                x = modes(x)
            elif args.distribution == 'sin':
                x = tf.random_uniform((1, args.batch_size), -10.5, 10.5 )
                x = tf.transpose(x)
                r_data = tf.random_normal((args.batch_size,1), mean=0, stddev=0.1)
                xy = tf.sin(0.75*x)*7.0+x*0.5+r_data*1.0
                x = tf.concat([xy,x], 1)/16.0

            elif args.distribution == 'arch':
                offset1 = tf.random_uniform((1, args.batch_size), -10, 10)
                xa = tf.random_uniform((1, args.batch_size), 1, 4)
                xb = tf.random_uniform((1, args.batch_size), 1, 4)
                x1 = tf.random_uniform((1, args.batch_size), -1, 1)
                xcos = tf.cos(x1*np.pi + offset1)*xa
                xsin = tf.sin(x1*np.pi + offset1)*xb
                x = tf.transpose(tf.concat([xcos,xsin], 0))/16.0

            self.x = x
            self.xy = tf.zeros_like(self.x)
Project: baselines    Author: openai    | project source | file source
def mode(self):
        return tf.round(self.ps)
Project: deep-spike    Author: electronicvisions    | project source | file source
def quantize_weight(W, precision=weight_quantization):
    '''
    For a given weight matrix, returns weights of values -1, 0 or 1
    :param W:
    :return:
    '''
    W_ = tf.round(W * precision) / precision
    return W_

########## Loading the dataset
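A brief usage sketch for quantize_weight, assuming a hypothetical precision of 1 so that weights snap to -1, 0, or 1 (the actual value of weight_quantization is not shown in this snippet):

W = tf.constant([[-0.8, 0.3], [0.6, -0.2]])
W_q = quantize_weight(W, precision=1)
with tf.Session() as sess:
    print(sess.run(W_q))  # -> [[-1., 0.], [1., -0.]] (as float32)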
Project: Deep-Learning-Experiments    Author: roatienza    | project source | file source
def fnn_model_fn(features,labels,mode):
    print(features)
    print(labels)
    # output_labels = tf.reshape(labels,[-1,1])
    dense = tf.layers.dense(features,units=nhidden,activation=tf.nn.relu,use_bias=True)
    print(dense)
    logits = tf.layers.dense(dense,units=1,use_bias=True)
    print(logits)
    onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=1)
    if mode != learn.ModeKeys.EVAL:
        # loss = tf.losses.sigmoid_cross_entropy(output_labels,logits)
        # loss = tf.losses.mean_squared_error(labels=output_labels,predictions=logits)
        loss = tf.losses.softmax_cross_entropy(
             onehot_labels=onehot_labels, logits=logits)
    if mode==learn.ModeKeys.TRAIN:
        train_op = tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.contrib.framework.get_global_step(),
            learning_rate=learning_rate,
            optimizer="SGD")
    predictions = {
        "classes": tf.round(logits),
        "probabilities": tf.nn.softmax(
             logits, name="softmax_tensor")
    }
    return model_fn.ModelFnOps(
        mode=mode, predictions=predictions, loss=loss, train_op=train_op)
Project: rec-attend-public    Author: renmengye    | project source | file source
def f_segm_match(IoU, s_gt):
  """Matching between segmentation output and groundtruth.
  Args:
    y_out: [B, T, H, W], output segmentations
    y_gt: [B, T, H, W], groundtruth segmentations
    s_gt: [B, T], groundtruth score sequence
  """
  global hungarian_module
  if hungarian_module is None:
    mod_name = './hungarian.so'
    hungarian_module = tf.load_op_library(mod_name)
    log.info('Loaded library "{}"'.format(mod_name))

  # Mask X, [B, M] => [B, 1, M]
  mask_x = tf.expand_dims(s_gt, dim=1)
  # Mask Y, [B, N] => [B, N, 1]
  mask_y = tf.expand_dims(s_gt, dim=2)
  IoU_mask = IoU * mask_x * mask_y

  # Keep certain precision so that we can get optimal matching within
  # reasonable time.
  eps = 1e-5
  precision = 1e6
  IoU_mask = tf.round(IoU_mask * precision) / precision
  match_eps = hungarian_module.hungarian(IoU_mask + eps)[0]

  # [1,1]
  s_gt_shape = tf.shape(s_gt)
  num_segm_out = s_gt_shape[1]
  num_segm_out_mul = tf.pack([1, num_segm_out, 1])
  # Mask the graph algorithm output.
  match = match_eps * mask_x * mask_y

  return match
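The precision rounding above simply snaps the masked IoU values onto a 1e-6 grid before they reach the Hungarian solver; a quick illustration with made-up values:

iou = tf.constant([0.12345678, 0.99999971])
snapped = tf.round(iou * 1e6) / 1e6   # roughly [0.123457, 1.0], up to float32 error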
