
Python torch.nn module: ConvTranspose2d() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use torch.nn.ConvTranspose2d().
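As a quick orientation before the project examples (a minimal sketch, not taken from any of the projects below), ConvTranspose2d upsamples its input, and the output size follows out = (in - 1) * stride - 2 * padding + kernel_size + output_padding:

import torch
import torch.nn as nn

# A stride-2 transposed convolution that exactly doubles the spatial size:
# out = (16 - 1)*2 - 2*1 + 4 + 0 = 32
deconv = nn.ConvTranspose2d(in_channels=64, out_channels=32,
                            kernel_size=4, stride=2, padding=1)
x = torch.randn(1, 64, 16, 16)
print(deconv(x).shape)  # torch.Size([1, 32, 32, 32])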

Project: neural-style    Author: ctliu3
def __init__(self):
        super(ImageTransformNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=9, stride=1, padding=4)
        self.bn1 = nn.BatchNorm2d(32)

        self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=1)
        self.bn2 = nn.BatchNorm2d(64)

        self.conv3 = nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1)
        self.bn3 = nn.BatchNorm2d(128)

        self.res1 = ResidualBlock(128, 128)
        self.res2 = ResidualBlock(128, 128)
        self.res3 = ResidualBlock(128, 128)
        self.res4 = ResidualBlock(128, 128)
        self.res5 = ResidualBlock(128, 128)

        # decoder mirrors the encoder above; elided arguments restored accordingly
        self.conv4 = nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1)
        self.bn4 = nn.BatchNorm2d(64)

        self.conv5 = nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2, padding=1)
        self.bn5 = nn.BatchNorm2d(32)

        self.conv6 = nn.ConvTranspose2d(32, 3, kernel_size=9, stride=1, padding=4)
Project: deeplearning    Author: zxjzxj9
def __init__(self, prior_size):
        super(DCGenerator, self).__init__()
        self.prior_size = prior_size
        self.linear1 = nn.Linear(prior_size, 4*4*512)
        # 4x4 --> 8x8
        self.deconv1 = nn.ConvTranspose2d(512, 256, (5,5))
        # Batch normalization
        self.bn1 = nn.BatchNorm2d(256)

        # 8x8 --> 16x16, stride 2
        self.deconv2 = nn.ConvTranspose2d(256, 128, (5,5), stride=(2,2),
                                          padding=(2,2), output_padding=(1,1))
        # Batch normalization
        self.bn2 = nn.BatchNorm2d(128)

        # 16x16 --> 32x32, stride 2 (output channels assumed to be 3, i.e. RGB)
        self.deconv3 = nn.ConvTranspose2d(128, 3, (5,5), stride=(2,2),
                                          padding=(2,2), output_padding=(1,1))
Project: FastNeuralStyle    Author: bengxy
def __init__(self):
        super(StylePart, self).__init__()
        # elided layer arguments restored following the usual fast-style-transfer
        # layout; kernel/stride values for conv2/conv3 and the deconvs are assumptions
        self.conv1 = nn.Conv2d(3, 32, kernel_size=9, stride=1, padding=4)
        self.bn1 = nn.BatchNorm2d(32)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1)
        self.bn2 = nn.BatchNorm2d(64)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1)
        self.bn3 = nn.BatchNorm2d(128)
        self.res1 = ResBlock(128)
        self.res2 = ResBlock(128)
        self.res3 = ResBlock(128)
        self.res4 = ResBlock(128)
        self.res5 = ResBlock(128)
        self.deconv1 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2,
                                          padding=1, output_padding=1)
        self.bn4 = nn.BatchNorm2d(64)
        self.deconv2 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2,
                                          padding=1, output_padding=1)
        self.bn5 = nn.BatchNorm2d(32)
        self.deconv3 = nn.Conv2d(32, 3, kernel_size=9, stride=1, padding=4)
Project: lr-gan.pytorch    Author: jwyang
def buildNetGbg(self, nsize): # take a vector as input, and output bgimg
        net = nn.Sequential()
        size_map = 1
        name = str(size_map)
        # 4x4 kernel, stride 1, padding 0 (standard DCGAN head; stride was elided)
        net.add_module('convt' + name, nn.ConvTranspose2d(nz, ngf * 4, 4, 1, 0, bias=True))
        net.add_module('bn' + name, nn.BatchNorm2d(ngf * 4))
        net.add_module('relu' + name, nn.ReLU(True))
        size_map = 4
        depth_in = 4 * ngf
        depth_out = 2 * ngf
        while size_map < nsize / 2:
            name = str(size_map)
            # 4x4 kernel, stride 2, padding 1 (standard DCGAN pyramid)
            net.add_module('convt' + name, nn.ConvTranspose2d(depth_in, depth_out, 4, 2, 1, bias=True))
            net.add_module('bn' + name, nn.BatchNorm2d(depth_out))
            net.add_module('relu' + name, nn.ReLU(True))
            depth_in = depth_out
            depth_out = max(depth_in // 2, 64)
            size_map = size_map * 2
        return net, depth_in
Project: lr-gan.pytorch    Author: jwyang
def buildNetGfg(self, nsize): # take a vector as input, and output fgimg and fgmask
        net = nn.Sequential()
        size_map = 1
        name = str(size_map)
        net.add_module('convt' + name, nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False))
        net.add_module('bn' + name, nn.BatchNorm2d(ngf * 8))
        net.add_module('relu' + name, nn.ReLU(True))
        size_map = 4
        depth_in = 8 * ngf
        depth_out = 4 * ngf
        # the truncated loop body is restored to mirror buildNetGbg above
        while size_map < nsize / 2:
            name = str(size_map)
            net.add_module('convt' + name, nn.ConvTranspose2d(depth_in, depth_out, 4, 2, 1, bias=False))
            net.add_module('bn' + name, nn.BatchNorm2d(depth_out))
            net.add_module('relu' + name, nn.ReLU(True))
            depth_in = depth_out
            depth_out = max(depth_in // 2, 64)
            size_map = size_map * 2
        return net, depth_in
Project: pytorch-semantic-segmentation    Author: ZijunDeng
def __init__(self, num_classes, pretrained=True):
        super(FCN32VGG, self).__init__()
        vgg = models.vgg16()
        if pretrained:
            vgg.load_state_dict(torch.load(vgg16_caffe_path))
        features, classifier = list(vgg.features.children()), list(vgg.classifier.children())

        features[0].padding = (100, 100)

        for f in features:
            if 'MaxPool' in f.__class__.__name__:
                f.ceil_mode = True
            elif 'ReLU' in f.__class__.__name__:
                f.inplace = True

        self.features5 = nn.Sequential(*features)

        fc6 = nn.Conv2d(512, 4096, kernel_size=7)
        fc6.weight.data.copy_(classifier[0].weight.data.view(4096, 512, 7, 7))
        fc6.bias.data.copy_(classifier[0].bias.data)
        fc7 = nn.Conv2d(4096, 4096, kernel_size=1)
        fc7.weight.data.copy_(classifier[3].weight.data.view(4096, 4096, 1, 1))
        fc7.bias.data.copy_(classifier[3].bias.data)
        score_fr = nn.Conv2d(4096, num_classes, kernel_size=1)
        score_fr.weight.data.zero_()
        score_fr.bias.data.zero_()
        self.score_fr = nn.Sequential(
            fc6, nn.ReLU(inplace=True), nn.Dropout(), fc7, nn.ReLU(inplace=True), nn.Dropout(), score_fr
        )

        self.upscore = nn.ConvTranspose2d(num_classes, num_classes, kernel_size=64, stride=32, bias=False)
        self.upscore.weight.data.copy_(get_upsampling_weight(num_classes, num_classes, 64))
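The get_upsampling_weight helper referenced above fills the transposed-convolution kernel with fixed bilinear-interpolation weights; a sketch of it as commonly implemented in the FCN ports:

import numpy as np
import torch

def get_upsampling_weight(in_channels, out_channels, kernel_size):
    """Build a 2D bilinear-interpolation kernel for upsampling."""
    factor = (kernel_size + 1) // 2
    if kernel_size % 2 == 1:
        center = factor - 1
    else:
        center = factor - 0.5
    og = np.ogrid[:kernel_size, :kernel_size]
    # outer product of two 1D triangle filters
    filt = (1 - abs(og[0] - center) / factor) * \
           (1 - abs(og[1] - center) / factor)
    weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size),
                      dtype=np.float64)
    weight[range(in_channels), range(out_channels), :, :] = filt
    return torch.from_numpy(weight).float()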
Project: pytorch-semantic-segmentation    Author: ZijunDeng
def __init__(self, in_channels, out_channels, num_conv_layers):
        super(_DecoderBlock, self).__init__()
        middle_channels = in_channels // 2
        layers = [
            nn.ConvTranspose2d(in_channels, in_channels, kernel_size=2, stride=2),
            nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(middle_channels),
            nn.ReLU(inplace=True)
        ]
        layers += [
                      nn.Conv2d(middle_channels, middle_channels, kernel_size=3, padding=1),
                      nn.BatchNorm2d(middle_channels),
                      nn.ReLU(inplace=True),
                  ] * (num_conv_layers - 2)
        layers += [
            nn.Conv2d(middle_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        self.decode = nn.Sequential(*layers)
Project: pointGAN    Author: fxia22
def __init__(self, num_points = 2048):
        super(PointGenPSG, self).__init__()
        self.num_points = num_points
        self.fc1 = nn.Linear(100, 256)
        self.fc2 = nn.Linear(256, 512)
        self.fc3 = nn.Linear(512, 1024)
        self.fc4 = nn.Linear(1024, self.num_points // 4 * 3 * 1)
        self.th = nn.Tanh()

        # channel widths recovered from the BatchNorm2d layers below;
        # kernel/stride values for conv2..conv5 are assumptions
        self.conv1 = nn.ConvTranspose2d(100, 1024, (2, 3))
        self.conv2 = nn.ConvTranspose2d(1024, 512, 4, stride=2, padding=1)
        self.conv3 = nn.ConvTranspose2d(512, 256, 4, stride=2, padding=1)
        self.conv4 = nn.ConvTranspose2d(256, 128, 4, stride=2, padding=1)
        self.conv5 = nn.ConvTranspose2d(128, 3, 4, stride=2, padding=1)

        self.bn1 = torch.nn.BatchNorm2d(1024)
        self.bn2 = torch.nn.BatchNorm2d(512)
        self.bn3 = torch.nn.BatchNorm2d(256)
        self.bn4 = torch.nn.BatchNorm2d(128)
        self.bn5 = torch.nn.BatchNorm2d(3)
Project: generative_zoo    Author: DL-IT
def make_conv_layer(layer_list, in_dim, out_dim, back_conv, batch_norm=True, activation='ReLU', k_s_p=[4,2,1]):
    k, s, p = k_s_p[0], k_s_p[1], k_s_p[2]
    if not back_conv:
        layer_list.append(nn.Conv2d(in_dim, out_dim, kernel_size=k, stride=s, padding=p, bias=False))
    else:
        layer_list.append(nn.ConvTranspose2d(in_dim, out_dim, kernel_size=k, stride=s, padding=p, bias=False))

    if batch_norm:
        layer_list.append(nn.BatchNorm2d(out_dim))

    if activation == 'ReLU':
        layer_list.append(nn.ReLU(True))
    elif activation == 'Sigmoid':
        layer_list.append(nn.Sigmoid())
    elif activation == 'Tanh':
        layer_list.append(nn.Tanh())
    elif activation == 'LeakyReLU':
        layer_list.append(nn.LeakyReLU(0.2, inplace=True))

    return layer_list
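A hedged usage sketch of this helper (the layer sizes here are illustrative, not from the project): stacking calls builds a small DCGAN-style generator head.

layers = []
layers = make_conv_layer(layers, 100, 256, back_conv=True, k_s_p=[4, 1, 0])  # 1x1 -> 4x4
layers = make_conv_layer(layers, 256, 128, back_conv=True)                   # 4x4 -> 8x8
layers = make_conv_layer(layers, 128, 3, back_conv=True,
                         batch_norm=False, activation='Tanh')                # 8x8 -> 16x16
generator = nn.Sequential(*layers)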
Project: PytorchDL    Author: FredHuangBia
def __init__(self, params, nclasses, encoder):
        super().__init__()

        self.encoder = encoder
        self.pooling_modules = []

        for mod in self.encoder.modules():
            try:
                if mod.other.downsample:
                    self.pooling_modules.append(mod.other)
            except AttributeError:
                pass

        self.layers = []
        for i, params in enumerate(params):
            if params['upsample']:
                params['pooling_module'] = self.pooling_modules.pop(-1)
            layer = DecoderModule(**params)
            self.layers.append(layer)
            layer_name = 'decoder{:02d}'.format(i)
            super().__setattr__(layer_name, layer)

        # out_channels/kernel restored as assumptions: a 2x upsampling to the class map
        self.output_conv = nn.ConvTranspose2d(16, nclasses, kernel_size=2, stride=2, padding=0, output_padding=0, bias=True)
Project: PytorchDL    Author: FredHuangBia
def __init__(self, numClasses, prelus=False):
        super().__init__()

        # nonBt1d arguments follow the pattern of conv7:
        # (channels, dropout, prelus, dilation); elided ones are restored accordingly
        self.upsampler7 = upsamplerA(128, 128)
        self.conv7 = nonBt1d(128, 0.1, prelus, 1)
        self.upsampler6 = upsamplerA(128, 128)
        self.conv6 = nonBt1d(128, 0.1, prelus, 1)
        self.upsampler5 = upsamplerA(128, 128)
        self.conv5 = nonBt1d(128, 0.1, prelus, 1)
        self.upsampler4 = upsamplerA(128, 128)
        self.conv4 = nonBt1d(128, 0.1, prelus, 1)
        self.upsampler3 = upsamplerA(128, 128)
        self.conv3 = nonBt1d(128, 0.1, prelus, 1)

        self.upsampler2 = upsamplerA(128, 64)
        self.conv2a = nonBt1d(64, 0.1, prelus, 2)
        self.conv2b = nonBt1d(64, 0.1, prelus, 4)

        self.upsampler1 = upsamplerB(64, numClasses)
        self.conv1a = nonBt1d(numClasses, 0.1, prelus, 2)
        self.conv1b = nonBt1d(numClasses, 0.1, prelus, 4)
        self.conv1c = nonBt1d(numClasses, 0.1, prelus, 8)

        self.convFinal = nn.ConvTranspose2d(numClasses, numClasses, kernel_size=2, stride=2)
Project: examples    Author: pytorch
def __init__(self, ngpu):
        super(_netG, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # input is Z, going into a convolution
            nn.ConvTranspose2d(     nz, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d(ngf * 2,     ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            # state size. (ngf) x 32 x 32
            nn.ConvTranspose2d(    ngf,      nc, 4, 2, 1, bias=False),
            nn.Tanh()
            # state size. (nc) x 64 x 64
        )
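A quick shape check of this generator (a sketch assuming the pytorch/examples defaults nz=100, ngf=64, nc=3; these globals are read when __init__ runs):

import torch

nz, ngf, nc = 100, 64, 3
netG = _netG(ngpu=1)
z = torch.randn(16, nz, 1, 1)
print(netG.main(z).shape)  # torch.Size([16, 3, 64, 64])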
Project: pytorch-fcn    Author: wkentaro
def get_parameters(model, bias=False):
    import torch.nn as nn
    modules_skipped = (
        nn.ReLU,
        nn.MaxPool2d,
        nn.Dropout2d,
        nn.Sequential,
        torchfcn.models.FCN32s,
        torchfcn.models.FCN16s,
        torchfcn.models.FCN8s,
    )
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            if bias:
                yield m.bias
            else:
                yield m.weight
        elif isinstance(m, nn.ConvTranspose2d):
            # weight is frozen because it is just a bilinear upsampling
            if bias:
                assert m.bias is None
        elif isinstance(m, modules_skipped):
            continue
        else:
            raise ValueError('Unexpected module: %s' % str(m))
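This generator is typically fed to the optimizer as per-parameter groups, so weights and biases can get different settings; a hedged sketch (the learning-rate and decay values are illustrative, not necessarily the project's):

import torch.optim as optim

optimizer = optim.SGD([
    {'params': get_parameters(model, bias=False)},
    {'params': get_parameters(model, bias=True),
     'lr': 2e-10, 'weight_decay': 0},  # biases: doubled lr, no decay (illustrative)
], lr=1e-10, momentum=0.99, weight_decay=0.0005)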
Project: MachineLearning    Author: timomernick
def __init__(self):
        super(Generator, self).__init__()
        # intermediate BatchNorm/ReLU layers and kernel arguments were elided;
        # restored here following the DCGAN template (assumed values)
        self.main = nn.Sequential(
            nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 2, ngf * 1, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 1),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 1, nc, 4, 2, 1, bias=False),
            nn.Tanh()
        )
        self.apply(weights_init)
        self.optimizer = optim.Adam(self.parameters(), lr=learning_rate, betas=(beta_1, beta_2))
        #self.optimizer = optim.RMSprop(self.parameters(), lr=learning_rate, alpha=beta_2)
Project: CBEGAN    Author: taey16
def __init__(self, ngf, hidden_size, condition=False, condition_size=0):
    super(Decoder, self).__init__()
    self.condition = condition

    # elided output-channel/kernel arguments restored as assumptions
    self.decode_cond = nn.ConvTranspose2d(condition_size, ngf, kernel_size=8, stride=1, padding=0)
    # 1
    self.decode = nn.ConvTranspose2d(hidden_size, ngf, kernel_size=8, stride=1, padding=0)
    # 8
    self.dconv6 = deconv_block(ngf*2, ngf)
    # 16
    self.dconv5 = deconv_block(ngf, ngf)
    # 32
    self.dconv4 = deconv_block(ngf, ngf)
    # 64
    self.dconv3 = deconv_block(ngf, ngf)
    # 128
    #self.dconv2 = deconv_block(ngf, ngf)
    # 256
    self.dconv1 = nn.Sequential(nn.Conv2d(ngf, ngf, kernel_size=3, padding=1),
                                nn.ELU(True),
                                nn.Conv2d(ngf, 3, kernel_size=3, padding=1),
                                nn.Tanh())
Project: jamespy_py3    Author: jskDr
def __init__(self, ngpu):
        super().__init__()
        self.ngpu = ngpu
        # kernel/stride values inferred from the (b, C, H, W) shape comments; assumed
        self.main = nn.Sequential(
            # b, nz, 1, 1
            nn.ConvTranspose2d(nz, 28 * 28, 1),
            # b, 28*28, 1, 1
            nn.BatchNorm2d(28 * 28),
            nn.ConvTranspose2d(28 * 28, 14 * 14, 2),
            # b, 14*14, 2, 2
            nn.BatchNorm2d(14 * 14),
            nn.ConvTranspose2d(14 * 14, 7 * 7, 2, stride=2),
            # b, 7*7, 4, 4
            nn.BatchNorm2d(7 * 7),
            nn.ConvTranspose2d(7 * 7, 1, 7, stride=7),
            # b, 1, 28, 28
            nn.Sigmoid()
        )
Project: infoGAN-pytorch    Author: pianomania
def __init__(self):
    super(G, self).__init__()

    # kernel/stride arguments restored for the standard MNIST InfoGAN ladder
    # (1x1 -> 1x1 -> 7x7 -> 14x14 -> 28x28)
    self.main = nn.Sequential(
      nn.ConvTranspose2d(74, 1024, 1, 1, bias=False),
      nn.BatchNorm2d(1024),
      nn.ReLU(True),
      nn.ConvTranspose2d(1024, 128, 7, 1, bias=False),
      nn.BatchNorm2d(128),
      nn.ReLU(True),
      nn.ConvTranspose2d(128, 64, 4, 2, 1, bias=False),
      nn.BatchNorm2d(64),
      nn.ReLU(True),
      nn.ConvTranspose2d(64, 1, 4, 2, 1, bias=False),
      nn.Sigmoid()
    )
Project: Machine-Learning    Author: hadikazemi
def __init__(self):
        super(Generator, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(z_dim+10, 4*4*256),
            nn.LeakyReLU()
        )

        self.cnn = nn.Sequential(
            # elided channel/kernel arguments restored as assumptions around
            # the surviving padding/output_padding values
            nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1, output_padding=0),
            nn.LeakyReLU(),
            nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1),
            nn.ConvTranspose2d(64, 64, kernel_size=5, stride=2, padding=2, output_padding=1),
            nn.Conv2d(64, 3, kernel_size=3, padding=1),
            nn.Tanh()
        )
Project: Machine-Learning    Author: hadikazemi
def __init__(self):
        super(Generator, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(100, 28 * 28),  # output width assumed (elided in the listing)
            nn.Tanh()
        )
Project: MNIST-invert-color    Author: BlackBindy
def __init__(self):
        super(Generator, self).__init__()
        # kernel/stride/padding restored from the shape comments; assumed values
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1), # 28*28 -> 14*14
            nn.BatchNorm2d(16),
            nn.LeakyReLU()
        )
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1), # 14*14 -> 14*14
            nn.BatchNorm2d(16),
            nn.LeakyReLU()
        )
        self.layer3 = nn.Sequential(
            nn.ConvTranspose2d(16, 1, kernel_size=4, stride=2, padding=1), # 14*14 -> 28*28
            nn.Tanh()
        )
Project: pytorch-cns    Author: awentzonline
def __init__(self):
        super(Decoder, self).__init__()
        # the ConvTranspose2d lines were lost; restored following the DCGAN
        # template implied by the commented-out BatchNorm2d layers (assumed)
        self.main = nn.Sequential(
            # input is Z, going into a convolution
            nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
            #nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            #nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            #nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
            #nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
            nn.Tanh()
            # state size. (nc) x 64 x 64
        )
Project: pytorch-semseg    Author: meetshah1995
def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True):
        super(deconv2DBatchnorm, self).__init__()

        self.dcb_unit = nn.Sequential(nn.ConvTranspose2d(int(in_channels), int(n_filters), kernel_size=k_size,
                                               padding=padding, stride=stride, bias=bias),
                                 nn.BatchNorm2d(int(n_filters)),)
Project: pytorch-semseg    Author: meetshah1995
def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True):
        super(deconv2DBatchnormRelu, self).__init__()

        self.dcbr_unit = nn.Sequential(nn.ConvTranspose2d(int(in_channels), int(n_filters), kernel_size=k_size,
                                                padding=padding, stride=stride, bias=bias),
                                 nn.BatchNorm2d(int(n_filters)),
                                 nn.ReLU(inplace=True),)
Project: pytorch-semseg    Author: meetshah1995
def __init__(self, in_size, out_size, is_deconv):
        super(unetUp, self).__init__()
        self.conv = unetConv2(in_size, out_size, False)
        if is_deconv:
            self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2)
        else:
            self.up = nn.UpsamplingBilinear2d(scale_factor=2)
Project: lsun-room    Author: leVirve
def _initialize_weights(self):
        vgg16 = torchvision.models.vgg16(pretrained=True)

        for m in self.modules():
            if isinstance(m, nn.ConvTranspose2d):
                assert m.kernel_size[0] == m.kernel_size[1]
                m.weight.data = weight_init.kaiming_normal(m.weight.data)
        for a, b in zip(vgg16.features, self.features):
            if (isinstance(a, nn.Conv2d) and isinstance(b, nn.Conv2d)):
                b.weight.data = a.weight.data
                b.bias.data = a.bias.data
        for i in [0, 3]:
            a, b = vgg16.classifier[i], self.classifier[i]
            b.weight.data = a.weight.data.view(b.weight.size())
            b.bias.data = a.bias.data.view(b.bias.size())
Project: PaintsPytorch    Author: orashi
def U_weight_init(ms):
    for m in ms.modules():
        classname = m.__class__.__name__
        if classname.find('Conv2d') != -1:
            m.weight.data = init.kaiming_normal(m.weight.data, a=0.2)
        elif classname.find('ConvTranspose2d') != -1:
            m.weight.data = init.kaiming_normal(m.weight.data)
            print ('worked!')  # Todo: kill this
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)
        elif classname.find('Linear') != -1:
            m.weight.data = init.kaiming_normal(m.weight.data)
Project: PaintsPytorch    Author: orashi
def U_weight_init(ms):
    for m in ms.modules():
        classname = m.__class__.__name__
        if classname.find('Conv2d') != -1:
            m.weight.data = init.kaiming_normal(m.weight.data, a=0.2)
        elif classname.find('ConvTranspose2d') != -1:
            m.weight.data = init.kaiming_normal(m.weight.data)
        elif classname.find('BatchNorm') != -1:
            # restored to mirror the first U_weight_init variant above
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)
        elif classname.find('Linear') != -1:
            m.weight.data = init.kaiming_normal(m.weight.data)
Project: CycleGAN-Tensorflow-PyTorch-Simple    Author: LynnHo
def dconv_norm_act(in_dim, out_dim, kernel_size, stride, padding=0,
                   output_padding=0, norm=nn.BatchNorm2d, relu=nn.ReLU):
    return nn.Sequential(
        nn.ConvTranspose2d(in_dim, out_dim, kernel_size, stride,
                           padding, output_padding, bias=False),
        norm(out_dim),
        relu())
Project: distanceGAN    Author: sagiebenaim
def deconv(c_in, c_out, k_size, stride=2, pad=1, bn=True):
    """Custom deconvolutional layer for simplicity."""
    layers = []
    layers.append(nn.ConvTranspose2d(c_in, c_out, k_size, stride, pad, bias=False))
    if bn:
        layers.append(nn.BatchNorm2d(c_out))
    return nn.Sequential(*layers)
Project: distanceGAN    Author: sagiebenaim
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, gpu_ids=[]):
        assert(n_blocks >= 0)
        super(resnetGenerator, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.gpu_ids = gpu_ids

        model = [nn.Conv2d(input_nc, ngf, kernel_size=7, padding=3),
                 norm_layer(ngf, affine=True),
                 nn.ReLU(True)]

        n_downsampling = 2
        for i in range(n_downsampling):
            mult = 2**i
            model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
                                stride=2, padding=1),
                      norm_layer(ngf * mult * 2, affine=True),
                      nn.ReLU(True)]

        mult = 2**n_downsampling
        for i in range(n_blocks):
            model += [resnetBlock(ngf * mult, 'zero', norm_layer=norm_layer, use_dropout=use_dropout)]

        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
                                         kernel_size=3, stride=2,
                                         padding=1, output_padding=1),
                      norm_layer(int(ngf * mult / 2), affine=True),
                      nn.ReLU(True)]

        model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=3)]
        model += [nn.Tanh()]

        self.model = nn.Sequential(*model)
Project: distanceGAN    Author: sagiebenaim
def __init__(self, outer_nc, inner_nc, submodule=None, outermost=False,
                 innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost

        downconv = nn.Conv2d(outer_nc, inner_nc, kernel_size=4,
                             stride=2, padding=1)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc, affine=True)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(outer_nc, affine=True)

        if outermost:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1)
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]

            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            else:
                model = down + [submodule] + up

        self.model = nn.Sequential(*model)
Project: pytorch-dist    Author: apaszke
def test_ConvTranspose2d_output_size(self):
        m = nn.ConvTranspose2d(3, 4, 3, 3)
        i = Variable(torch.randn(2, 3, 6, 6))
        for h in range(15, 22):
            for w in range(15, 22):
                if 18 <= h <= 20 and 18 <= w <= 20:
                    size = (h, w)
                    if h == 19:
                        size = torch.LongStorage(size)
                    elif h == 2:
                        size = torch.LongStorage((2, 4) + size)
                    m(i, output_size=(h, w))
                else:
                    self.assertRaises(ValueError, lambda: m(i, (h, w)))
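The test above exercises a subtlety worth spelling out: with stride > 1, several output sizes map back to the same input size, so ConvTranspose2d lets the caller disambiguate via output_size. A minimal sketch:

import torch
import torch.nn as nn

m = nn.ConvTranspose2d(3, 4, kernel_size=3, stride=3)
x = torch.randn(2, 3, 6, 6)
print(m(x).shape)                        # defaults to the minimum: (2, 4, 18, 18)
print(m(x, output_size=(20, 20)).shape)  # any size in [18, 20] is valid here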
Project: DeepIllumination    Author: CreativeCodingLab
def __init__(self, n_channel_input, n_channel_output, n_filters):
        super(G, self).__init__()
        self.conv1 = nn.Conv2d(n_channel_input, n_filters, 4, 2, 1)
        self.conv2 = nn.Conv2d(n_filters, n_filters * 2, 4, 2, 1)
        self.conv3 = nn.Conv2d(n_filters * 2, n_filters * 4, 4, 2, 1)
        self.conv4 = nn.Conv2d(n_filters * 4, n_filters * 8, 4, 2, 1)
        self.conv5 = nn.Conv2d(n_filters * 8, n_filters * 8, 4, 2, 1)
        self.conv6 = nn.Conv2d(n_filters * 8, n_filters * 8, 4, 2, 1)
        self.conv7 = nn.Conv2d(n_filters * 8, n_filters * 8, 4, 2, 1)
        self.conv8 = nn.Conv2d(n_filters * 8, n_filters * 8, 4, 2, 1)

        self.deconv1 = nn.ConvTranspose2d(n_filters * 8, n_filters * 8, 4, 2, 1)
        self.deconv2 = nn.ConvTranspose2d(n_filters * 8 * 2, n_filters * 8, 4, 2, 1)
        self.deconv3 = nn.ConvTranspose2d(n_filters * 8 * 2, n_filters * 8, 4, 2, 1)
        self.deconv4 = nn.ConvTranspose2d(n_filters * 8 * 2, n_filters * 8, 4, 2, 1)
        self.deconv5 = nn.ConvTranspose2d(n_filters * 8 * 2, n_filters * 4, 4, 2, 1)
        self.deconv6 = nn.ConvTranspose2d(n_filters * 4 * 2, n_filters * 2, 4, 2, 1)
        self.deconv7 = nn.ConvTranspose2d(n_filters * 2 * 2, n_filters, 4, 2, 1)
        self.deconv8 = nn.ConvTranspose2d(n_filters * 2, n_channel_output, 4, 2, 1)

        self.batch_norm = nn.BatchNorm2d(n_filters)
        self.batch_norm2 = nn.BatchNorm2d(n_filters * 2)
        self.batch_norm4 = nn.BatchNorm2d(n_filters * 4)
        self.batch_norm8 = nn.BatchNorm2d(n_filters * 8)

        self.leaky_relu = nn.LeakyReLU(0.2, True)
        self.relu = nn.ReLU(True)

        self.dropout = nn.Dropout(0.5)

        self.tanh = nn.Tanh()
Project: torch_light    Author: ne7ermore
def __init__(self, out_h, out_w, channel_dims, z_dim=100):
        super().__init__()

        assert len(channel_dims) == 4, "length of channel dims should be 4"

        conv1_dim, conv2_dim, conv3_dim, conv4_dim = channel_dims
        conv1_h, conv2_h, conv3_h, conv4_h = map(conv_size, [(out_h, step) for step in [4, 3, 2, 1]])
        conv1_w, conv2_w, conv3_w, conv4_w = map(conv_size, [(out_w, step) for step in [4, 3, 2, 1]])

        self.fc = nn.Linear(z_dim, conv1_dim*conv1_h*conv1_w)
        # deconvolution ladder; kernel/stride arguments and the final output
        # channel count (assumed RGB) were elided and are restored as assumptions
        self.deconvs = nn.Sequential(
                nn.BatchNorm2d(conv1_dim),
                nn.ReLU(),

                nn.ConvTranspose2d(conv1_dim, conv2_dim, 4, 2, 1),
                nn.BatchNorm2d(conv2_dim),
                nn.ReLU(),

                nn.ConvTranspose2d(conv2_dim, conv3_dim, 4, 2, 1),
                nn.BatchNorm2d(conv3_dim),
                nn.ReLU(),

                nn.ConvTranspose2d(conv3_dim, conv4_dim, 4, 2, 1),
                nn.BatchNorm2d(conv4_dim),
                nn.ReLU(),

                nn.ConvTranspose2d(conv4_dim, 3, 4, 2, 1),
                nn.Tanh(),
            )
        self.conv1_size = (conv1_dim, conv1_h, conv1_w)

        self._init_weight()
Project: torch_light    Author: ne7ermore
def _init_weight(self):
        self.fc.weight.data.normal_(.0, 0.02)
        for layer in self.deconvs:
            if isinstance(layer, nn.ConvTranspose2d):
                layer.weight.data.normal_(.0, 0.02)
            if isinstance(layer, nn.BatchNorm2d):
                layer.weight.data.normal_(1., 0.02)
                layer.bias.data.fill_(0)
Project: mnist-svhn-transfer    Author: yunjey
def deconv(c_in, c_out, k_size, stride=2, pad=1, bn=True):
    """Custom deconvolutional layer for simplicity."""
    layers = []
    layers.append(nn.ConvTranspose2d(c_in, c_out, k_size, stride, pad, bias=False))
    if bn:
        layers.append(nn.BatchNorm2d(c_out))
    return nn.Sequential(*layers)
Project: simple-pix2pix-pytorch    Author: Eiji-Kb
def __init__(self, in_channels, out_channels):
        super(Generator, self).__init__()

        # encoder/decoder channel widths recovered from the BatchNorm2d layers
        # below; kernel_size=4, stride=2 are assumed (only padding=1 survived)
        self.c0 = nn.Conv2d(in_channels, 64, kernel_size=4, stride=2, padding=1)
        self.c1 = nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1)
        self.c2 = nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1)
        self.c3 = nn.Conv2d(256, 512, kernel_size=4, stride=2, padding=1)
        self.c4 = nn.Conv2d(512, 512, kernel_size=4, stride=2, padding=1)
        self.c5 = nn.Conv2d(512, 512, kernel_size=4, stride=2, padding=1)
        self.c6 = nn.Conv2d(512, 512, kernel_size=4, stride=2, padding=1)
        self.c7 = nn.Conv2d(512, 512, kernel_size=4, stride=2, padding=1)

        self.d7 = nn.ConvTranspose2d(512, 512, kernel_size=4, stride=2, padding=1)
        self.d6 = nn.ConvTranspose2d(1024, 512, kernel_size=4, stride=2, padding=1)
        self.d5 = nn.ConvTranspose2d(1024, 512, kernel_size=4, stride=2, padding=1)
        self.d4 = nn.ConvTranspose2d(1024, 512, kernel_size=4, stride=2, padding=1)
        self.d3 = nn.ConvTranspose2d(1024, 256, kernel_size=4, stride=2, padding=1)
        self.d2 = nn.ConvTranspose2d(512, 128, kernel_size=4, stride=2, padding=1)
        self.d1 = nn.ConvTranspose2d(256, 64, kernel_size=4, stride=2, padding=1)
        self.d0 = nn.ConvTranspose2d(128, out_channels, kernel_size=4, stride=2, padding=1)

        self.bnc1 = nn.BatchNorm2d(128)
        self.bnc2 = nn.BatchNorm2d(256)
        self.bnc3 = nn.BatchNorm2d(512)
        self.bnc4 = nn.BatchNorm2d(512)
        self.bnc5 = nn.BatchNorm2d(512)
        self.bnc6 = nn.BatchNorm2d(512)

        self.bnd7 = nn.BatchNorm2d(512)
        self.bnd6 = nn.BatchNorm2d(512)
        self.bnd5 = nn.BatchNorm2d(512)
        self.bnd4 = nn.BatchNorm2d(512)
        self.bnd3 = nn.BatchNorm2d(256)
        self.bnd2 = nn.BatchNorm2d(128)
        self.bnd1 = nn.BatchNorm2d(64)
Project: MMD-GAN    Author: OctoberChang
def __init__(self, isize, nc, k=100, ngf=64):  # nc (output channels) restored; used below
        super(Decoder, self).__init__()
        assert isize % 16 == 0, "isize has to be a multiple of 16"

        cngf, tisize = ngf // 2, 4
        while tisize != isize:
            cngf = cngf * 2
            tisize = tisize * 2

        main = nn.Sequential()
        main.add_module('initial.{0}-{1}.convt'.format(k, cngf), nn.ConvTranspose2d(k, cngf, 4, 1, 0, bias=False))
        main.add_module('initial.{0}.batchnorm'.format(cngf), nn.BatchNorm2d(cngf))
        main.add_module('initial.{0}.relu'.format(cngf), nn.ReLU(True))

        csize = 4
        while csize < isize // 2:
            main.add_module('pyramid.{0}-{1}.convt'.format(cngf, cngf // 2),
                            nn.ConvTranspose2d(cngf, cngf // 2, 4, 2, 1, bias=False))
            main.add_module('pyramid.{0}.batchnorm'.format(cngf // 2),
                            nn.BatchNorm2d(cngf // 2))
            main.add_module('pyramid.{0}.relu'.format(cngf // 2),
                            nn.ReLU(True))
            cngf = cngf // 2
            csize = csize * 2

        main.add_module('final.{0}-{1}.convt'.format(cngf, nc), nn.ConvTranspose2d(cngf, nc, 4, 2, 1, bias=False))
        main.add_module('final.{0}.tanh'.format(nc),
                        nn.Tanh())

        self.main = main
Project: pytorch-tutorial    Author: yunjey
def deconv(c_in, c_out, k_size, stride=2, pad=1, bn=True):
    """Custom deconvolutional layer for simplicity."""
    layers = []
    layers.append(nn.ConvTranspose2d(c_in, c_out, k_size, stride, pad))
    if bn:
        layers.append(nn.BatchNorm2d(c_out))
    return nn.Sequential(*layers)
Project: FlowNetPytorch    Author: ClementPinard
def deconv(in_planes, out_planes):
    return nn.Sequential(
        nn.ConvTranspose2d(in_planes, out_planes, kernel_size=4, stride=2, padding=1, bias=False),
        nn.LeakyReLU(0.1, inplace=True)
    )
Project: FlowNetPytorch    Author: ClementPinard
def __init__(self, batchnorm=True):
        super(FlowNetS, self).__init__()

        self.batchnorm = batchnorm
        self.conv1   = conv(self.batchnorm,   6,   64, kernel_size=7, stride=2)
        self.conv2   = conv(self.batchnorm,  64,  128, kernel_size=5, stride=2)
        self.conv3   = conv(self.batchnorm, 128,  256, kernel_size=5, stride=2)
        self.conv3_1 = conv(self.batchnorm, 256,  256)
        self.conv4   = conv(self.batchnorm, 256,  512, stride=2)
        self.conv4_1 = conv(self.batchnorm, 512,  512)
        self.conv5   = conv(self.batchnorm, 512,  512, stride=2)
        self.conv5_1 = conv(self.batchnorm, 512,  512)
        self.conv6   = conv(self.batchnorm, 512, 1024, stride=2)
        self.conv6_1 = conv(self.batchnorm, 1024, 1024)

        self.deconv5 = deconv(1024,512)
        self.deconv4 = deconv(1026,256)
        self.deconv3 = deconv(770,128)
        self.deconv2 = deconv(386,64)

        self.predict_flow6 = predict_flow(1024)
        self.predict_flow5 = predict_flow(1026)
        self.predict_flow4 = predict_flow(770)
        self.predict_flow3 = predict_flow(386)
        self.predict_flow2 = predict_flow(194)

        self.upsampled_flow6_to_5 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)
        self.upsampled_flow5_to_4 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)
        self.upsampled_flow4_to_3 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)
        self.upsampled_flow3_to_2 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)

        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
                kaiming_normal(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Project: unet-pytorch    Author: jaxony
def upconv2x2(in_channels, out_channels, mode='transpose'):
    if mode == 'transpose':
        return nn.ConvTranspose2d(
            in_channels,
            out_channels,
            kernel_size=2,
            stride=2)
    else:
        # out_channels is always going to be the same
        # as in_channels
        return nn.Sequential(
            nn.Upsample(mode='bilinear', scale_factor=2),
            conv1x1(in_channels, out_channels))
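Both modes produce the same output shape, which the following check illustrates (a sketch: it assumes upconv2x2 is defined as above, and a conv1x1 helper like the project's is stubbed inline for self-containment):

import torch
import torch.nn as nn

def conv1x1(in_channels, out_channels):
    # stand-in for the project's conv1x1 helper
    return nn.Conv2d(in_channels, out_channels, kernel_size=1)

x = torch.randn(1, 64, 32, 32)
up_t = upconv2x2(64, 32, mode='transpose')
up_b = upconv2x2(64, 32, mode='bilinear')
print(up_t(x).shape, up_b(x).shape)  # both: torch.Size([1, 32, 64, 64])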
Project: crowdcount-cascaded-mtl    Author: svishwa
def __init__(self, bn=False, num_classes=10):
        super(CMTL, self).__init__()

        self.num_classes = num_classes
        # elided arguments restored following the cascaded-MTL layout;
        # values not preserved in the listing are assumptions
        self.base_layer = nn.Sequential(Conv2d( 1, 16, 9, same_padding=True, NL='prelu', bn=bn),
                                        Conv2d(16, 32, 7, same_padding=True, NL='prelu', bn=bn))

        self.hl_prior_1 = nn.Sequential(Conv2d( 32, 16, 9, same_padding=True, NL='prelu', bn=bn),
                                     nn.MaxPool2d(2),
                                     Conv2d(16, 32, 7, same_padding=True, NL='prelu', bn=bn),
                                     nn.MaxPool2d(2),
                                     Conv2d(32, 16, 7, same_padding=True, NL='prelu', bn=bn),
                                     Conv2d(16, 8,  7, same_padding=True, NL='prelu', bn=bn))

        self.hl_prior_2 = nn.Sequential(nn.AdaptiveMaxPool2d((32,32)),
                                        Conv2d( 8, 4, 1, same_padding=True, NL='prelu', bn=bn))

        self.hl_prior_fc1 = FC(4*1024,512, NL='prelu')
        self.hl_prior_fc2 = FC(512,256,    NL='prelu')
        self.hl_prior_fc3 = FC(256, self.num_classes,     NL='prelu')


        self.de_stage_1 = nn.Sequential(Conv2d( 32, 20, 7, same_padding=True, NL='prelu', bn=bn),
                                     nn.MaxPool2d(2),
                                     Conv2d(20, 40, 5, same_padding=True, NL='prelu', bn=bn),
                                     nn.MaxPool2d(2),
                                     Conv2d(40, 20, 5, same_padding=True, NL='prelu', bn=bn),
                                     Conv2d(20, 10, 5, same_padding=True, NL='prelu', bn=bn))

        self.de_stage_2 = nn.Sequential(Conv2d( 18, 24, 3, same_padding=True, NL='prelu', bn=bn),
                                        Conv2d( 24, 32, 3, same_padding=True, NL='prelu', bn=bn),
                                        nn.ConvTranspose2d(32,16,4,stride=2,padding=1,output_padding=0,bias=True),
                                        nn.PReLU(),
                                        nn.ConvTranspose2d(16,8,4,stride=2,padding=1,output_padding=0,bias=True),
                                        nn.PReLU(),
                                        Conv2d(8, 1, 1, same_padding=True, NL='relu', bn=bn))
Project: carvana-challenge    Author: chplushsieh
def __init__(self, in_: int, out: int, *, bn=True, activation='relu', up='upconv'):
        super().__init__()
        self.l1 = Conv3BN(in_, out, bn, activation)
        self.l2 = Conv3BN(out, out, bn, activation)

        if up == 'upconv':
            # output channels and kernel size (2) restored as assumptions
            self.up = nn.ConvTranspose2d(in_, out, 2, stride=2)
        elif up == 'upsample':
            self.up = nn.Upsample(scale_factor=2)
Project: carvana-challenge    Author: chplushsieh
def __init__(self, in_: int, out: int, *, bn=True, activation='relu', up='upsample'):
        super().__init__()
        # signature and the 'upconv' branch restored to mirror the block above (assumed)
        self.l1 = Conv3BN(in_, out, bn, activation)
        self.l3 = Conv3BN(out, out, bn, activation)
        self.l4 = Conv3BN(out, out, bn, activation)

        if up == 'upconv':
            self.up = nn.ConvTranspose2d(in_, out, 2, stride=2)
        elif up == 'upsample':
            self.up = nn.Upsample(scale_factor=2)
