增加 WGAN 网络中的 image_size

如何解决增加 WGAN 网络中的 image_size

我正在使用 WGAN 来合成医学图像。不过目前Img_size是64,分辨率太低了。

如何更改生成器和鉴别器以实现 128*128 高分辨率?

下面是我的代码。

# Severity grade of the medical images to train on; selects the data subdirectory.
grade =4

# Root directory of the (preprocessed) training images for this grade.
dataroot = f"../processed/{grade}/test/"

# Number of workers for dataloader
workers = 2

# Batch size during training
batch_size = 128

# Spatial size of training images. All images will be resized to this
#   size using a transformer.
image_size = 64

# Number of channels in the training images. For color images this is 3
nc = 3

# Size of z latent vector (i.e. size of generator input)
nz = 64

# Size of feature maps in generator
ngf = 64

# Size of feature maps in discriminator
ndf = 64

# Learning rate for optimizers
lr = 0.0002

# Beta1 hyperparam for Adam optimizers
beta1 = 0.5

# Number of GPUs available. Use 0 for CPU mode.
ngpu = 1

以及 weights_init 权重初始化函数

def weights_init(m):
    """DCGAN-style initialisation, applied via ``net.apply(weights_init)``.

    Conv/ConvTranspose weights are drawn from N(0, 0.02); BatchNorm scale
    from N(1, 0.02) with the shift zeroed.  Other modules are untouched.
    """
    layer_name = m.__class__.__name__
    if 'Conv' in layer_name:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm' in layer_name:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)

生成器(Generator)

class Generator(nn.Module):
    """DCGAN-style generator: latent vector z (nz-dim) -> (nc) x 64 x 64 image.

    Reconstructed from a scrape-truncated paste in which every
    ``ConvTranspose2d`` had lost its kernel/stride/padding arguments and the
    interleaved ``ReLU`` layers.  The spatial path follows the original's
    "state size" comments: 1x1 -> 2x2 -> 4x4 -> 8x8 -> 16x16 -> 32x32 -> 64x64.

    Relies on module-level constants ``nz``, ``ngf``, ``nc`` defined above.
    """

    def __init__(self, ngpu):
        super(Generator, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # input is Z (nz x 1 x 1), going into a convolution
            nn.ConvTranspose2d(nz, ngf * 16, 2, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 16),
            nn.ReLU(True),
            # state size. (ngf*16) x 2 x 2
            nn.ConvTranspose2d(ngf * 16, ngf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            # state size. (ngf) x 32 x 32
            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
            nn.Tanh()
            # state size. (nc) x 64 x 64, values in [-1, 1]
        )

    def forward(self, input):
        # The training loop samples noise as (b_size, nz); reshape to
        # (b_size, nz, 1, 1) so it can feed the first transposed convolution.
        # An already-4-D (b, nz, 1, 1) input passes through unchanged.
        return self.main(input.view(input.size(0), -1, 1, 1))

# Create the generator and move it to the training device
# (``device`` is defined elsewhere in the file).
netG = Generator(ngpu).to(device)

# Handle multi-gpu if desired
if (device.type == 'cuda') and (ngpu > 1):
    netG = nn.DataParallel(netG,list(range(ngpu)))

# Apply the weights_init function to randomly initialize all weights
#  to mean=0, stdev=0.02 (DCGAN convention).
netG.apply(weights_init)

鉴别器(评论家)

class Discriminator(nn.Module):
    """WGAN critic: (nc) x 64 x 64 image -> unbounded scalar score.

    Reconstructed from a scrape-truncated paste in which the ``Conv2d`` calls
    had lost their kernel/stride/padding arguments and activations.  The
    spatial path follows the original's "state size" comments:
    64 -> 32 -> 16 -> 8 -> 4 -> 1.  The final Sigmoid is deliberately omitted
    (it was commented out in the original): a WGAN critic outputs a raw score,
    not a probability.

    NOTE(review): WGAN-GP typically avoids BatchNorm in the critic because it
    couples samples within a batch and interacts badly with the per-sample
    gradient penalty — confirm against your compute_gradient_penalty
    implementation (LayerNorm/InstanceNorm are the usual substitutes).

    Relies on module-level constants ``nc``, ``ndf`` defined above.
    """

    def __init__(self, ngpu):
        super(Discriminator, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # input is (nc) x 64 x 64
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 32 x 32
            nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 16 x 16
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 8 x 8
            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 4 x 4
            nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False)
            # no Sigmoid: WGAN critic returns a score in R
        )

    def forward(self, input):
        return self.main(input)  # feedforward


# Create the Discriminator (WGAN critic) on the training device
# (``device`` is defined elsewhere in the file).
netD = Discriminator(ngpu).to(device)

# Handle multi-gpu if desired
if (device.type == 'cuda') and (ngpu > 1):
    netD = nn.DataParallel(netD,list(range(ngpu)))
    
# Apply the weights_init function to randomly initialize all weights
#  to mean=0, stdev=0.02 (DCGAN convention).
netD.apply(weights_init)

这是训练代码

# Training Loop
#
# WGAN-GP style alternating updates: the critic (netD) is trained to
# maximise D(real) - D(fake) subject to a gradient penalty, and the
# generator minimises -D(G(z)).  Relies on names defined elsewhere in the
# file: num_epochs, dataloader, device, nz, lambda_gp,
# compute_gradient_penalty, optimizerD, optimizerG, g_iters, fixed_noise,
# vutils.

# Lists to keep track of progress
img_list = []
G_losses = []
D_losses = []
iters = 0

print("Starting Training Loop...")
# For each epoch
for epoch in range(num_epochs):
  # For each batch in the dataloader
  for i,data in enumerate(dataloader,0): #enumerate(dataloader,0)
    ############################
    # (1) Update D network: minimize -E(D(x)) + E(D(G(z))) + lambda_gp*E(|grad(D(y)) - 1|^2)
    ###########################
    ## Train with all-real batch
    netD.zero_grad()
    # Format batch
    real_cpu = data[0].to(device) #data[0] mini batch
    b_size = real_cpu.size(0) #minibatch
    # Forward pass real batch through D
    Dreal = netD(real_cpu).view(-1) 
    # Generate batch of latent vectors
    # NOTE(review): this is a 2-D (b_size, nz) tensor, but the generator's
    # first layer is a ConvTranspose2d which needs (b_size, nz, 1, 1) —
    # confirm the generator reshapes its input before the first layer.
    noise = torch.randn(b_size,nz,device=device) #(b_size,1)
    # Generate fake image batch with G
    fake = netG(noise) 
    # Calculate the critic for all fake batch
    # (detach so critic gradients don't flow into the generator)
    Dfake = netD(fake.detach()).view(-1) 
    # Calculate loss on all batch
    errD = -Dreal.mean() + Dfake.mean() + lambda_gp * compute_gradient_penalty(real_images=real_cpu,fake_images=fake) #gradient penalty loss function
    errD.backward() 
    D_x = Dreal.mean().item() #D(real data)
    D_G_z1 = Dfake.mean().item() 
    # Update D
    optimizerD.step()
    netG.zero_grad() 
    ############################
    # (2) Update G network: minimize -E(C(G(z)))
    ###########################
    # NOTE(review): every generator step in this inner loop reuses the same
    # `noise` batch sampled above; WGAN implementations usually draw fresh
    # noise per step — verify this is intentional.
    for j in range(g_iters):
        netG.zero_grad()
        fake = netG(noise) 
        # Since we just updated D,perform another forward pass of all-fake batch through D
        output = netD(fake).view(-1) #D(G(z))
        # Calculate G's loss based on this output
        errG = -output.mean() 
        # Calculate gradients for G
        errG.backward() 
        D_G_z2 = output.mean().item() 
        # Update G
        optimizerG.step() 



    # Output training stats (every 50 batches)
    if i % 50 == 0:
        print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f'
              % (epoch,num_epochs,i,len(dataloader),errD.item(),errG.item(),D_x,D_G_z1,D_G_z2))
        #print('{}'.format(datetime.datetime.now()))
    
    # Save Losses for plotting later
    G_losses.append(errG.item())
    D_losses.append(errD.item())
    
    # Check how the generator is doing by saving G's output on fixed_noise
    if (iters % 500 == 0) or ((epoch == num_epochs-1) and (i == len(dataloader)-1)):
        with torch.no_grad():
            fake = netG(fixed_noise).detach().cpu()
        img_list.append(vutils.make_grid(fake,padding=0,normalize=True))
        
    iters += 1

那是我的原始代码。我试着像这样改变。

# --- Modified hyperparameters for the 128 x 128 attempt ---
# Changes from the 64 x 64 run: image_size 64 -> 128, ngf/ndf 64 -> 32.
grade =4

dataroot = f"../processed/{grade}/test/"

# Number of workers for dataloader
workers = 2

# Batch size during training
batch_size = 128

# Spatial size of training images. All images will be resized to this
#   size using a transformer. (originally 64)
image_size = 128

# Number of channels in the training images. For color images this is 3
nc = 3

# Size of z latent vector (i.e. size of generator input)
nz = 64 

# Size of feature maps in generator (originally 64)
ngf = 32

# Size of feature maps in discriminator (originally 64)
ndf = 32

# Learning rate for optimizers
lr = 0.0002

# Beta1 hyperparam for Adam optimizers
beta1 = 0.5

# Number of GPUs available. Use 0 for CPU mode.
ngpu = 1

生成器代码

class Generator(nn.Module):
    def __init__(self,ngf * 32,nn.BatchNorm2d(ngf * 32),#in-place option = True?
            
            
            nn.ConvTranspose2d(ngf*32,ngf*16,nn.BatchNorm2d(ngf*16),# state size. (ngf*16) x 2 x 2
            
            
            nn.ConvTranspose2d(ngf*16,stdev=0.2.
netG.apply(weights_init)

和鉴别器

class Discriminator(nn.Module):
    def __init__(self,ndf * 16,nn.BatchNorm2d(ndf * 16),nn.Conv2d(ndf * 16,stdev=0.2.
netD.apply(weights_init)

但是,如果我像这样更改代码,训练时 Loss_G 会变成正数,而它应该是负数,因此训练不起作用。下面是输出(Loss_D 也完全没有下降):

[0/500][0/10]   Loss_D: 53.7392 Loss_G: 0.1816  D(x): 0.0360    D(G(z)): -0.1816 / -0.1816
[1/500][0/10]   Loss_D: 55.9949 Loss_G: 0.1955  D(x): 0.0360    D(G(z)): -0.1955 / -0.1955
[2/500][0/10]   Loss_D: 66.4417 Loss_G: 0.1168  D(x): 0.0307    D(G(z)): -0.1168 / -0.1168
[3/500][0/10]   Loss_D: 66.9297 Loss_G: 0.2704  D(x): 0.0505    D(G(z)): -0.2704 / -0.2704
[4/500][0/10]   Loss_D: 69.5664 Loss_G: 0.1803  D(x): -0.0246   D(G(z)): -0.1803 / -0.1803
[5/500][0/10]   Loss_D: 65.0955 Loss_G: 0.1722  D(x): 0.0723    D(G(z)): -0.1722 / -0.1722
[6/500][0/10]   Loss_D: 58.5108 Loss_G: 0.2078  D(x): 0.0157    D(G(z)): -0.2078 / -0.2078
[7/500][0/10]   Loss_D: 64.6462 Loss_G: 0.2459  D(x): 0.0378    D(G(z)): -0.2459 / -0.2459
[8/500][0/10]   Loss_D: 64.6244 Loss_G: 0.2015  D(x): 0.0806    D(G(z)): -0.2015 / -0.2015
[9/500][0/10]   Loss_D: 52.2686 Loss_G: 0.1944  D(x): -0.0109   D(G(z)): -0.1944 / -0.1944
[10/500][0/10]  Loss_D: 59.8826 Loss_G: 0.2005  D(x): 0.0591    D(G(z)): -0.2005 / -0.2005
[11/500][0/10]  Loss_D: 56.6620 Loss_G: 0.1919  D(x): -0.0113   D(G(z)): -0.1919 / -0.1919
[12/500][0/10]  Loss_D: 69.9521 Loss_G: 0.1773  D(x): -0.0062   D(G(z)): -0.1773 / -0.1773

感谢您阅读我的问题。

版权声明:本文内容由互联网用户自发贡献,该文观点与技术仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容, 请发送邮件至 dio@foxmail.com 举报,一经查实,本站将立刻删除。

相关推荐


使用本地python环境可以成功执行 import pandas as pd import matplotlib.pyplot as plt # 设置字体 plt.rcParams['font.sans-serif'] = ['SimHei'] # 能正确显示负号 p
错误1:Request method ‘DELETE‘ not supported 错误还原:controller层有一个接口,访问该接口时报错:Request method ‘DELETE‘ not supported 错误原因:没有接收到前端传入的参数,修改为如下 参考 错误2:cannot r
错误1:启动docker镜像时报错:Error response from daemon: driver failed programming external connectivity on endpoint quirky_allen 解决方法:重启docker -> systemctl r
错误1:private field ‘xxx‘ is never assigned 按Altʾnter快捷键,选择第2项 参考:https://blog.csdn.net/shi_hong_fei_hei/article/details/88814070 错误2:启动时报错,不能找到主启动类 #
报错如下,通过源不能下载,最后警告pip需升级版本 Requirement already satisfied: pip in c:\users\ychen\appdata\local\programs\python\python310\lib\site-packages (22.0.4) Coll
错误1:maven打包报错 错误还原:使用maven打包项目时报错如下 [ERROR] Failed to execute goal org.apache.maven.plugins:maven-resources-plugin:3.2.0:resources (default-resources)
错误1:服务调用时报错 服务消费者模块assess通过openFeign调用服务提供者模块hires 如下为服务提供者模块hires的控制层接口 @RestController @RequestMapping("/hires") public class FeignControl
错误1:运行项目后报如下错误 解决方案 报错2:Failed to execute goal org.apache.maven.plugins:maven-compiler-plugin:3.8.1:compile (default-compile) on project sb 解决方案:在pom.
参考 错误原因 过滤器或拦截器在生效时,redisTemplate还没有注入 解决方案:在注入容器时就生效 @Component //项目运行时就注入Spring容器 public class RedisBean { @Resource private RedisTemplate<String
使用vite构建项目报错 C:\Users\ychen\work>npm init @vitejs/app @vitejs/create-app is deprecated, use npm init vite instead C:\Users\ychen\AppData\Local\npm-
参考1 参考2 解决方案 # 点击安装源 协议选择 http:// 路径填写 mirrors.aliyun.com/centos/8.3.2011/BaseOS/x86_64/os URL类型 软件库URL 其他路径 # 版本 7 mirrors.aliyun.com/centos/7/os/x86
报错1 [root@slave1 data_mocker]# kafka-console-consumer.sh --bootstrap-server slave1:9092 --topic topic_db [2023-12-19 18:31:12,770] WARN [Consumer clie
错误1 # 重写数据 hive (edu)> insert overwrite table dwd_trade_cart_add_inc > select data.id, > data.user_id, > data.course_id, > date_format(
错误1 hive (edu)> insert into huanhuan values(1,'haoge'); Query ID = root_20240110071417_fe1517ad-3607-41f4-bdcf-d00b98ac443e Total jobs = 1
报错1:执行到如下就不执行了,没有显示Successfully registered new MBean. [root@slave1 bin]# /usr/local/software/flume-1.9.0/bin/flume-ng agent -n a1 -c /usr/local/softwa
虚拟及没有启动任何服务器查看jps会显示jps,如果没有显示任何东西 [root@slave2 ~]# jps 9647 Jps 解决方案 # 进入/tmp查看 [root@slave1 dfs]# cd /tmp [root@slave1 tmp]# ll 总用量 48 drwxr-xr-x. 2
报错1 hive> show databases; OK Failed with exception java.io.IOException:java.lang.RuntimeException: Error in configuring object Time taken: 0.474 se
报错1 [root@localhost ~]# vim -bash: vim: 未找到命令 安装vim yum -y install vim* # 查看是否安装成功 [root@hadoop01 hadoop]# rpm -qa |grep vim vim-X11-7.4.629-8.el7_9.x
修改hadoop配置 vi /usr/local/software/hadoop-2.9.2/etc/hadoop/yarn-site.xml # 添加如下 <configuration> <property> <name>yarn.nodemanager.res