
Is there a way to delete a pygame sprite image using a function?


As you can probably tell from "self.image", it represents the player's health. I want to know whether there is any way to use "healthDeleted(self)" to delete (or change) self.image once a certain condition is met.

 <v-select
  label="History. Version #"
  :items="listVersions"
  @input="getSelectedVersion"
/>

async getSelectedVersion (value) {
  if ('_id' in this.entity) {
    await this.getHistory({
      id: this.entity._id.$oid,
      version: value,
    })
  }
  this.$emit('toggle', 1)
},

Solution

If you want to delete a pygame.sprite.Sprite object, it has to be removed from all of the pygame.sprite.Groups it belongs to. A Sprite can be removed from every Group with kill:

remove the Sprite from all Groups
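
A minimal sketch of how kill could be used here, assuming the player sprite has been added to one or more sprite groups; the Player class, attribute names, and the health condition below are placeholders, not code from the original post:

import pygame

class Player(pygame.sprite.Sprite):
    def __init__(self, image, pos):
        super().__init__()
        self.image = image                          # image representing the player's health
        self.rect = self.image.get_rect(topleft=pos)
        self.health = 3                             # hypothetical health counter

    def healthDeleted(self):
        # Hypothetical condition: once health is used up, kill() removes this
        # sprite from every Group it belongs to, so it is no longer drawn or updated.
        if self.health <= 0:
            self.kill()

If you only want to change the image rather than remove the sprite, assign a new Surface to self.image inside healthDeleted (and refresh self.rect if the size changes).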
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model, layers


class Generator(Model):
    alpha = 0.5

    # Set layers.
    def __init__(self, out_channel_dim):
        super(Generator, self).__init__()
        self.out_channel_dim = out_channel_dim
        self.fc1 = layers.Dense(4 * 4 * 512)
        self.bn1 = layers.BatchNormalization()
        self.conv2tr1 = layers.Conv2DTranspose(256, 5, strides=2, padding='VALID')
        self.bn2 = layers.BatchNormalization()
        # Second deconvolution block (the filter count of 128 is an assumption).
        self.conv2tr2 = layers.Conv2DTranspose(128, 5, strides=2, padding='SAME')
        self.bn3 = layers.BatchNormalization()
        # Kernel size and stride assumed so the shapes match the comments below.
        self.conv2tr3 = layers.Conv2DTranspose(self.out_channel_dim, 5, strides=2, padding='SAME')

    # Set forward pass.
    def call(self, x, is_training=False):
        x = self.fc1(x)
        # Reshape to a 4-D array of images: (batch, height, width, channels)
        # New shape: (batch, 4, 4, 512)
        x = tf.reshape(x, shape=[-1, 4, 4, 512])
        x = self.bn1(x, training=is_training)
        x = layers.Maximum()([self.alpha * x, x])   # leaky-ReLU-style activation
        # First deconvolution, image shape: (batch, 11, 11, 256)
        x = self.conv2tr1(x)
        x = self.bn2(x, training=is_training)
        x = layers.Maximum()([self.alpha * x, x])
        # Second deconvolution, image shape: (batch, 22, 22, 128)
        x = self.conv2tr2(x)
        x = self.bn3(x, training=is_training)
        x = layers.Maximum()([self.alpha * x, x])
        # Third deconvolution, image shape: (batch, 44, 44, out_channel_dim)
        x = self.conv2tr3(x)
        x = tf.nn.tanh(x)
        return x

class Discriminator(Model):
    alpha = 0.2

    # Set layers.
    def __init__(self, out_channel_dim):
        super(Discriminator, self).__init__()
        self.out_channel_dim = out_channel_dim
        # Kernel size and stride assumed to produce the 22x22 shape noted below.
        self.conv1 = layers.Conv2D(64, 5, strides=2, padding='SAME')
        self.bn1 = layers.BatchNormalization()
        self.flatten = layers.Flatten()
        self.cf1 = layers.Dense(1)

    # Set forward pass.
    def call(self, x, is_training=False):
        # First convolution, image shape: (batch, 22, 22, 64)
        x = self.conv1(x)
        x = self.bn1(x, training=is_training)
        x = layers.Maximum()([self.alpha * x, x])

        # Fully connected layers
        x = self.flatten(x)
        x = self.cf1(x)
        return tf.math.log_sigmoid(x)

generator = Generator(out_channel_dim)
discriminator = Discriminator(out_channel_dim)

# Losses.
def generator_loss(reconstructed_image):
    gen_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=reconstructed_image, labels=tf.ones_like(reconstructed_image)))
    return gen_loss

def discriminator_loss(disc_fake, disc_real):
    disc_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=disc_real, labels=tf.ones_like(disc_real)))
    disc_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=disc_fake, labels=tf.zeros_like(disc_fake)))
    return disc_loss_real + disc_loss_fake

# Optimizers.
lr_generator = 0.0002
lr_discriminator = 0.0002
optimizer_gen = tf.optimizers.Adam(learning_rate=lr_generator)      # , beta_1=0.5, beta_2=0.999
optimizer_disc = tf.optimizers.Adam(learning_rate=lr_discriminator)  # , beta_1=0.5, beta_2=0.999

# Optimization process. Inputs: real images and noise.
def run_optimization(real_images):

    # Generate noise.
    noise = np.random.normal(-1., 1., size=[batch_size, noise_dim]).astype(np.float32)

    # Discriminator update.
    with tf.GradientTape() as g:

        fake_images = generator(noise, is_training=True)
        disc_fake = discriminator(fake_images, is_training=True)
        disc_real = discriminator(real_images, is_training=True)

        disc_loss = discriminator_loss(disc_fake, disc_real)

    # Training variables for each optimizer
    gradients_disc = g.gradient(disc_loss, discriminator.trainable_variables)
    optimizer_disc.apply_gradients(zip(gradients_disc, discriminator.trainable_variables))

    # Generate noise.
    noise = np.random.normal(-1., 1., size=[batch_size, noise_dim]).astype(np.float32)

    # Generator update.
    with tf.GradientTape() as g:

        fake_images = generator(noise, is_training=True)
        disc_fake = discriminator(fake_images, is_training=True)

        gen_loss = generator_loss(disc_fake)

    gradients_gen = g.gradient(gen_loss, generator.trainable_variables)
    optimizer_gen.apply_gradients(zip(gradients_gen, generator.trainable_variables))

    return gen_loss, disc_loss

# Run training for the given number of steps.
step = 0
for iepoch in range(epoch_count):
    for batch_image in get_batches(batch_size):
        # The shape of batch_image is [batch, height, width, channels].
        if step == 0:
            # Generate noise and report the initial losses before any update.
            noise = np.random.normal(-1., 1., size=[batch_size, noise_dim]).astype(np.float32)
            gen_loss = generator_loss(discriminator(generator(noise)))
            disc_loss = discriminator_loss(discriminator(generator(noise)), discriminator(batch_image))
            print("initial: gen_loss: %f, disc_loss: %f" % (gen_loss, disc_loss))
            step += 1
            continue

        # Run the optimization.
        gen_loss, disc_loss = run_optimization(batch_image)
        step += 1
        print("step: %i, gen_loss: %f, disc_loss: %f" % (step, gen_loss, disc_loss))
