
Python torch.nn module: MSELoss() code examples

Below are 50 code examples extracted from open-source Python projects that illustrate how to use torch.nn.MSELoss().
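Before the project snippets, a minimal self-contained usage sketch of our own (modern tensor API, PyTorch >= 0.4; the snippets below mostly use the older Variable API):

import torch
import torch.nn as nn

# MSELoss computes mean((prediction - target)**2) by default.
criterion = nn.MSELoss()
pred = torch.randn(8, 4, requires_grad=True)
target = torch.randn(8, 4)
loss = criterion(pred, target)
loss.backward()      # populates pred.grad
print(loss.item())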

Project: speed    Author: keon    | project source | file source
def train(e, model, opt, dataset, arg, cuda=False):
    model.train()
    criterion = nn.MSELoss()
    losses = []

    batcher = dataset.get_batcher(shuffle=True, augment=True)
    for b, (x, y) in enumerate(batcher, 1):
        # V = torch.autograd.Variable, th = torch (project aliases)
        x = V(th.from_numpy(x).float())
        y = V(th.from_numpy(y).float())
        if cuda:
            x, y = x.cuda(), y.cuda()
        opt.zero_grad()
        logit = model(x)
        loss = criterion(logit, y)
        loss.backward()
        opt.step()

        losses.append(loss.data[0])
        if arg.verbose and b % 50 == 0:
            loss_t = np.mean(losses[-49:])  # mean over the most recent batches
            print('[train] [e]:%s [b]:%s - [loss]:%s' % (e, b, loss_t))
    return losses
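These snippets predate PyTorch 0.4: V is the project's alias for torch.autograd.Variable, th for torch, and loss.data[0] is the old way to read a scalar loss. For orientation, a modern, self-contained equivalent of the inner training step (our sketch, not the project's code):

import numpy as np
import torch
import torch.nn as nn

model = torch.nn.Linear(4, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.01)
criterion = nn.MSELoss()

x = torch.from_numpy(np.random.randn(8, 4)).float()
y = torch.from_numpy(np.random.randn(8, 1)).float()

opt.zero_grad()
loss = criterion(model(x), y)
loss.backward()
opt.step()
print(loss.item())   # .item() replaces the old loss.data[0]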
Project: speed    Author: keon    | project source | file source
def validate(models, dataset, cuda=False):
    criterion = nn.MSELoss()
    losses = []
    batcher = dataset.get_batcher(shuffle=True, augment=False)
    for b, (x, y) in enumerate(batcher, 1):
        x = V(th.from_numpy(x).float())
        y = V(th.from_numpy(y).float())
        if cuda:
            x, y = x.cuda(), y.cuda()
        # Ensemble average
        logit = None
        for model, _ in models:
            model.eval()
            logit = model(x) if logit is None else logit + model(x)
        logit = th.div(logit, len(models))
        loss = criterion(logit, y)
        losses.append(loss.data[0])
    return np.mean(losses)
Project: distanceGAN    Author: sagiebenaim    | project source | file source
def print_network(net):
    num_params = 0
    for param in net.parameters():
        num_params += param.numel()
    print(net)
    print('Total number of parameters: %d' % num_params)


##############################################################################
# Classes
##############################################################################


# Defines the GAN loss which uses either LSGAN or the regular GAN.
# When LSGAN is used, it is basically the same as MSELoss,
# but it abstracts away the need to create the target label tensor
# that has the same size as the input
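The GANLoss class introduced by this comment appears many times below through its __init__ alone. For context, here is a sketch of the companion methods implied by the fields those constructors set up (real_label_var, fake_label_var, Tensor, loss); a reconstruction under that assumption, not verbatim code from any one project:

def get_target_tensor(self, input, target_is_real):
        # Lazily (re)build a label tensor shaped like the discriminator output.
        if target_is_real:
            if self.real_label_var is None or self.real_label_var.numel() != input.numel():
                real_tensor = self.Tensor(input.size()).fill_(self.real_label)
                self.real_label_var = Variable(real_tensor, requires_grad=False)
            return self.real_label_var
        if self.fake_label_var is None or self.fake_label_var.numel() != input.numel():
            fake_tensor = self.Tensor(input.size()).fill_(self.fake_label)
            self.fake_label_var = Variable(fake_tensor, requires_grad=False)
        return self.fake_label_var

def __call__(self, input, target_is_real):
        target_tensor = self.get_target_tensor(input, target_is_real)
        return self.loss(input, target_tensor)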
Project: DeblurGAN    Author: Kupynorest    | project source | file source
def init_loss(opt, tensor):
    disc_loss = None
    content_loss = None

    if opt.model == 'content_gan':
        content_loss = PerceptualLoss()
        content_loss.initialize(nn.MSELoss())
    elif opt.model == 'pix2pix':
        content_loss = ContentLoss()
        content_loss.initialize(nn.L1Loss())
    else:
        raise ValueError("Model [%s] not recognized." % opt.model)

    if opt.gan_type == 'wgan-gp':
        disc_loss = DiscLossWGANGP()
    elif opt.gan_type == 'lsgan':
        disc_loss = DiscLossLS()
    elif opt.gan_type == 'gan':
        disc_loss = DiscLoss()
    else:
        raise ValueError("GAN [%s] not recognized." % opt.gan_type)
    disc_loss.initialize(opt, tensor)
    return disc_loss, content_loss
Project: PAAC.pytorch    Author: qbx2    | project source | file source
def get_loss(q_values, values, log_a):
    r"""Calculates policy loss and value loss.

    :param q_values: Tensor with shape (T, N)
    :param values: Variable with shape (T, N)
    :param log_a: Variable with shape (T, N)
    :return: tuple (loss_p, double_loss_v, loss)
    """
    diff = Variable(q_values) - values

    # policy loss
    loss_p = -(Variable(diff.data) * log_a).mean(0)
    # value loss: diff.pow(2).mean(0) is twice the conventional 1/2-MSE,
    # hence the "2 * nn.MSELoss" note and the 0.25 weight below
    double_loss_v = diff.pow(2).mean(0)
    loss = loss_p + 0.25 * double_loss_v
    return loss_p, double_loss_v, loss
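A quick self-contained check of the relation to nn.MSELoss() (our sketch, modern tensor API):

import torch
import torch.nn as nn

q = torch.randn(5, 3)                    # stands in for q_values, shape (T, N)
v = torch.randn(5, 3)                    # stands in for values
double_loss_v = (q - v).pow(2).mean(0)   # per-environment, shape (N,)
mse = nn.MSELoss()(v, q)                 # scalar mean over all of (T, N)
assert torch.allclose(double_loss_v.mean(), mse)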
Project: e2e-model-learning    Author: locuslab    | project source | file source
def run_rmse_net(model, variables, X_train, Y_train):
    opt = optim.Adam(model.parameters(), lr=1e-3)

    for i in range(1000):
        opt.zero_grad()
        model.train()
        train_loss = nn.MSELoss()(
            model(variables['X_train_'])[0], variables['Y_train_'])
        train_loss.backward()
        opt.step()

        model.eval()
        test_loss = nn.MSELoss()(
            model(variables['X_test_'])[0], variables['Y_test_'])

        print(i, train_loss.data[0], test_loss.data[0])

    model.eval()
    model.set_sig(variables['X_train_'], variables['Y_train_'])

    return model


# Todo: minibatching
Project: crowdcount-cascaded-mtl    Author: svishwa    | project source | file source
def __init__(self, ce_weights=None):
        super(CrowdCounter, self).__init__()        
        self.CCN = CMTL()
        if ce_weights is not None:
            ce_weights = torch.Tensor(ce_weights)
            ce_weights = ce_weights.cuda()
        self.loss_mse_fn = nn.MSELoss()
        self.loss_bce_fn = nn.BCELoss(weight=ce_weights)
Project: rl    Author: Shmuma    | project source | file source
def calc_loss_dqn(batch, net, tgt_net, gamma, cuda=False):
    states, actions, rewards, dones, next_states = unpack_batch(batch)

    states_v = Variable(torch.from_numpy(states))
    next_states_v = Variable(torch.from_numpy(next_states), volatile=True)
    actions_v = Variable(torch.from_numpy(actions))
    rewards_v = Variable(torch.from_numpy(rewards))
    done_mask = torch.ByteTensor(dones)
    if cuda:
        states_v = states_v.cuda()
        next_states_v = next_states_v.cuda()
        actions_v = actions_v.cuda()
        rewards_v = rewards_v.cuda()
        done_mask = done_mask.cuda()

    state_action_values = net(states_v).gather(1, actions_v.unsqueeze(-1)).squeeze(-1)
    next_state_values = tgt_net(next_states_v).max(1)[0]
    next_state_values[done_mask] = 0.0
    next_state_values.volatile = False

    expected_state_action_values = next_state_values * gamma + rewards_v
    return nn.MSELoss()(state_action_values, expected_state_action_values)
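The volatile=True flag on next_states_v, together with the later next_state_values.volatile = False, is the pre-0.4 way of keeping the target network out of the autograd graph. The modern equivalent of those lines (a sketch, assuming PyTorch >= 0.4):

    with torch.no_grad():
        next_state_values = tgt_net(next_states_v).max(1)[0]
    next_state_values[done_mask] = 0.0
    expected_state_action_values = next_state_values * gamma + rewards_v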
Project: CycleGANwithperceptionLoss    Author: EliasVansteenkiste    | project source | file source
def print_network(net):
    num_params = 0
    for param in net.parameters():
        num_params += param.numel()
    print(net)
    print('Total number of parameters: %d' % num_params)



##############################################################################
# Classes
##############################################################################


# Defines the GAN loss which uses either LSGAN or the regular GAN.
# When LSGAN is used, it is basically the same as MSELoss,
# but it abstracts away the need to create the target label tensor
# that has the same size as the input
Project: pytorch_cycle_gan    Author: jinfagang    | project source | file source
def print_network(net):
    num_params = 0
    for param in net.parameters():
        num_params += param.numel()
    print(net)
    print('Total number of parameters: %d' % num_params)


##############################################################################
# Classes
##############################################################################


# Defines the GAN loss which uses either LSGAN or the regular GAN.
# When LSGAN is used, it is basically the same as MSELoss,
# but it abstracts away the need to create the target label tensor
# that has the same size as the input
Project: pytorch-CycleGAN-and-pix2pix    Author: junyanz    | project source | file source
def print_network(net):
    num_params = 0
    for param in net.parameters():
        num_params += param.numel()
    print(net)
    print('Total number of parameters: %d' % num_params)


##############################################################################
# Classes
##############################################################################


# Defines the GAN loss which uses either LSGAN or the regular GAN.
# When LSGAN is used, it is basically the same as MSELoss,
# but it abstracts away the need to create the target label tensor
# that has the same size as the input
Project: wasserstein-cyclegan    Author: abhiskk    | project source | file source
def print_network(net):
    num_params = 0
    for param in net.parameters():
        num_params += param.numel()
    print(net)
    print('Total number of parameters: %d' % num_params)


##############################################################################
# Classes
##############################################################################


# Defines the GAN loss which uses either LSGAN or the regular GAN.
# When LSGAN is used, it is basically the same as MSELoss,
# but it abstracts away the need to create the target label tensor
# that has the same size as the input
Project: PytorchDL    Author: FredHuangBia    | project source | file source
def createCriterion(opt, model):
    "Criterion is still a legacy of pytorch."
    # criterion = nn.MultiCriterion()
    # if opt.absLoss != 0:
    #   criterion.add(nn.AbsCriterion(),weight=opt.absLoss)
    # if opt.mseLoss != 0:
    #   criterion.add(nn.MSECriterion(), weight=opt.mseLoss)
    # if opt.gdlLoss != 0:
    #   criterion.add(nn.GDLCriterion(), weight=opt.gdlLoss)
    # if opt.customLoss != 0:
    #   criterion.add(customCriterion(), weight=opt.customLoss)

    if opt.L1Loss != 0:
        criterion = nn.L1Loss()
    elif opt.mseLoss != 0:
        criterion = nn.MSELoss()
    elif opt.gdlLoss != 0:
        criterion = nn.GDLLoss()  # project-specific loss; not part of torch.nn
    else:
        raise ValueError("no loss option selected in opt")

    return criterion
Project: rarepepes    Author: kendricktan    | project source | file source
def print_network(net):
    num_params = 0
    for param in net.parameters():
        num_params += param.numel()
    print(net)
    print('Total number of parameters: %d' % num_params)


##############################################################################
# Classes
##############################################################################


# Defines the GAN loss which uses either LSGAN or the regular GAN.
# When LSGAN is used, it is basically the same as MSELoss,
# but it abstracts away the need to create the target label tensor
# that has the same size as the input
Project: GAN_Liveness_Detection    Author: yunfan0621    | project source | file source
def print_network(net):
    num_params = 0
    for param in net.parameters():
        num_params += param.numel()
    print(net)
    print('Total number of parameters: {}'.format(num_params))


##############################################################################
# Classes
##############################################################################

# Defines the GAN loss which uses either LSGAN or the regular GAN.
# When LSGAN is used, it is basically the same as MSELoss,
# but it abstracts away the need to create the target label tensor
# that has the same size as the input
Project: VIGAN    Author: chaoshangcs    | project source | file source
def print_network(net):
    num_params = 0
    for param in net.parameters():
        num_params += param.numel()
    print(net)
    print('Total number of parameters: %d' % num_params)


##############################################################################
# Classes
##############################################################################


# Defines the GAN loss used in LSGAN.
# It is basically the same as MSELoss, but it abstracts away the need to create
# the target label tensor that has the same size as the input
Project: crowdcount-mcnn    Author: svishwa    | project source | file source
def __init__(self):
        super(CrowdCounter, self).__init__()        
        self.DME = MCNN()        
        self.loss_fn = nn.MSELoss()
Project: python-utils    Author: zhijian-liu    | project source | file source
def __init__(self, real_label = 1.0, fake_label = 0.0, use_lsgan = True):
        super(GANLoss, self).__init__()
        self.real_label = real_label
        self.fake_label = fake_label
        self.real_target = None
        self.fake_target = None
        if use_lsgan:
            self.loss = nn.MSELoss()
        else:
            self.loss = nn.BCELoss()
Project: PaintsPytorch    Author: orashi    | project source | file source
def __init__(self, target_real_label=1.0, target_fake_label=0.0,
                 tensor=torch.FloatTensor):
        super(GANLoss, self).__init__()
        self.real_label = target_real_label
        self.fake_label = target_fake_label
        self.real_label_var = None
        self.fake_label_var = None
        self.Tensor = tensor
        self.loss = nn.MSELoss()
Project: PaintsPytorch    Author: orashi    | project source | file source
def __init__(self, target_real_label=1.0, target_fake_label=0.0,
                 tensor=torch.FloatTensor):
        super(GANLoss, self).__init__()
        self.real_label = target_real_label
        self.fake_label = target_fake_label
        self.real_label_var = None
        self.fake_label_var = None
        self.Tensor = tensor
        self.loss = nn.MSELoss()
Project: distanceGAN    Author: sagiebenaim    | project source | file source
def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,
                 tensor=torch.FloatTensor):
        super(GANLoss, self).__init__()
        self.real_label = target_real_label
        self.fake_label = target_fake_label
        self.real_label_var = None
        self.fake_label_var = None
        self.Tensor = tensor
        if use_lsgan:
            self.loss = nn.MSELoss()
        else:
            self.loss = nn.BCELoss()
Project: SentEval    Author: facebookresearch    | project source | file source
def __init__(self, train, valid, test, devscores, config):
        # fix seed
        np.random.seed(config['seed'])
        torch.manual_seed(config['seed'])
        assert torch.cuda.is_available(), 'torch.cuda required for Relatedness'
        torch.cuda.manual_seed(config['seed'])

        self.train = train
        self.valid = valid
        self.test = test
        self.devscores = devscores

        self.inputdim = train['X'].shape[1]
        self.nclasses = config['nclasses']
        self.seed = config['seed']
        self.l2reg = 0.
        self.batch_size = 64
        self.maxepoch = 1000
        self.early_stop = True

        self.model = nn.Sequential(
            nn.Linear(self.inputdim, self.nclasses),
            nn.Softmax(),
            )
        self.loss_fn = nn.MSELoss()

        if torch.cuda.is_available():
            self.model = self.model.cuda()
            self.loss_fn = self.loss_fn.cuda()

        self.loss_fn.size_average = False
        self.optimizer = optim.Adam(self.model.parameters(),
                                    weight_decay=self.l2reg)
Project: torch_light    Author: ne7ermore    | project source | file source
def __init__(self, target, weight):
        super().__init__()

        self.target = target.detach() * weight
        self.weight = weight
        self.criterion = nn.MSELoss()
        self.gm = GramMatrix()
Project: nnmnkwii    Author: r9y9    | project source | file source
def test_functional_mlpg():
    static_dim = 2
    T = 5

    for windows in _get_windows_set():
        torch.manual_seed(1234)
        means = torch.rand(T, static_dim * len(windows))
        variances = torch.ones(static_dim * len(windows))

        y = G.mlpg(means.numpy(), variances.numpy(), windows)
        y = Variable(torch.from_numpy(y), requires_grad=False)

        means = Variable(means, requires_grad=True)

        # mlpg
        y_hat = AF.mlpg(means, variances, windows)
        assert np.allclose(y.data.numpy(), y_hat.data.numpy())

        # Test backward pass
        nn.MSELoss()(y_hat, y).backward()

        # unit_variance_mlpg
        R = torch.from_numpy(G.unit_variance_mlpg_matrix(windows, T))
        y_hat = AF.unit_variance_mlpg(R, means)
        assert np.allclose(y.data.numpy(), y_hat.data.numpy())

        nn.MSELoss()(y_hat, y).backward()

        # Test 3D tensor inputs
        y_hat = AF.unit_variance_mlpg(R, means.view(1, -1, means.size(-1)))
        assert np.allclose(
            y.data.numpy(), y_hat.data.view(-1, static_dim).numpy())

        nn.MSELoss()(y_hat.view(-1, static_dim), y).backward()
Project: pytorch-skipthoughts    Author: kaniblu    | project source | file source
def __init__(self, model, data_generator, epochs, loss):
        self.epochs = epochs
        self.model = model
        self.data_generator = data_generator
        self.loss = loss

        if loss == "smoothl1":
            self.loss_fn = F.smooth_l1_loss
        elif loss == "l1":
            self.loss_fn = nn.L1Loss()
        elif loss == "l2":
            self.loss_fn = nn.MSELoss()
        else:
            raise ValueError("Unrecognized loss type: {}".format(loss))
Project: probability_GAN    Author: MaureenZOU    | project source | file source
def __init__(self, use_lsgan=False, target_real_label=1.0, target_fake_label=0.0,
                 tensor=torch.FloatTensor):
        super(GANLoss, self).__init__()
        self.real_label = target_real_label
        self.fake_label = target_fake_label
        self.real_label_var = None
        self.fake_label_var = None
        self.Tensor = tensor
        if use_lsgan:
            self.loss = nn.MSELoss()
        else:
            self.loss = nn.BCELoss()
Project: probability_GAN    Author: MaureenZOU    | project source | file source
def __init__(self, use_lsgan=False, target_real_label=1.0, target_fake_label=0.0,
                 tensor=torch.FloatTensor):
        super(GANLoss, self).__init__()
        self.real_label = target_real_label
        self.fake_label = target_fake_label
        self.real_label_var = None
        self.fake_label_var = None
        self.Tensor = tensor
        if use_lsgan:
            self.loss = nn.MSELoss()
        else:
            self.loss = nn.BCELoss()
Project: e2c-pytorch    Author: ethanluoyc    | project source | file source
def compute_loss(x_pred, x_true, z_pred, z_true, beta=0.05):
    mse = nn.MSELoss()
    return mse(x_pred, x_true).add(beta * kl_bernoulli(z_pred, z_true))
Project: optnet    Author: locuslab    | project source | file source
def train(args, epoch, trainF, trainW, trainX, trainY, optimizer):
    batchSz = args.batchSz

    batch_data_t = torch.FloatTensor(batchSz, trainX.size(1))
    batch_targets_t = torch.FloatTensor(batchSz, trainY.size(1))
    if args.cuda:
        batch_data_t = batch_data_t.cuda()
        batch_targets_t = batch_targets_t.cuda()
    batch_data = Variable(batch_data_t, requires_grad=False)
    batch_targets = Variable(batch_targets_t, requires_grad=False)
    for i in range(0, trainX.size(0), batchSz):
        batch_data.data[:] = trainX[i:i+batchSz]
        batch_targets.data[:] = trainY[i:i+batchSz]
        # Fixed batch size for debugging:
        # batch_data.data[:] = trainX[:batchSz]
        # batch_targets.data[:] = trainY[:batchSz]

        optimizer.zero_grad()
        preds = model(batch_data)
        mseLoss = nn.MSELoss()(preds, batch_targets)
        if args.model == 'optnet' and args.learnD:
            loss = mseLoss + args.Dpenalty*(model.D.norm(1))
        else:
            loss = mseLoss
        loss.backward()
        optimizer.step()

        print('Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.4f}'.format(
            epoch, i+batchSz, trainX.size(0),
            float(i+batchSz)/trainX.size(0)*100,
            mseLoss.data[0]))

        trainW.writerow((epoch-1+float(i+batchSz)/trainX.size(0), mseLoss.data[0]))
        trainF.flush()
Project: optnet    Author: locuslab    | project source | file source
def test(args, epoch, testF, testW, testX, testY):
    batchSz = args.testBatchSz

    test_loss = 0
    batch_data_t = torch.FloatTensor(batchSz, testX.size(1))
    batch_targets_t = torch.FloatTensor(batchSz, testY.size(1))
    if args.cuda:
        batch_data_t = batch_data_t.cuda()
        batch_targets_t = batch_targets_t.cuda()
    batch_data = Variable(batch_data_t, volatile=True)
    batch_targets = Variable(batch_targets_t, volatile=True)

    for i in range(0, testX.size(0), batchSz):
        print('Testing model: {}/{}'.format(i, testX.size(0)), end='\r')
        batch_data.data[:] = testX[i:i+batchSz]
        batch_targets.data[:] = testY[i:i+batchSz]
        output = model(batch_data)
        if i == 0:
            testOut = os.path.join(args.save, 'test-imgs')
            os.makedirs(testOut, exist_ok=True)
            for j in range(4):
                X = batch_data.data[j].cpu().numpy()
                Y = batch_targets.data[j].cpu().numpy()
                Yhat = output[j].data.cpu().numpy()

                fig, ax = plt.subplots(1, 1)
                plt.plot(X, label='Corrupted')
                plt.plot(Y, label='Original')
                plt.plot(Yhat, label='Predicted')
                plt.legend()
                f = os.path.join(testOut, '{}.png'.format(j))
                fig.savefig(f)
        test_loss += nn.MSELoss()(output, batch_targets)

    nBatches = testX.size(0)/batchSz
    test_loss = test_loss.data[0]/nBatches
    print('TEST SET RESULTS:' + ' ' * 20)
    print('Average loss: {:.4f}'.format(test_loss))

    testW.writerow((epoch, test_loss))
    testF.flush()
Project: optnet    Author: locuslab    | project source | file source
def train(args, epoch, trainF, trainW, trainX, trainY, optimizer):
    batchSz = args.batchSz

    batch_data_t = torch.FloatTensor(batchSz, trainX.size(1), trainX.size(2), trainX.size(3))
    batch_targets_t = torch.FloatTensor(batchSz, trainY.size(1), trainX.size(2), trainX.size(3))
    if args.cuda:
        batch_data_t = batch_data_t.cuda()
        batch_targets_t = batch_targets_t.cuda()
    batch_data = Variable(batch_data_t, requires_grad=False)
    batch_targets = Variable(batch_targets_t, requires_grad=False)
    for i in range(0, trainX.size(0), batchSz):
        batch_data.data[:] = trainX[i:i+batchSz]
        batch_targets.data[:] = trainY[i:i+batchSz]
        # Fixed batch size for debugging:
        # batch_data.data[:] = trainX[:batchSz]
        # batch_targets.data[:] = trainY[:batchSz]

        optimizer.zero_grad()
        preds = model(batch_data)
        loss = nn.MSELoss()(preds, batch_targets)
        loss.backward()
        optimizer.step()

        err = computeErr(preds.data)/batchSz
        print('Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.4f} Err: {:.4f}'.format(
            epoch, i+batchSz, trainX.size(0),
            float(i+batchSz)/trainX.size(0)*100,
            loss.data[0], err))

        trainW.writerow((epoch-1+float(i+batchSz)/trainX.size(0), loss.data[0], err))
        trainF.flush()
Project: optnet    Author: locuslab    | project source | file source
def test(args, epoch, testF, testW, testX, testY):
    batchSz = args.testBatchSz

    test_loss = 0
    batch_data_t = torch.FloatTensor(batchSz, testX.size(1), testX.size(2), testX.size(3))
    batch_targets_t = torch.FloatTensor(batchSz, testY.size(1), testX.size(2), testX.size(3))
    if args.cuda:
        batch_data_t = batch_data_t.cuda()
        batch_targets_t = batch_targets_t.cuda()
    batch_data = Variable(batch_data_t, volatile=True)
    batch_targets = Variable(batch_targets_t, volatile=True)

    nErr = 0
    for i in range(0, testX.size(0), batchSz):
        print('Testing model: {}/{}'.format(i, testX.size(0)), end='\r')
        batch_data.data[:] = testX[i:i+batchSz]
        batch_targets.data[:] = testY[i:i+batchSz]
        output = model(batch_data)
        test_loss += nn.MSELoss()(output, batch_targets)
        nErr += computeErr(output.data)

    nBatches = testX.size(0)/batchSz
    test_loss = test_loss.data[0]/nBatches
    test_err = nErr/testX.size(0)
    print('TEST SET RESULTS:' + ' ' * 20)
    print('Average loss: {:.4f}'.format(test_loss))
    print('Err: {:.4f}'.format(test_err))

    testW.writerow((epoch, test_loss, test_err))
    testF.flush()
Project: covfefe    Author: deepnn    | project source | file source
def mse_loss(size_ave=True):
    return nn.MSELoss(size_average=size_ave)
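size_average was deprecated and later removed from PyTorch; the equivalent with the current API (a sketch) uses the reduction argument:

import torch.nn as nn

# size_average=True  -> reduction='mean' (the default)
# size_average=False -> reduction='sum'
def mse_loss(size_ave=True):
    return nn.MSELoss(reduction='mean' if size_ave else 'sum')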
Project: LIE    Author: EmbraceLife    | project source | file source
def get_loss(self, model, target, output):
        backend = model.get_backend()

        if backend.get_name() == 'keras':
            return keras_wrap(model, output, 'mean_squared_error')
        elif backend.get_name() == 'pytorch':

            # pylint: disable=import-error
            import torch.nn as nn
            # pylint: enable=import-error

            loss = model.data.move(nn.MSELoss())

            return [
                [
                    (target, model.data.placeholder(target))
                ],
                lambda inputs, output: loss(
                    output, inputs[0]
                )
            ]
        else:
            raise ValueError('Unsupported backend "{}" for loss function "{}"'
                .format(backend.get_name(), self.get_name()))

### EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF
Project: kaggle-dstl    Author: lopuhin    | project source | file source
def __init__(self, hps: HyperParams):
        self.hps = hps
        self.net = getattr(models, hps.net)(hps)
        self.bce_loss = nn.BCELoss()
        self.mse_loss = nn.MSELoss()
        self.optimizer = None  # type: optim.Optimizer
        self.tb_logger = None  # type: tensorboard_logger.Logger
        self.logdir = None  # type: Path
        self.on_gpu = torch.cuda.is_available()
        if self.on_gpu:
            self.net.cuda()
Project: CycleGANwithperceptionLoss    Author: EliasVansteenkiste    | project source | file source
def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,
                 tensor=torch.FloatTensor):
        super(GANLoss, self).__init__()
        self.real_label = target_real_label
        self.fake_label = target_fake_label
        self.real_label_var = None
        self.fake_label_var = None
        self.Tensor = tensor
        if use_lsgan:
            self.loss = nn.MSELoss()
        else:
            self.loss = nn.BCELoss()
Project: pytorch_cycle_gan    Author: jinfagang    | project source | file source
def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,
                 tensor=torch.FloatTensor):
        super(GANLoss, self).__init__()
        self.real_label = target_real_label
        self.fake_label = target_fake_label
        self.real_label_var = None
        self.fake_label_var = None
        self.Tensor = tensor
        if use_lsgan:
            self.loss = nn.MSELoss()
        else:
            self.loss = nn.BCELoss()
Project: deeplearning    Author: zxjzxj9    | project source | file source
def fit_transform(self, data_in):
        criterion = nn.MSELoss()
        # for gpu applications
        self.cuda()
        optimizer = torch.optim.Adam(self.parameters(), lr=5e-5)

        for epoch in range(self.epoch_num):
            for data in DataIterator(data_in, self.batch_num):
                data = torch.from_numpy(data)
                data = data.float()
                data = data.cuda()
                data = Variable(data)

                optimizer.zero_grad()

                # Sparse autoencoder, L2-norm; this proved to have nearly the same effect as the original data.
                # An L1-norm penalty will produce worse models.
                # tanh will produce worse models; maybe this indicates the features need normalization.
                # loss = criterion(self(data), data) + 0.0001*(F.sigmoid(self.linear1(data))**2).sum()
                # Another loss function would be CAE (contractive autoencoder).

                hidden = F.sigmoid(self.linear1(data))
                #gradients = torch.autograd.grad(inputs = data,outputs = hidden,retain_graph = True,create_graph = True)

                # PyTorch makes evaluating a Jacobian matrix extremely complicated!
                # Thanks to this blog:
                # https://wiseodd.github.io/techblog/2016/12/05/contractive-autoencoder/

                hw = hidden*(1.0 - hidden)
                # CAE is still not enough; it only gives ~81%, compared to ~86% on the original data.
                loss = criterion(self(data), data) + 0.01*(hw * (self.linear1.weight**2).sum(dim = 1)).sum()

                loss.backward()
                optimizer.step()
                sys.stdout.write("In epoch %d,total loss %.6f\r" %(epoch, loss.data.cpu().numpy()))
            #print("")
        print("")

        return F.sigmoid(self.linear1(Variable(torch.from_numpy(data_in).cuda()))).data.cpu().numpy()
        #return self(Variable(torch.from_numpy(data_in).cuda())).data.cpu().numpy()
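For reference, following the blog cited in the snippet: for a sigmoid layer h = sigmoid(Wx + b), the contractive penalty is the squared Frobenius norm of the encoder Jacobian, which squares the h*(1-h) factor (our addition, reusing the snippet's variables; note the code above uses hw unsquared):

                # ||J_f(x)||_F**2 = sum_j (h_j*(1-h_j))**2 * sum_i W_ji**2
                frob = ((hidden * (1.0 - hidden))**2 * (self.linear1.weight**2).sum(dim=1)).sum()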
Project: deeplearning    Author: zxjzxj9    | project source | file source
def fit_transform(self, data_in):
        criterion = nn.MSELoss()
        self.cuda()
        optimizer = torch.optim.Adam(self.parameters(), lr=5e-5)

        for epoch in range(self.epoch_num):
            for data in DataIterator(data_in, self.batch_num):
                data = Variable(torch.from_numpy(data).float().cuda())
                optimizer.zero_grad()

                # loss = criterion(self(data), data) + 0.0001*(F.sigmoid(self.linear1(data))**2).sum()
                # another loss function would be CAE (contractive autoencoder)

                hidden = F.sigmoid(torch.mm(data, self.weight1) + self.bias1)
                hw = hidden*(1.0 - hidden)
                loss = criterion(self(data), data) + 0.01*(hw * (self.weight1**2).sum(dim = 0)).sum()

                loss.backward()
                optimizer.step()
                sys.stdout.write("In epoch %d, total loss %.6f\r" % (epoch, loss.data.cpu().numpy()))
            #print("")
        print("")

        linear1 = lambda x: F.sigmoid(torch.mm(x, self.weight1) + self.bias1)
        # linear1 already applies the sigmoid, so it is not applied a second time here
        return linear1(Variable(torch.from_numpy(data_in).cuda())).data.cpu().numpy()
        #return self(Variable(torch.from_numpy(data_in).cuda())).data.cpu().numpy()
Project: pytorch-trpo    Author: mjacar    | project source | file source
def __init__(self, model, lr):
    super(ValueFunctionWrapper, self).__init__()
    self.model = model
    self.loss_fn = nn.MSELoss()
    self.lr = lr
Project: EarlyWarning    Author: wjlei1990    | project source | file source
def main():
    waveforms, magnitudes = load_data()
    loader = make_DataLoader(waveforms, magnitudes)

    rnn = RNN(input_size, hidden_size, num_layers)
    print(rnn)

    optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)
    loss_func = nn.MSELoss()

    for epoch in range(3):
        loss_epoch = []
        for step, (batch_x, batch_y) in enumerate(loader):
            x = torch.unsqueeze(batch_x[0, :, :].t(), dim=1)
            print('Epoch: ', epoch, '| Step: ', step, '| x: ',
                  x.size(), '| y: ', batch_y.numpy())
            x = Variable(x)
            y = Variable(torch.Tensor([batch_y.numpy(), ]))
            prediction = rnn(x)
            loss = loss_func(prediction, y)
            optimizer.zero_grad()  # clear gradients for this training step
            loss.backward()  # backpropagation, compute gradients
            optimizer.step()
            loss_epoch.append(loss.data[0])
            print("Current loss: %e --- loss mean: %f"
                  % (loss.data[0], np.mean(loss_epoch)))
Project: loop    Author: facebookresearch    | project source | file source
def __init__(self):
        super(MaskedMSE, self).__init__()
        self.criterion = nn.MSELoss(size_average=False)

    # Taken from
    # https://github.com/spro/practical-pytorch/blob/master/seq2seq-translation
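Only the constructor is shown here. A plausible forward for such a masked MSE (our sketch, not the project's verbatim code) would zero out padded positions and normalize by the number of valid elements:

    def forward(self, input, target, mask):
        # mask: 1.0 at valid timesteps, 0.0 at padding
        loss = self.criterion(input * mask, target * mask)
        return loss / mask.sum()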
Project: pytorch_crowd_count    Author: BingzheWu    | project source | file source
def train():
    net = predict_net()
    data_loader = DataLoader('dataset/processed_lmdb_image', 'dataset/processed_lmdb_label')
    criterion = nn.MSELoss().cuda()
    net = net.cuda()
    for module in net.modules():
        module.cuda()
        print(module)
    net.train()
    optimizer = torch.optim.SGD(net.parameters(), lr = 0.001, momentum = 0.9)
    for epoch in range(20):
        for i, data in enumerate(data_loader):
            if i>=201:
                break
            inputs, gts = data
            inputs, gts = torch.autograd.Variable(inputs), torch.autograd.Variable(gts)
            inputs = inputs.cuda()
            gts = gts.cuda()
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, gts)
            loss.backward()
            optimizer.step()
            running_loss = loss.data[0]
            if i%50 == 0:
                print('[%d,%5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss))
        torch.save(net.state_dict(), 'checkpoint/crowd_net%d.pth'%(epoch))
                #running_loss = 0.0
Project: generative_zoo    Author: DL-IT    | project source | file source
def __init__(self, image_size, n_z, n_chan, hiddens, ngpu, loss=['KLD', 'BCE']):
        """
        VAE object. This class is a wrapper of a VAE as explained in the paper:
            AUTO-ENCODING VARIATIONAL BAYES by Kingma et al.

        Instance of this class initializes the parameters required for the Encoder and Decoder.
        Arguments:
            image_size      = Height / width of the real images
            n_z         = Dimensionality of the latent space
            n_chan          = Number of channels of the real images
            hiddens         = Number of nodes in the hidden layers of the encoder and decoder
                          Format:
                            hiddens = {'enc': n_enc_hidden,
                                   'dec': n_dec_hidden
                                  }
            ngpu            = Number of gpus to be allocated, if to be run on gpu
            loss            = The loss function to be used. For multiple losses, add them in a list
        """
        super(VAE, self).__init__()
        self.vae_net    = vae(image_size, n_z, n_chan, hiddens['enc'], hiddens['dec'], ngpu)
        self.ngpu   = ngpu
        self.n_z    = n_z
        self.image_size = image_size
        self.n_chan = n_chan
        if 'BCE' in loss:
            self.recons_loss    = nn.BCELoss(size_average=False)
        elif 'MSE' in loss:
            self.recons_loss    = nn.MSELoss()
        self.KLD_loss           = u.KLD
Project: generative_zoo    Author: DL-IT    | project source | file source
def __init__(self, image_size, n_z, n_chan, hiddens, depths, ngpu, loss='BCE'):
        """
        MLPGAN object. This class is a wrapper of a generalized MLPGAN.
        Instance of this class initializes the Generator and the discriminator.
        Arguments:
            image_size      = Height / width of the real images
            n_z         = Dimensionality of the latent space
            n_chan          = Number of channels of the real images
            hiddens         = Number of nodes in the hidden layers of the generator and discriminator
                          Format:
                            hiddens = {'gen': n_gen_hidden,
                                   'dis': n_dis_hidden
                                  }
            depths          = Number of fully connected layers in the generator and discriminator
                          Format:
                            depths  = {'gen': n_gen_depth,
                                   'dis': n_dis_depth
                                  }
            ngpu            = Number of gpus to be allocated, if to be run on gpu
            loss (opt)      = The loss function to be used. Default is BCE loss
        """
        super(MLPGAN, self).__init__()
        self.Gen_net    = Generator(image_size, hiddens['gen'], depths['gen'], ngpu)
        self.dis_net    = discriminator(image_size, hiddens['dis'], depths['dis'], ngpu)
        self.ngpu   = ngpu
        self.n_z    = n_z
        self.image_size = image_size
        self.n_chan = n_chan
        if loss == 'BCE':
            self.loss   = nn.BCELoss()
        elif loss == 'MSE':
            self.loss   = nn.MSELoss()
Project: gan-error-avoidance    Author: aleju    | project source | file source
def embed_real_images(gen, r, images, code_size, lr=0.0001, test_steps=100000):
    """Function to embed images to noise vectors that result in as similar
    images as possible (when Feeding the approximated noise vectors through
    G). This is intended for real images,not images that came from the
    generator. It also didn't seem to work very well."""
    testfunc = nn.MSELoss()

    for param in gen.parameters():
        param.requires_grad = False
    best_code = torch.Tensor(len(images), code_size).cuda()

    batch_size = len(images)
    batch_code = Variable(torch.zeros(batch_size, code_size).cuda())
    batch_code.requires_grad = True

    batch_target = torch.Tensor(batch_size, images[0].size(0), images[0].size(1), images[0].size(2))
    for i, image in enumerate(images):
        batch_target[i].copy_(image)
    batch_target = Variable(batch_target.cuda())
    batch_code.data.copy_(r(batch_target).data)

    test_opt = optim.Adam([batch_code], lr=lr)
    for j in range(test_steps):
        generated, _ = gen(batch_code)
        loss = testfunc(generated, batch_target)
        loss.backward()
        test_opt.step()
        batch_code.grad.data.zero_()
        if j % 100 == 0:
            #lr = lr * 0.98
            print("Embedding real images... iter %d with loss %.08f and lr %.08f" % (j,loss.data[0], lr))
            #test_opt = optim.RMSprop([batch_code],lr=lr)
    best_code = batch_code.data

    for param in gen.parameters():
        param.requires_grad = True

    return best_code
Project: pytorch-CycleGAN-and-pix2pix    Author: junyanz    | project source | file source
def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,
                 tensor=torch.FloatTensor):
        super(GANLoss, self).__init__()
        self.real_label = target_real_label
        self.fake_label = target_fake_label
        self.real_label_var = None
        self.fake_label_var = None
        self.Tensor = tensor
        if use_lsgan:
            self.loss = nn.MSELoss()
        else:
            self.loss = nn.BCELoss()
Project: facenet_pytorch    Author: liorshk    | project source | file source
def get_center_loss(self, target, alpha):
        batch_size = target.size(0)
        features_dim = self.features.size(1)

        target_expand = target.view(batch_size,1).expand(batch_size,features_dim)

        centers_var = Variable(self.centers)
        centers_batch = centers_var.gather(0,target_expand).cuda()

        criterion = nn.MSELoss()
        center_loss = criterion(self.features,  centers_batch)

        diff = centers_batch - self.features

        unique_label, unique_reverse, unique_count = np.unique(target.cpu().data.numpy(), return_inverse=True, return_counts=True)

        appear_times = torch.from_numpy(unique_count).gather(0,torch.from_numpy(unique_reverse))

        appear_times_expand = appear_times.view(-1,features_dim).type(torch.FloatTensor)

        diff_cpu = diff.cpu().data / appear_times_expand.add(1e-6)

        # Δc_j = (Σ_{i=1}^m δ(y_i = j)·(c_j − x_i)) / (1 + Σ_{i=1}^m δ(y_i = j))
        diff_cpu = alpha * diff_cpu

        for i in range(batch_size):
            # Update the parameters c_j for each j by c_j^(t+1) = c_j^t − α·Δc_j^t
            self.centers[target.data[i]] -= diff_cpu[i].type(self.centers.type())

        return center_loss, self.centers
Project: wasserstein-cyclegan    Author: abhiskk    | project source | file source
def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,
                 tensor=torch.FloatTensor):
        super(GANLoss, self).__init__()
        self.real_label = target_real_label
        self.fake_label = target_fake_label
        self.real_label_var = None
        self.fake_label_var = None
        self.Tensor = tensor
        if use_lsgan:
            self.loss = nn.MSELoss()
        else:
            self.loss = nn.BCELoss()
Project: treelstm-pytorch    Author: pklfz    | project source | file source
def mse(self, predictions, labels):
        x = Var(deepcopy(predictions), volatile=True)
        y = Var(deepcopy(labels), volatile=True)
        return nn.MSELoss()(x, y).data[0]
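Var(..., volatile=True) and .data[0] are again pre-0.4 idioms; a modern equivalent of this helper (our sketch):

    def mse(self, predictions, labels):
        with torch.no_grad():
            return nn.MSELoss()(predictions, labels).item()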
