Python keras.regularizers 模块,l1l2() 实例源码
我们从Python开源项目中,提取了以下9个代码示例,用于说明如何使用keras.regularizers.l1l2()。
def test_W_reg():
    """Smoke-test weight regularizers: fit and evaluate a model under each.

    Iterates over l1, l2 and combined l1l2 regularizers, training a small
    model with each and evaluating on a subset of the test data. Passes if
    no exception is raised (no return value is asserted).

    NOTE(review): relies on module-level helpers `get_data`, `create_model`
    and constants `batch_size`, `nb_epoch` defined elsewhere in the file.
    """
    (X_train, Y_train), (X_test, Y_test), test_ids = get_data()
    for reg in [regularizers.l1(),
                regularizers.l2(),
                regularizers.l1l2()]:
        model = create_model(weight_reg=reg)
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        model.fit(X_train, Y_train, batch_size=batch_size,
                  nb_epoch=nb_epoch, verbose=0)
        # Original line was truncated by the scrape: the subscript on Y_test
        # was left unclosed. Evaluate on the same id subset for X and Y.
        model.evaluate(X_test[test_ids, :], Y_test[test_ids, :], verbose=0)
def l1l2(l1_weight=0, l2_weight=0):
    """Build a combined L1+L2 regularizer across Keras versions.

    Args:
        l1_weight: coefficient of the L1 penalty term (default 0).
        l2_weight: coefficient of the L2 penalty term (default 0).

    Returns:
        A Keras regularizer object appropriate for the installed version.

    NOTE(review): `keras_2` is a module-level flag defined elsewhere in
    this file indicating whether Keras 2.x is installed.
    """
    if not keras_2:
        # Keras 1.x exposes a lowercase factory function; alias it so it
        # does not shadow this wrapper's name locally.
        from keras.regularizers import l1l2 as _l1l2_factory
        return _l1l2_factory(l1_weight, l2_weight)
    # Keras 2.x replaced the factory with the L1L2 class.
    from keras.regularizers import L1L2
    return L1L2(l1_weight, l2_weight)
def l1l2_penalty_reg(alpha=1.0, l1_ratio=0.5):
    '''Calculate L1 and L2 penalties for a Keras layer.

    This follows the same elastic-net formulation as the R package glmnet
    and scikit-learn: alpha * (l1_ratio * |w| + (1 - l1_ratio) / 2 * w^2).

    Args:
        alpha ([float]): amount of regularization.
        l1_ratio ([float]): portion of L1 penalty. Setting to 1.0 equals
            Lasso; 0.0 equals Ridge.
    '''
    # Pure-ridge and pure-lasso cases use the dedicated regularizers.
    if l1_ratio == 1.:
        return l1(alpha)
    if l1_ratio == .0:
        return l2(alpha)
    # Mixed case: L2 term carries the conventional 1/2 glmnet scaling.
    return l1l2(l1_ratio * alpha, 0.5 * (1 - l1_ratio) * alpha)
def test_W_reg(self):
    """Smoke-test weight regularizers (Keras 1 era, includes identity()).

    Trains and evaluates a model with each available weight regularizer;
    passes if no exception is raised.

    NOTE(review): `X_train`, `Y_train`, `X_test`, `Y_test`, `test_ids`,
    `create_model`, `batch_size` and `nb_epoch` come from the enclosing
    module/test fixture elsewhere in the file — confirm against the caller.
    """
    for reg in [regularizers.identity(), regularizers.l1(), regularizers.l2(), regularizers.l1l2()]:
        model = create_model(weight_reg=reg)
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        # Original fit line lost Y_train to scrape truncation; fit needs
        # both inputs and targets.
        model.fit(X_train, Y_train, batch_size=batch_size,
                  nb_epoch=nb_epoch, verbose=0)
        # Original evaluate line had an unclosed subscript; evaluate on the
        # same id subset for both X and Y, matching the sibling snippet.
        model.evaluate(X_test[test_ids, :], Y_test[test_ids, :], verbose=0)
def test_W_reg():
    """Check that a weight regularizer registers exactly one model loss.

    NOTE(review): this snippet was heavily truncated by the scrape — the
    data unpack and the compile call were fused onto one line. Reconstructed
    from the parallel snippets in this file; verify against the original
    project before relying on it.
    """
    (X_train, Y_train), (X_test, Y_test), test_ids = get_data()
    for reg in [regularizers.l1(),
                regularizers.l2(),
                regularizers.l1l2()]:
        model = create_model(weight_reg=reg)
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        # A single regularized layer should contribute exactly one loss.
        assert len(model.losses) == 1
        model.fit(X_train, Y_train, batch_size=batch_size,
                  nb_epoch=nb_epoch, verbose=0)
def test_W_reg(self):
    """Smoke-test each weight regularizer by fitting a model with it.

    NOTE(review): the original snippet was truncated by the scrape to a
    garbled loop header (`for reg in [regularizers.identity(), verbose=0)`).
    Reconstructed from the intact sibling snippets in this file; verify
    against the original project.
    """
    for reg in [regularizers.identity(), regularizers.l1(), regularizers.l2(), regularizers.l1l2()]:
        model = create_model(weight_reg=reg)
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        model.fit(X_train, Y_train, batch_size=batch_size,
                  nb_epoch=nb_epoch, verbose=0)
def test_W_reg(self):
    """Smoke-test each weight regularizer by fitting a model with it.

    NOTE(review): identical truncated duplicate of the preceding snippet
    (scrape artifact). Reconstructed the same way; verify against the
    original project.
    """
    for reg in [regularizers.identity(), regularizers.l1(), regularizers.l2(), regularizers.l1l2()]:
        model = create_model(weight_reg=reg)
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        model.fit(X_train, Y_train, batch_size=batch_size,
                  nb_epoch=nb_epoch, verbose=0)
版权声明:本文内容由互联网用户自发贡献,该文观点与技术仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容, 请发送邮件至 dio@foxmail.com 举报,一经查实,本站将立刻删除。