
scipy.optimize.minimize: L2-norm constraints on matrix rows

How do I apply an L2-norm constraint to each row of a parameter matrix with scipy.optimize.minimize?

I am interested in applying an L2-norm constraint to each row of a parameter matrix in scipy.optimize.minimize. What I have tried so far is

def l2_const(x):
    x = x.reshape(r, c)
    b = np.sqrt((x ** 2).sum(axis=1)) - 1
    return np.broadcast_to(b[:, None], (r, c)).flatten()

x0 = np.random.random((r, c))
const = ({'type': 'eq', 'fun': l2_const},)
f_min = minimize(fun=cost, x0=x0, method='SLSQP', jac=gradient, constraints=const)

but the computed parameters f_min.x are all zeros. Does anyone know how to implement this type of constraint correctly?
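As an aside (not part of the original post): the broadcast_to call above hands SLSQP r*c equality constraints, with each row norm repeated c times, when one value per row is enough. Below is a minimal, self-contained sketch of the per-row form on a toy cost function of my own choosing (the target matrix A and the cost are illustrative assumptions, not the question's objective); l2_const here returns exactly r values:

import numpy as np
from scipy.optimize import minimize

r, c = 3, 4
rng = np.random.default_rng(0)
A = rng.random((r, c))  # arbitrary target matrix for the toy cost

def cost(x):
    # toy objective: squared distance to A (unconstrained minimum is x = A)
    return ((x.reshape(r, c) - A) ** 2).sum()

def l2_const(x):
    # one equality constraint per row: ||row||_2 - 1 = 0
    return np.sqrt((x.reshape(r, c) ** 2).sum(axis=1)) - 1.0

x0 = rng.random(r * c)
res = minimize(cost, x0, method='SLSQP',
               constraints=({'type': 'eq', 'fun': l2_const},))
print(np.linalg.norm(res.x.reshape(r, c), axis=1))  # each row norm should be ~1.0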

EDIT 1: An example of applying this type of constraint can be found in my previous answer to a post.

EDIT 2: Below is a complete working example. The results are very poor when the constraints are used. Any suggestions are welcome.

The class:

import numpy as np
from scipy.optimize import minimize
from sklearn import preprocessing

class myLR():

    def __init__(self, reltol=1e-8, maxit=1000, opt_method=None, verbose=True, seed=0):
        self.maxit = maxit
        self.reltol = reltol
        self.seed = seed
        self.verbose = verbose
        self.opt_method = opt_method

        self.lbin = preprocessing.LabelBinarizer()

    def w_2d(self, w, n_classes):
        return np.reshape(w, (n_classes, -1))

    def softmax(self, W, X):
        a = np.exp(X @ W.T)
        o = a / np.sum(a, axis=1, keepdims=True)
        return o

    def squared_norm(self, x):
        x = np.ravel(x, order='K')
        return np.dot(x, x)

    def cost(self, W, X, T, n_samples, n_classes):
        # cross-entropy of the softmax outputs against the one-hot targets T
        W = self.w_2d(W, n_classes)
        log_O = np.log(self.softmax(W, X))
        c = -(T * log_O).sum()
        return c / n_samples

    def gradient(self, W, X, T, n_samples, n_classes):
        # gradient of the cross-entropy cost with respect to W, flattened
        W = self.w_2d(W, n_classes)
        O = self.softmax(W, X)
        grad = -(T - O).T.dot(X)
        return grad.ravel() / n_samples

    def l1_constraint(self, x, n_classes, n_features):
        # one equality per class row, broadcast across the feature columns
        x = x.reshape(n_classes, -1)
        b = x.sum(axis=1) - 1
        return np.broadcast_to(b[:, None], (n_classes, n_features)).flatten()

    def fit(self, X, y=None):
        n_classes = len(np.unique(y))
        n_samples, n_features = X.shape

        if n_classes == 2:
            T = np.zeros((n_samples, n_classes), dtype=np.float64)
            for i, cls in enumerate(np.unique(y)):
                T[y == cls, i] = 1
        else:
            T = self.lbin.fit_transform(y)

        np.random.seed(self.seed)
        W_0 = np.random.random((n_classes, n_features))

        const = ({'type': 'eq', 'fun': self.l1_constraint, 'args': (n_classes, n_features,)},)
        options = {'disp': self.verbose, 'maxiter': self.maxit}
        f_min = minimize(fun=self.cost, x0=W_0,
                         args=(X, T, n_samples, n_classes),
                         method=self.opt_method,
                         constraints=const,
                         jac=self.gradient,
                         options=options)

        self.coef_ = self.w_2d(f_min.x, n_classes)
        self.W_ = self.coef_

        return self

    def predict_proba(self, X):
        O = self.softmax(self.W_, X)
        return O

    def predict(self, X):
        sigma = self.predict_proba(X)
        y_pred = np.argmax(sigma, axis=1)
        return y_pred
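One aside of mine, not from the original post: SLSQP approximates the Jacobian of each constraint by finite differences unless the constraint dict also carries a 'jac' entry. For a sum-to-one constraint that Jacobian is cheap to write analytically. A sketch of a per-row variant of the constraint (returning one value per class instead of broadcasting) together with its Jacobian; sum_constraint and sum_constraint_jac are names of my own:

import numpy as np

def sum_constraint(x, n_classes, n_features):
    # one equality constraint per class: each row of the weight matrix sums to 1
    return x.reshape(n_classes, n_features).sum(axis=1) - 1.0

def sum_constraint_jac(x, n_classes, n_features):
    # db_i/dx_jk is 1 inside class i's block of features and 0 elsewhere
    J = np.zeros((n_classes, n_classes * n_features))
    for i in range(n_classes):
        J[i, i * n_features:(i + 1) * n_features] = 1.0
    return J

# usage inside fit(), replacing the const tuple above:
# const = ({'type': 'eq', 'fun': sum_constraint, 'jac': sum_constraint_jac,
#           'args': (n_classes, n_features)},)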

The main:
import numpy as np
from sklearn import datasets
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score

from myLR import myLR

iris = datasets.load_iris()
X = iris.data[:, 0:2]
y = iris.target

par_dict2 = {'reltol': 1e-6, 'maxit': 20000, 'verbose': 20, 'seed': 0}

# Create different classifiers.
classifiers = {
    'myLR': myLR(**par_dict2),
}

n_classifiers = len(classifiers)

plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)

xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
    classifier.fit(X, y)

    coef_ = classifier.coef_
    print(np.linalg.norm(coef_, axis=1))

    y_pred = classifier.predict(X)
    accuracy = accuracy_score(y, y_pred)
    print("Accuracy (train) for %s: %0.1f%% " % (name, accuracy * 100))

    # View probabilities:
    probas = classifier.predict_proba(Xfull)
    n_classes = np.unique(y_pred).size
    for k in range(n_classes):
        plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
        plt.title("Class %d" % k)
        if k == 0:
            plt.ylabel(name)
        imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
                                   extent=(3, 9, 1, 5), origin='lower')
        plt.xticks(())
        plt.yticks(())
        idx = (y_pred == k)
        if idx.any():
            plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='w', edgecolor='k')

ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')

plt.show()

EDIT 3: I replaced the constraint with

    def l1_constraint(self, x, n_classes, n_features):
        x = x.reshape(n_classes, -1)
        b = x.sum(axis=1) - 1
        return b

It produces better results. However, the sums of the computed components x1 are not equal to 1. Is that OK?
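A quick way to check how well the equality constraint actually held at the solution (my own addition, reusing the myLR class and the par_dict2, X, y objects from the example above): SLSQP enforces equality constraints only up to its tolerance, so small deviations from 1 are normal, while large ones usually mean the optimizer stopped before converging.

import numpy as np

clf = myLR(**par_dict2).fit(X, y)

# row sums of the fitted weights: each should be ~1.0 up to SLSQP's tolerance
print(clf.coef_.sum(axis=1))

# row L2 norms, for comparison with the original L2 goal of the question
print(np.linalg.norm(clf.coef_, axis=1))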
