How to fix very poor neural network accuracy when using multiple hidden layers
I have created the following neural network:
import numpy as np

def init_weights(m, n=1):
    """
    initialize a matrix/vector of weights with xavier initialization
    :param m: out dim
    :param n: in dim
    :return: matrix/vector of random weights
    """
    limit = (6 / (n * m)) ** 0.5
    weights = np.random.uniform(-limit, limit, size=(m, n))
    if n == 1:
        # bias case: return a flat vector instead of an (m, 1) matrix
        weights = weights.reshape((-1,))
    return weights

def softmax(v):
    # row-wise softmax over a batch of logit vectors
    exp = np.exp(v)
    return exp / np.tile(exp.sum(1), (v.shape[1], 1)).T

def relu(x):
    return np.maximum(x, 0)

def sign(x):
    # derivative of relu: 1 where x > 0, else 0
    return (x > 0).astype(int)

class Model:
    """
    A class for neural network model
    """
    def __init__(self, sizes, lr):
        self.lr = lr
        self.weights = []
        self.biases = []
        self.memory = []  # activations cached during forward for backprop
        for i in range(len(sizes) - 1):
            self.weights.append(init_weights(sizes[i + 1], sizes[i]))
            self.biases.append(init_weights(sizes[i + 1]))

    def forward(self, X):
        self.memory = [X]
        X = np.dot(self.weights[0], X.T).T + self.biases[0]
        for W, b in zip(self.weights[1:], self.biases[1:]):
            X = relu(X)
            self.memory.append(X)
            X = np.dot(W, X.T).T + b
        return softmax(X)

    def backward(self, y, y_pred):
        # calculate the errors for each layer
        y = np.eye(y_pred.shape[1])[y]  # one-hot encode the labels
        errors = [y_pred - y]
        for i in range(len(self.weights) - 1, -1):
            new_err = sign(self.memory[i]) * \
                      np.dot(errors[0], self.weights[i])
            errors.insert(0, new_err)
        # update weights
        for i in range(len(self.weights)):
            self.weights[i] -= self.lr * \
                               np.dot(self.memory[i].T, errors[i]).T
            self.biases[i] -= self.lr * errors[i].sum(0)
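For reference, `softmax` above exponentiates the raw logits, so `np.exp` can overflow once the logits grow large; a numerically stable variant that returns the same values (up to floating-point error) subtracts the row maximum first. A minimal sketch:

def softmax_stable(v):
    shifted = v - v.max(axis=1, keepdims=True)  # largest logit per row becomes 0
    exp = np.exp(shifted)
    return exp / exp.sum(axis=1, keepdims=True)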
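For context, this is roughly how the model is constructed and trained. `X_train` (a float feature matrix) and `y_train` (integer class labels) stand in for the real dataset, and the layer sizes and learning rate below are placeholders rather than the exact values used:

model = Model([784, 128, 64, 10], lr=0.01)  # input -> two hidden layers -> 10 classes
for epoch in range(20):
    y_pred = model.forward(X_train)    # (n_samples, 10) class probabilities
    model.backward(y_train, y_pred)    # one full-batch gradient step
    acc = (y_pred.argmax(1) == y_train).mean()
    print(f"epoch {epoch}: train accuracy {acc:.3f}")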
The data has 10 classes. With a single hidden layer I get almost 40% accuracy. With 2 or 3 hidden layers the accuracy is around 9-10% from the first epoch onward and never improves; accuracy on the training set is in the same range. Is there a problem in my implementation that could cause this?
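One way to narrow down whether the backward pass is at fault is a numerical gradient check. The sketch below is not part of the posted code and assumes the training loop runs on your data: it recovers the analytic gradient from the weight update itself (since `backward` performs `W -= lr * grad`) and compares a single entry against a central finite difference of the summed cross-entropy loss. The loss is summed rather than averaged because `backward` never divides the errors by the batch size; `summed_cross_entropy` is a helper introduced here for the check:

def summed_cross_entropy(model, X, y):
    # the loss whose gradient the posted backward() would compute: summed over the batch
    p = model.forward(X)
    return -np.log(p[np.arange(len(y)), y]).sum()

def check_gradient(model, X, y, layer=0, i=0, j=0, eps=1e-5):
    # snapshot all parameters so the check leaves the model untouched
    saved_w = [W.copy() for W in model.weights]
    saved_b = [b.copy() for b in model.biases]

    # analytic gradient, recovered from one update step
    y_pred = model.forward(X)
    model.backward(y, y_pred)
    analytic = (saved_w[layer][i, j] - model.weights[layer][i, j]) / model.lr

    # restore the parameters, then take a central difference on the same entry
    model.weights = [W.copy() for W in saved_w]
    model.biases = [b.copy() for b in saved_b]
    model.weights[layer][i, j] = saved_w[layer][i, j] + eps
    loss_plus = summed_cross_entropy(model, X, y)
    model.weights[layer][i, j] = saved_w[layer][i, j] - eps
    loss_minus = summed_cross_entropy(model, X, y)
    model.weights[layer][i, j] = saved_w[layer][i, j]
    numeric = (loss_plus - loss_minus) / (2 * eps)
    return analytic, numeric

If the two values disagree for a weight in an earlier layer while agreeing for the last one, the error-propagation loop in `backward` is the place to look.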