如何解决用LSTM
我正在使用此NET: https://www.kaggle.com/ceshine/pytorch-temporal-convolutional-networks
我想用LSTM代替解码器,但出现错误。
旧:
class TCNModel(nn.Module):
def __init__(self,num_channels,kernel_size=2,dropout,history):
super(TCNModel,self).__init__()
self.tcn = TemporalConvNet(
history,kernel_size=kernel_size,dropout=dropout)
self.dropout = nn.Dropout(dropout)
self.decoder = nn.Linear(num_channels[-1],1)
def forward(self,x):
#print(np.shape( self.dropout(self.tcn(x)[:,:,-1])) )
return self.decoder(self.dropout(self.tcn(x)[:,-1]))
新功能(不要工作)
class TCNModel(nn.Module):
def __init__(self,dropout=0.02,history_len = 3):
super(TCNModel,self).__init__()
self.tcn = TemporalConvNet(
history_len,dropout=dropout)
self.dropout = nn.Dropout(dropout)
#self.decoder = nn.Linear(num_channels[-1],1)
self.hidden_dim = 2
self.decoder = nn.LSTM(input_size=num_channels[-1],hidden_size=self.hidden_dim,num_layers=2,bidirectional=False)
#self.decoder = LLinear(num_channels[-1],1)
def forward(self,x):
print(np.shape( self.dropout(self.tcn(x)[:,-1])) )
#return self.decoder(self.dropout(self.tcn(x)[:,-1]))
y = self.tcn(x)[:,-1]
h0 = torch.zeros(2,y.size(0),self.hidden_dim).requires_grad_()
# Initialize cell state
c0 = torch.zeros(2,self.hidden_dim).requires_grad_()
out,(hn,cn) = self.decoder(y,(h0.detach(),c0.detach()))
return self.decoder(self.dropout(out) )
版权声明:本文内容由互联网用户自发贡献,该文观点与技术仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容, 请发送邮件至 dio@foxmail.com 举报,一经查实,本站将立刻删除。