如何解决无法通过PyTorch DataLoader进行迭代
我正在尝试学习PyTorch并创建我的第一个神经网络。我正在使用自定义数据集,这是数据示例:
chat = json['chat'] != null ? new Chat.fromJson(Map<String,dynamic>.from(json['chat'])) : null;
我将数据分成以下训练/测试/验证数据:
ID_REF cg00001854 cg00270460 cg00293191 cg00585219 cg00702638 cg01434611 cg02370734 cg02644867 cg02879967 cg03036557 cg03123104 cg03670302 cg04146801 cg04570540 cg04880546 cg07044749 cg07135408 cg07303143 cg07475178 cg07553761 cg07917901 cg08016257 cg08548498 cg08715791 cg09334636 cg11153071 cg11441796 cg11642652 cg12256803 cg12352902 cg12541127 cg13313833 cg13500819 cg13975075 cg14061946 cg14086922 cg14224196 cg14530143 cg15456742 cg16230982 cg16734549 cg17166941 cg17290213 cg17292667 cg18266594 cg18335535 cg18584803 cg19273773 cg19378199 cg19523692 cg20115827 cg20558024 cg20608895 cg20899581 cg21186299 cg22115892 cg22454769 cg22549547 cg23098693 cg23193759 cg23500537 cg23606718 cg24079702 cg24888989 cg25090514 cg25344401 cg25635000 cg25726357 cg25743481 cg26019498 cg26647566 cg26792755 cg26928195 cg26940620 Age
0 0.252486 0.284724 0.243242 0.200685 0.904132 0.102795 0.473919 0.264084 0.367480 0.671434 0.075955 0.329343 0.217375 0.210861 1.000000 0.356048 0.577945 0.557148 0.249014 0.847134 0.254539 0.319858 0.220589 0.796789 0.361994 0.296101 0.105965 0.239796 0.169738 0.357586 0.365674 0.132575 0.250932 0.283227 1.000000 0.262259 0.208146 0.290623 0.113049 0.255710 0.555382 0.281046 0.168826 0.492007 0.442871 0.509569 0.219183 0.641244 0.339088 0.164062 0.227678 0.340220 0.541491 0.423010 0.621303 0.243750 0.869947 0.124120 0.317660 0.985243 0.645869 0.590888 0.841485 0.825372 0.904037 0.407343 0.223722 0.352113 0.855653 0.289593 0.428849 0.719758 0.800240 0.473586 68
1 0.867671 0.606590 0.803673 0.845942 0.086222 0.996915 0.871998 0.791823 0.877639 0.095326 0.857108 0.959701 0.688322 0.650640 0.062329 0.920434 0.687537 0.193038 0.891809 0.273775 0.583457 0.793486 0.798427 0.102910 0.773496 0.658568 0.759050 0.754877 0.787817 0.585895 0.792240 0.734543 0.854528 0.735642 0.389495 0.736709 0.600386 0.775989 0.819579 0.696350 0.110374 0.878199 0.659849 0.716714 0.771206 0.870711 0.919629 0.359592 0.677752 0.693433 0.683448 0.792423 0.933971 0.170669 0.249908 0.879879 0.111498 0.623053 0.626821 0.000000 0.157429 0.197567 0.160809 0.183031 0.202754 0.597896 0.826429 0.886736 0.086038 0.844088 0.761793 0.056548 0.270670 0.940083 21
2 0.789439 0.594060 0.857086 0.633195 0.000000 0.953293 0.832107 0.692119 0.641294 0.169303 0.935807 0.674698 0.789146 0.796555 0.208590 0.791318 0.777537 0.221895 0.804405 0.138006 0.738616 0.758083 0.749127 0.180998 0.769312 0.592938 0.578885 0.896125 0.553588 0.781393 0.898768 0.705339 0.861029 0.966552 0.274496 0.575738 0.490313 0.951172 0.833724 0.901890 0.115235 0.651489 0.619196 0.760758 0.902768 0.835082 0.610065 0.294962 0.907979 0.703284 0.775867 0.910324 0.858090 0.190595 0.041909 0.792941 0.146005 0.615639 0.761822 0.254161 0.101765 0.343289 0.356166 0.088915 0.114347 0.628616 0.697758 0.910687 0.133282 0.775737 0.809420 0.129848 0.126485 0.875580 20
3 0.615803 0.710968 0.874037 0.771136 0.199428 0.861378 0.861346 0.695713 0.638599 0.158479 0.903668 0.758718 0.581146 0.857357 0.307756 0.977337 0.805049 0.188333 0.788991 0.312119 0.706578 0.782006 0.793232 0.288111 0.691131 0.758102 0.829221 1.000000 0.742666 0.897607 0.797869 0.803221 0.912101 0.736800 0.315636 0.760577 0.609101 0.733923 0.578598 0.796944 0.096960 0.924135 0.612601 0.727117 0.905177 0.776481 0.727865 0.429820 0.666803 0.924595 0.567474 0.752196 0.742709 0.303662 0.168286 0.720899 0.099313 0.595328 0.734024 0.268583 0.293437 0.244840 0.311726 0.213415 0.418673 0.819981 0.816660 0.684730 0.146797 0.686270 0.777680 0.087826 0.335125 1.000000 23
4 0.847329 0.735766 0.858018 0.896453 0.186994 0.831964 0.762522 0.840186 0.830930 0.199264 0.788487 0.912629 0.702284 0.838771 0.065271 0.959230 0.912387 0.377203 0.794480 0.207909 0.766246 0.582117 0.902944 0.301144 0.765401 0.715115 0.646735 0.812084 0.697886 0.714310 0.890658 0.826644 0.944022 0.729517 0.530379 0.756268 0.764899 0.914573 0.825766 0.673394 0.017316 0.949335 0.614375 0.650553 0.898788 0.685396 0.823348 0.210175 0.831852 0.829067 0.858212 0.916433 0.778864 0.241186 0.144072 0.889536 0.058360 0.703567 0.852496 0.094223 0.341236 0.284903 0.231957 0.125196 0.333207 0.752592 0.899356 0.839006 0.174601 0.937948 0.716135 0.000000 0.114062 0.969760 22
这是到目前为止的网络(非常基本,只是为了测试它是否有效):
# Split 60% train / 20% test / 20% validation.
train_df, rest_df = train_test_split(df, test_size=0.4)
test_df, val_df = train_test_split(rest_df, test_size=0.5)

# Feature tensors get requires_grad=True (as in the original); target tensors
# do not need gradients.
# NOTE(review): the DataFrame holds float64, while nn.Linear weights default
# to float32 — a .float() cast will likely be needed before training; confirm.
x_train_tensor = torch.tensor(train_df.drop('Age', axis=1).to_numpy(), requires_grad=True)
y_train_tensor = torch.tensor(train_df['Age'].to_numpy())
# BUG FIX: the two x_* lines below were SyntaxErrors in the original —
# `drop('Age', requires_grad=True` was missing `axis=1).to_numpy(),` and a
# closing parenthesis.
x_test_tensor = torch.tensor(test_df.drop('Age', axis=1).to_numpy(), requires_grad=True)
y_test_tensor = torch.tensor(test_df['Age'].to_numpy())
x_val_tensor = torch.tensor(val_df.drop('Age', axis=1).to_numpy(), requires_grad=True)
y_val_tensor = torch.tensor(val_df['Age'].to_numpy())

# Batch size chosen so one epoch is ~10 batches.
bs = len(train_df.index) // 10

# BUG FIX: DataLoader cannot iterate a pandas DataFrame (indexing a DataFrame
# with an integer position raises, which is the error the question hits).
# Convert to NumPy arrays first — DataLoader indexes ndarrays sample-by-sample.
train_dl = DataLoader(train_df.to_numpy(), bs, shuffle=True)
test_dl = DataLoader(test_df.to_numpy(), len(test_df), shuffle=False)
val_dl = DataLoader(val_df.to_numpy(), shuffle=False)
这是我得到错误的地方,在最后一行:
class Net(nn.Module):
    """Small fully connected regression network for predicting 'Age'.

    Layer widths are derived from the feature count of the module-level
    DataFrame ``df`` (all columns except the 'Age' target).
    """

    def __init__(self):
        super().__init__()
        # NOTE(review): assumes a module-level DataFrame `df` whose columns
        # are the features plus one 'Age' target column — confirm in caller.
        input_size = len(df.columns) - 1
        self.fc1 = nn.Linear(input_size, input_size // 2)
        self.fc2 = nn.Linear(input_size // 2, input_size // 4)
        self.fc3 = nn.Linear(input_size // 4, 1)

    def forward(self, x):
        """Forward pass: two hidden ReLU layers, linear output."""
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        # BUG FIX: the original applied ReLU to the output layer too. For a
        # regression head a linear output is standard — a final ReLU clamps
        # predictions to >= 0 and zeroes the gradient whenever the
        # pre-activation is negative, which can stall training completely.
        return self.fc3(x)
net = Net()
print(net)

# MSE is the usual loss for regression on a continuous target like age.
loss = torch.nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=0.001)

EPOCHS = 3
STEPS_PER_EPOCH = len(train_dl.dataset) // bs

print(train_dl.dataset)
for epoch in range(EPOCHS):
    # BUG FIX 1: `iterator.next()` is Python 2 syntax; in Python 3 iterators
    # have no `.next()` method (AttributeError — the error in the question).
    # Use the built-in next(iterator) instead.
    # BUG FIX 2: the iterator was created once *outside* the epoch loop, so
    # it would be exhausted and raise StopIteration in the second epoch.
    # Recreate it at the start of every epoch.
    iterator = iter(train_dl)
    for s in range(STEPS_PER_EPOCH):
        batch = next(iterator)
        print(batch)
我真的不知道该错误意味着什么或在哪里寻找。 非常感谢您提供一些指导,谢谢!
解决方法
使用 NumPy 数组代替 DataFrame。您可以使用 to_numpy() 方法将数据帧转换为 NumPy 数组,再传给 DataLoader。
# Build the loaders from plain NumPy arrays: DataLoader can index an ndarray
# positionally, which a pandas DataFrame does not support — this is what
# fixes the iteration error.
train_dl = DataLoader(train_df.to_numpy(), batch_size=bs, shuffle=True)
# Test set is served as a single full-size batch; no shuffling needed.
test_dl = DataLoader(test_df.to_numpy(), batch_size=len(test_df), shuffle=False)
# Validation loader keeps the default batch size of 1.
val_dl = DataLoader(val_df.to_numpy(), shuffle=False)
版权声明:本文内容由互联网用户自发贡献,该文观点与技术仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容, 请发送邮件至 dio@foxmail.com 举报,一经查实,本站将立刻删除。