
Feedforward artificial neural network for predicting output data (engineering dynamics)

I am a beginner trying to write an ANN in PyTorch to predict an engineering dataset. I am trying to train the ANN to predict the outputs in the dataset below, but I cannot get the training loop running. The dataset consists of 6 inputs and 3 outputs, as shown in the image below:

[dataset image]

I have some basic code that I believe should be able to perform this task, but I suspect there is a problem with the dataset's labels and the data types used, although I am not entirely sure. I tried converting the labels to a LongTensor, but that did not help. I am also not entirely sure whether I am using the right optimizer and backpropagation setup for this task.

When the output data type is changed to float32, I get the following error:

"expected scalar type Long but found Float"

When I keep it as int64, I get:

"Target 85 is out of bounds."
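To show where these two messages seem to come from, here is a minimal standalone sketch with made-up tensors (not my real data): nn.CrossEntropyLoss expects integer class indices in the range [0, C-1], where C is the number of model outputs (3 in my network), so float targets and a target value like 85 both fail. The failing calls are commented out.

import torch
import torch.nn as nn

loss_fn = nn.CrossEntropyLoss()
scores = torch.randn(4, 3)  # pretend model output: 4 samples, 3 output units

# Float targets trigger the first message (on the PyTorch version I am using):
float_targets = torch.tensor([85.0, 0.3, 1.2, 2.0])
# loss_fn(scores, float_targets)  # RuntimeError: expected scalar type Long but found Float

# Long targets must be class indices in [0, 2] when there are 3 outputs,
# so a value of 85 triggers the second message:
long_targets = torch.tensor([85, 0, 1, 2])
# loss_fn(scores, long_targets)  # IndexError: Target 85 is out of bounds.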

Please take a look; any suggestions would be greatly appreciated. I have included my code below:

import os
import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset,DataLoader
import torch.nn as nn
import torch.nn.functional as F

SEED = 4096
torch.manual_seed(SEED)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(SEED)
np.random.seed(SEED)

file_path = "./Dynamics of Sterling Engine Data(1).csv"
df = pd.read_csv(
    file_path, header=None, names=[
        "Kdp(N/m)", "Kpp(N/m)", "Cdp(Ns/m)", "Cl(Ns/m)", "mdp(kg)",
        "mpp(kg)", "f(Hz)", "γ(DP/PP)", "α(°)",
    ],
)

n = len(df.index)  # 55
shuffle_indices = np.random.permutation(n)
df = df.iloc[shuffle_indices]

x = df.iloc[:,:6].values.astype(np.float32)
y = df.iloc[:,-3].values.astype(np.float32)

mu = x.mean(axis=0)
span = x.max(axis=0) - x.min(axis=0)

def rescale(inputs):
    return (inputs - mu) / span
x = rescale(x)

num_train = int(n * 0.82)
num_test = n - num_train

x_train = x[:num_train]
y_train = y[:num_train]
x_test = x[-num_test:]
y_test = y[-num_test:]


class NpDataset(Dataset):
    def __init__(self,data,label):
        assert len(data) == len(label)

        self.data = torch.from_numpy(data)
        self.label = torch.from_numpy(label)

    def __getitem__(self,index):
        return self.data[index],self.label[index]

    def __len__(self):
        return len(self.label)

train_dataset = NpDataset(x_train,y_train)
test_dataset = NpDataset(x_test,y_test)

train_DataLoader = DataLoader(train_dataset,batch_size=128,shuffle=False)
test_DataLoader = DataLoader(test_dataset,shuffle=False)

device = torch.device("cpu")
print(device)

class SterlingNN(nn.Module):
    def __init__(self):
        super(SterlingNN,self).__init__()

        # 6 input features per data point
        self.fn1 = nn.Linear(6,6)  # 6 features,6 nodes in hidden layer
        self.fn2 = nn.Linear(6,3)  # 6 nodes in hidden layer,3 outputs

    def forward(self,x):
        x = torch.sigmoid(self.fn1(x))  # sigmoid activation function
        x = self.fn2(x)
        return x

model = SterlingNN()
print(model.to(device))

loss_fn = nn.CrossEntropyLoss()
optimiser = torch.optim.Adam(
    model.parameters(),lr=0.01,weight_decay=0.01
)

x,y = next(iter(train_DataLoader))
x = x[:5].to(device)
score = model(x)
print(score)

def train():
    model.train()  # put the model into training mode and iterate through the data loader

    for x,y in train_DataLoader:
        x = x.to(device)
        y = y.to(device)
        n = x.size(0)

        optimiser.zero_grad()
        score = model(x)
        loss = loss_fn(score,y)

        loss.backward()
        optimiser.step()

        predictions = score.max(1,keepdim=True)[1]
        num_correct = predictions.eq(y.view_as(predictions)).sum().item()

    acc = num_correct / n
    return loss,acc


def evaluate():
    model.eval()

    with torch.no_grad():
        for x,y in test_DataLoader:
            x = x.to(device)
            y = y.to(device)
            n = x.size(0)

            score = model(x)
            loss = loss_fn(score,y)

            predictions = score.max(1,keepdim=True)[1]
            num_correct = predictions.eq(y.view_as(predictions)).sum().item()

    acc = num_correct / n
    return loss,acc

max_epochs = 128
for epoch in range(max_epochs):
    tr_loss,tr_acc = train()
    eva_loss,eva_acc = evaluate()

    print(
        f"[{epoch}/{max_epochs}] Train loss:{tr_loss:.4f} acc:{tr_acc*100:.2f}% - "
        f"Test loss:{eva_loss:.4f} acc:{eva_acc*100:.2f}%"
    )
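
For comparison, this is a minimal regression-style sketch I put together with dummy tensors (an assumption on my part, not my actual data): it treats the three continuous outputs as a regression problem with nn.MSELoss and float32 targets of shape (N, 3), which would correspond to slicing the labels with df.iloc[:, -3:] rather than df.iloc[:, -3]. I am not sure whether this is the right direction for my dataset.

import torch
import torch.nn as nn

# dummy batch: 8 samples, 6 input features, 3 continuous targets (made up)
x = torch.randn(8, 6)
y = torch.randn(8, 3)

model = nn.Sequential(nn.Linear(6, 6), nn.Sigmoid(), nn.Linear(6, 3))
loss_fn = nn.MSELoss()          # mean squared error for continuous targets
optimiser = torch.optim.Adam(model.parameters(), lr=0.01)

optimiser.zero_grad()
loss = loss_fn(model(x), y)     # both arguments are float tensors of shape (8, 3)
loss.backward()
optimiser.step()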
    
