PyTorch error: The size of tensor a (128) must match the size of tensor b (9) at non-singleton dimension 0


I am running a CNN model for HAR in Anaconda, using PyTorch 1.01 with a GPU. While iterating over the training data it gives me the error: "The size of tensor a (128) must match the size of tensor b (9) at non-singleton dimension 0." I believe the problem is in the data model, and the error is raised while enumerating the train loader. Has anyone faced a similar issue in PyTorch? I'm new to PyTorch and could use some support.

I have already tried every data-model trick I could find on Google.
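For reference, the failure reproduces outside the training loop. A minimal sketch, assuming one sample shaped (9, 1, 128) as in the reshape((-1, 9, 1, 128)) in my loading code below:

    import numpy as np
    from torchvision import transforms

    # One HAR window: 9 signal channels, 1 x 128 time steps (assumed shape).
    sample = np.random.randn(9, 1, 128).astype(np.float32)

    tf = transforms.Compose([
        transforms.ToTensor(),  # treats the array as (H, W, C) and permutes it to (C, H, W) = (128, 9, 1)
        transforms.Normalize(mean=(0.0,) * 9, std=(1.0,) * 9),
    ])

    tf(sample)  # RuntimeError: The size of tensor a (128) must match the size of tensor b (9) ...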

    def train(model, optimizer, train_loader, test_loader):
        n_batch = len(train_loader.dataset) // BATCH_SIZE
        criterion = nn.CrossEntropyLoss()

        for e in range(N_EPOCH):
            model.train()
            correct, total_loss = 0, 0
            total = 0
            for index, (sample, target) in enumerate(train_loader):
                sample, target = sample.to(DEVICE).float(), target.to(DEVICE).long()
                sample = sample.view(-1, 9, 1, 128)
                output = model(sample)
                loss = criterion(output, target)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                total_loss += loss.item()
                _, predicted = torch.max(output.data, 1)
                total += target.size(0)
                correct += (predicted == target).sum()

                if index % 20 == 0:
                    tqdm.tqdm.write('Epoch: [{}/{}], Batch: [{}/{}], loss:{:.4f}'.format(
                        e + 1, N_EPOCH, index + 1, n_batch, loss.item()))
            acc_train = float(correct) * 100.0 / (BATCH_SIZE * n_batch)
            tqdm.tqdm.write('Epoch: [{}/{}], loss: {:.4f}, train acc: {:.2f}%'.format(
                e + 1, N_EPOCH, total_loss * 1.0 / n_batch, acc_train))

            # Testing
            model.train(False)
            with torch.no_grad():
                correct, total = 0, 0
                for sample, target in test_loader:
                    sample, target = sample.to(DEVICE).float(), target.to(DEVICE).long()
                    sample = sample.view(-1, 9, 1, 128)
                    output = model(sample)
                    _, predicted = torch.max(output.data, 1)
                    total += target.size(0)
                    correct += (predicted == target).sum()
            acc_test = float(correct) * 100 / total
            tqdm.tqdm.write('Epoch: [{}/{}], test acc: {:.2f}%'.format(
                e + 1, N_EPOCH, float(correct) * 100 / total))
            result.append([acc_train, acc_test])
            result_np = np.array(result, dtype=float)
            np.savetxt('result.csv', result_np, fmt='%.2f', delimiter=',')

Error ----------------------------
(7352, 1152)
(7352, 128, 9)
(2947, 1152)
(2947, 128, 9)
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-1-64c1adae4ee0> in <module>
     86     model = net.Network().to(DEVICE)
     87     optimizer = optim.SGD(params=model.parameters(), lr=LEARNING_RATE, momentum=0.9)
---> 88     train(model, optimizer, train_loader, test_loader)
     89     result = np.array(result, dtype=float)
     90     np.savetxt('result.csv', result, fmt='%.2f', delimiter=',')

<ipython-input-1-64c1adae4ee0> in train(model, optimizer, train_loader, test_loader)
     29         correct, total_loss = 0, 0
     30         total = 0
---> 31         for index, (sample, target) in enumerate(train_loader):
     32             sample, target = sample.to(DEVICE).float(), target.to(DEVICE).long()
     33             print('Sample', sample)

~/anaconda3/envs/rnn_lstm_har_pytorch/lib/python3.6/site-packages/torch/utils/data/dataloader.py in __next__(self)
    613         if self.num_workers == 0:  # same-process loading
    614             indices = next(self.sample_iter)  # may raise StopIteration
--> 615             batch = self.collate_fn([self.dataset[i] for i in indices])
    616             if self.pin_memory:
    617                 batch = pin_memory_batch(batch)

~/anaconda3/envs/rnn_lstm_har_pytorch/lib/python3.6/site-packages/torch/utils/data/dataloader.py in <listcomp>(.0)
    613         if self.num_workers == 0:  # same-process loading
    614             indices = next(self.sample_iter)  # may raise StopIteration
--> 615             batch = self.collate_fn([self.dataset[i] for i in indices])
    616             if self.pin_memory:
    617                 batch = pin_memory_batch(batch)

~/anaconda3/envs/rnn_lstm_har_pytorch/data_preprocess.py in __getitem__(self, index)
     97     def __getitem__(self, index):
     98         sample, target = self.samples[index], self.labels[index]
---> 99         return self.T(sample), target
    100 
    101     def __len__(self):

~/anaconda3/envs/rnn_lstm_har_pytorch/lib/python3.6/site-packages/torchvision/transforms/transforms.py in __call__(self, img)
     58     def __call__(self, img):
     59         for t in self.transforms:
---> 60             img = t(img)
     61         return img
     62 

~/anaconda3/envs/rnn_lstm_har_pytorch/lib/python3.6/site-packages/torchvision/transforms/transforms.py in __call__(self, tensor)
    161             Tensor: Normalized Tensor image.
    162         """
--> 163         return F.normalize(tensor, self.mean, self.std, self.inplace)
    164 
    165     def __repr__(self):

~/anaconda3/envs/rnn_lstm_har_pytorch/lib/python3.6/site-packages/torchvision/transforms/functional.py in normalize(tensor, mean, std, inplace)
    206     mean = torch.tensor(mean, dtype=torch.float32)
    207     std = torch.tensor(std, dtype=torch.float32)
--> 208     tensor.sub_(mean[:, None, None]).div_(std[:, None, None])
    209     return tensor
    210 

RuntimeError: The size of tensor a (128) must match the size of tensor b (9) at non-singleton dimension 0
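The failing line shows the constraint directly: normalize() unsqueezes the per-channel mean and std to shape (C, 1, 1), so dimension 0 of the incoming tensor must equal the number of channels. A quick illustration of the broadcast, with the shapes from this traceback:

    import torch

    mean = torch.zeros(9)                 # nine per-channel means
    expected = torch.zeros(9, 1, 128)     # channels-first layout Normalize can handle
    actual = torch.zeros(128, 9, 1)       # what ToTensor produces from a (9, 1, 128) array

    expected.sub_(mean[:, None, None])    # OK: dim 0 is 9, matches len(mean)
    # actual.sub_(mean[:, None, None])    # raises: tensor a (128) vs tensor b (9) at dim 0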

    # This is for parsing the X data, you can ignore it if you do not
    # need preprocessing
    def format_data_x(datafile):
        x_data = None
        for item in datafile:
            item_data = np.loadtxt(item, dtype=np.float)
            if x_data is None:
                x_data = np.zeros((len(item_data), 1))
            x_data = np.hstack((x_data, item_data))
        x_data = x_data[:, 1:]
        print(x_data.shape)
        X = None
        for i in range(len(x_data)):
            row = np.asarray(x_data[i, :])
            row = row.reshape(9, 128).T
            if X is None:
                X = np.zeros((len(x_data), 128, 9))
            X[i] = row
        print(X.shape)
        return X
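The printed shapes in the error output come from this function: nine files of shape (7352, 128) are hstacked into (7352, 1152), and each 1152-long row is rebuilt as (9, 128).T, i.e. (128, 9), time steps by channels. A quick check of that row reshape with dummy data:

    import numpy as np

    row = np.arange(9 * 128)          # one flattened window, as stored in x_data
    window = row.reshape(9, 128).T    # -> shape (128, 9): 128 time steps x 9 channels
    print(window.shape)               # (128, 9)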


    # This is for parsing the Y data, you can ignore it if you do not
    # need preprocessing
    def format_data_y(datafile):
        data = np.loadtxt(datafile, dtype=np.int) - 1
        YY = np.eye(6)[data]
        return YY


    # Load data function, if there exists a parsed data file, then use it
    # If not, parse the original dataset from scratch
    def load_data():
        import os

        # This is for processing the dataset from scratch
        # After downloading the dataset, put it in the DATA_PATH folder

        # str_folder = 'data/' + 'UCI HAR Dataset/'
        DATA_PATH = 'data/'
        DATASET_PATH = DATA_PATH + 'UCI HAR Dataset/'
        TRAIN = 'train/'
        TEST = 'test/'

        INPUT_SIGNAL_TYPES = [
            "body_acc_x_",
            "body_acc_y_",
            "body_acc_z_",
            "body_gyro_x_",
            "body_gyro_y_",
            "body_gyro_z_",
            "total_acc_x_",
            "total_acc_y_",
            "total_acc_z_"
        ]

        str_train_files = [DATASET_PATH + TRAIN + 'Inertial Signals/' + item + 'train.txt'
                           for item in INPUT_SIGNAL_TYPES]
        str_test_files = [DATASET_PATH + TEST + 'Inertial Signals/' + item + 'test.txt'
                          for item in INPUT_SIGNAL_TYPES]
        str_train_y = DATASET_PATH + TRAIN + 'y_train.txt'
        str_test_y = DATASET_PATH + TEST + 'y_test.txt'

        X_train = format_data_x(str_train_files)
        X_test = format_data_x(str_test_files)
        Y_train = format_data_y(str_train_y)
        Y_test = format_data_y(str_test_y)

        return X_train, onehot_to_label(Y_train), X_test, onehot_to_label(Y_test)


    def onehot_to_label(y_onehot):
        a = np.argwhere(y_onehot == 1)
        return a[:, -1]

    class data_loader(Dataset):
        def __init__(self, samples, labels, t):
            self.samples = samples
            self.labels = labels
            self.T = t

        def __getitem__(self, index):
            sample, target = self.samples[index], self.labels[index]
            return self.T(sample), target

        def __len__(self):
            return len(self.samples)


    def load(batch_size=64):
        x_train, y_train, x_test, y_test = load_data()
        x_train, x_test = x_train.reshape((-1, 9, 1, 128)), x_test.reshape((-1, 9, 1, 128))
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=(0, 0, 0, 0, 0, 0, 0, 0, 0), std=(1, 1, 1, 1, 1, 1, 1, 1, 1))
        ])
        train_set = data_loader(x_train, y_train, transform)
        test_set = data_loader(x_test, y_test, transform)
        train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, drop_last=True)
        test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)
        return train_loader, test_loader
The cause is the transform pipeline, not the model: ToTensor treats each (9, 1, 128) sample as an (H, W, C) image and permutes it to (128, 9, 1), so Normalize, whose nine per-channel stats broadcast against dimension 0, finds 128 where it expects 9. The fix is to put the tensor back into channels-first shape after ToTensor, for example with a small reshape function inserted into the Compose:

    def reshape_tensor(x):
        return x.reshape(9, 1, 128)

    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            ...,
            reshape_tensor,
            normalize,
        ]))
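Since the load() function above builds its datasets directly (no ImageFolder), the same idea would slot into its existing Compose; a sketch under that assumption. Note that reshape only reinterprets the flattened values into the target shape; a permute would instead restore the exact original channel/time layout:

    transform = transforms.Compose([
        transforms.ToTensor(),                    # (9, 1, 128) ndarray -> (128, 9, 1) tensor
        reshape_tensor,                           # -> (9, 1, 128), so the nine stats broadcast
        transforms.Normalize(mean=(0.0,) * 9, std=(1.0,) * 9),
    ])

    # Alternative preserving the original element order exactly:
    # lambda x: x.permute(1, 2, 0)                # (128, 9, 1) -> (9, 1, 128)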