Python 在训练模式下,目标应该通过
我刚刚开始深入学习,在大学里有一个关于检测交通信号灯的项目,我们可以使用开源代码 所以我试着在kaggle上运行代码 然而,在视频上测试保存的模型时,我犯了这样一个错误:“在训练模式下,目标应该通过” 我不知道为什么我需要在测试阶段通过目标。我不明白是因为最初的模型有问题,还是视频捕获部分写错了 模型Python 在训练模式下,目标应该通过,python,pytorch,faster-rcnn,Python,Pytorch,Faster Rcnn,我刚刚开始深入学习,在大学里有一个关于检测交通信号灯的项目,我们可以使用开源代码 所以我试着在kaggle上运行代码 然而,在视频上测试保存的模型时,我犯了这样一个错误:“在训练模式下,目标应该通过” 我不知道为什么我需要在测试阶段通过目标。我不明白是因为最初的模型有问题,还是视频捕获部分写错了 模型 from torchvision.models.detection.faster_rcnn import FastRCNNPredictor def _get_instance_
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
def _get_instance_segmentation_model(num_classes):
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
return model
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
N_CLASS = 4
INP_FEATURES = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = FastRCNNPredictor(INP_FEATURES, N_CLASS)
model.to(device)
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.Adam(params)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)
培训
lossHist = LossAverager()
valLossHist = LossAverager()
for epoch in range(EPOCHS):
start_time = time()
model.train()
lossHist.reset()
for images, targets, image_ids in tqdm(trainDataLoader):
#bbox = check_bbox(bbox)
images = torch.stack(images).to(device)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
bs = images.shape[0]
loss_dict = model(images, targets)
totalLoss = sum(loss for loss in loss_dict.values())
lossValue = totalLoss.item()
lossHist.update(lossValue,bs)
optimizer.zero_grad()
totalLoss.backward()
optimizer.step()
if lr_scheduler is not None:
lr_scheduler.step(totalLoss)
print(f"[{str(datetime.timedelta(seconds = time() - start_time))[2:7]}]")
print(f"Epoch {epoch}/{EPOCHS}")
print(f"Train loss: {lossHist.avg}")
if(epoch == 10):
torch.save(model.state_dict(), 'fasterrcnn_resnet{}_fpn.pth'.format(epoch))
torch.save(model.state_dict(), 'fasterrcnn_resnet{}_fpn.pth'.format(epoch))
在有错误的视频上测试
ValueError回溯(最近一次调用上次)
在()
8打印(输入)
9
--->10结果=模型(输入)
11
12个框=结果[0]['box'].类型(torch.cuda.FloatTensor)
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in_call_impl(self,*input,**kwargs)
725结果=self.\u slow\u forward(*输入,**kwargs)
726其他:
-->727结果=自转发(*输入,**kwargs)
728用于itertools.chain中的挂钩(
729 _全局_向前_hooks.values(),
/usr/local/lib/python3.7/dist-packages/torchvision/models/detection/generalized_rcnn.py in forward(自我、图像、目标)
58 """
59如果自我培训和目标为无:
--->60提高值错误(“在培训模式下,应通过目标”)
61如果自我培训:
62断言目标不是无
ValueError:在训练模式下,应通过目标
提前谢谢!如果您能告诉我如何更正模型或视频捕获代码,我将非常感激
while(True):
ret, input = cap.read()
image = input.copy()
input = preprocess(input).float()
input = input.unsqueeze_(0)
input = input.type(torch.cuda.FloatTensor)
print(input)
result = model(input)
boxes = result[0]['boxes'].type(torch.cuda.FloatTensor)
scores = result[0]['scores'].type(torch.cuda.FloatTensor)
labels = result[0]['labels'].type(torch.cuda.FloatTensor)
mask = nms(boxes,scores,0.3)
boxes = boxes[mask]
scores = scores[mask]
labels = labels[mask]
boxes = boxes.data.cpu().numpy().astype(np.int32)
scores = scores.data.cpu().numpy()
labels = labels.data.cpu().numpy()
mask = scores >= 0.5
boxes = boxes[mask]
scores = scores[mask]
labels = labels[mask]
colors = {1:(0,255,0), 2:(255,255,0), 3:(255,0,0)}
for box,label in zip(boxes,labels):
image = cv2.rectangle(image,
(box[0], box[1]),
(box[2], box[3]),
(0,0,255), 1)
cv2.imshow("image", image)
if cv2.waitKey(0):
break
ValueError Traceback (most recent call last)
<ipython-input-84-e32f9d25d942> in <module>()
8 print(input)
9
---> 10 result = model(input)
11
12 boxes = result[0]['boxes'].type(torch.cuda.FloatTensor)
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
/usr/local/lib/python3.7/dist-packages/torchvision/models/detection/generalized_rcnn.py in forward(self, images, targets)
58 """
59 if self.training and targets is None:
---> 60 raise ValueError("In training mode, targets should be passed")
61 if self.training:
62 assert targets is not None
ValueError: In training mode, targets should be passed