
Python NLP: Error when using my own pre-trained model


I'm working on an NLP task with a RoBERTa model. Since training took a long time, I saved my model, but now, for some reason, part of my code is no longer compatible with this pre-trained model (it raises an error); before saving and re-uploading it, the code worked fine with the trained model.
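For context, the save/reload cycle was roughly the following (a sketch, assuming save_pretrained was used after training; the saving step and the concrete model/tokenizer classes are not part of the snippet below and are illustrative):

# Assumed save step (not shown in the original snippet):
model.save_pretrained("/content/drive/MyDrive/model")      # after training
tokenizer.save_pretrained("/content/drive/MyDrive/model")  # keep the tokenizer in sync

# Later, in a fresh session:
from transformers import RobertaForQuestionAnswering, RobertaTokenizer
model = RobertaForQuestionAnswering.from_pretrained("/content/drive/MyDrive/model")
tokenizer = RobertaTokenizer.from_pretrained("/content/drive/MyDrive/model")

My code: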

import torch
from torch.utils.data import DataLoader, SequentialSampler
from transformers import squad_convert_examples_to_features
from transformers.data.processors.squad import SquadExample, SquadResult
from transformers.data.metrics.squad_metrics import compute_predictions_logits

device = torch.device("cuda")

# Decoding parameters used by compute_predictions_logits further down
# (values assumed here; they are defined elsewhere in the full notebook)
n_best_size = 1
max_answer_length = 30
do_lower_case = True
null_score_diff_threshold = 0.0

model = model.from_pretrained("/content/drive/MyDrive/model")
model = model.cuda()
model.eval()

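# Helper: detach a tensor, move it to the CPU and return a plain Python list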
def to_list(tensor):
    return tensor.detach().cpu().tolist()

def run_prediction(question_texts, context_text):
    """Setup function to compute predictions"""
    examples = []

    for i, question_text in enumerate(question_texts):
        example = SquadExample(
            qas_id=str(i),
            question_text=question_text,
            context_text=context_text,
            answer_text=None,
            start_position_character=None,
            title="Predict",
            is_impossible=False,
            answers=None,
        )

        examples.append(example)

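    # Tokenize the examples into fixed-length features and a PyTorch dataset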
    features, dataset = squad_convert_examples_to_features(
        examples=examples,
        tokenizer=tokenizer,
        max_seq_length=384,
        doc_stride=128,
        max_query_length=64,
        is_training=False,
        return_dataset="pt",
      #  threads=1,
    )

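    # Evaluate sequentially (no shuffling), 10 examples per batch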
    eval_sampler = SequentialSampler(dataset)
    eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=10)

    all_results = []

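    # Forward pass over each batch, collecting start/end logits per example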
    for batch in eval_dataloader:
        model.eval()
        batch = tuple(t.to(device) for t in batch)

        with torch.no_grad():
            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
                "token_type_ids": batch[2],
            }

            example_indices = batch[3]

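            # return_dict=False keeps the tuple output (start_logits, end_logits)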
            outputs = model(**inputs, return_dict=False)

            for i, example_index in enumerate(example_indices):
                eval_feature = features[example_index.item()]
                unique_id = int(eval_feature.unique_id)

                output = [to_list(output[i]) for output in outputs]

                start_logits, end_logits = output
                result = SquadResult(unique_id, start_logits, end_logits)
                all_results.append(result)

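    # Files written as a side effect of compute_predictions_logits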
    output_prediction_file = "predictions.json"
    output_nbest_file = "nbest_predictions.json"
    output_null_log_odds_file = "null_predictions.json"

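    # Decode the collected logits into answer strings (SQuAD v2 style, "no answer" allowed)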
    predictions = compute_predictions_logits(
        examples,
        features,
        all_results,
        n_best_size,
        max_answer_length,
        do_lower_case,
        output_prediction_file,
        output_nbest_file,
        output_null_log_odds_file,
        False,  # verbose_logging
        True,  # version_2_with_negative
        null_score_diff_threshold,
        tokenizer,
    )

    return predictions

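# Example usage: ask two questions about a short context paragraph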
context = "New Zealand (Māori: Aotearoa) is a sovereign island country in the southwestern Pacific Ocean. It has a total land area of 268,000 square kilometres (103,500 sq mi), and a population of 4.9 million. New Zealand's capital city is Wellington, and its most populous city is Auckland."
questions = ["How many people live in New Zealand?", 
             "What's the largest city?"]
             
predictions = run_prediction(questions, context)

# Print results
for key in predictions.keys():
    print(predictions[key])
The error received:

Is there any way to fix this?

(This part of the code is largely based on this notebook: )