C++: CNTK evaluation model has two inputs

I have a project based on CNTK 2.3. I used code from the integration tests to train an MNIST classifier, as follows:

    auto device = DeviceDescriptor::GPUDevice(0);

    const size_t inputDim = sizeBlob * sizeBlob;
    const size_t numOutputClasses = numberOfClasses;
    const size_t hiddenLayerDim = 200;

    auto input = InputVariable({ inputDim }, CNTK::DataType::Float, L"features");

    auto scaledInput = ElementTimes(Constant::Scalar(0.00390625f, device), input);
    auto classifierOutput = FullyConnectedDNNLayer(scaledInput, hiddenLayerDim, device, std::bind(Sigmoid, _1, L""));
    auto outputTimesParam = Parameter(NDArrayView::RandomUniform<float>({ numOutputClasses, hiddenLayerDim }, -0.05, 0.05, 1, device));
    auto outputBiasParam = Parameter(NDArrayView::RandomUniform<float>({ numOutputClasses }, -0.05, 0.05, 1, device));
    classifierOutput = Plus(outputBiasParam, Times(outputTimesParam, classifierOutput), L"classifierOutput");

    auto labels = InputVariable({ numOutputClasses }, CNTK::DataType::Float, L"labels");
    auto trainingLoss = CNTK::CrossEntropyWithSoftmax(classifierOutput, labels, L"lossFunction");
    auto prediction = CNTK::ClassificationError(classifierOutput, labels, L"classificationError");

    // Test save and reload of model

    Variable classifierOutputVar = classifierOutput;
    Variable trainingLossVar = trainingLoss;
    Variable predictionVar = prediction;
    auto combinedNet = Combine({ trainingLoss, prediction, classifierOutput }, L"MNISTClassifier");
    //SaveAndReloadModel<float>(combinedNet, { &input, &labels, &trainingLossVar, &predictionVar, &classifierOutputVar }, device);

    classifierOutput = classifierOutputVar;
    trainingLoss = trainingLossVar;
    prediction = predictionVar;


    const size_t minibatchSize = 64;
    const size_t numSamplesPerSweep = 60000;
    const size_t numSweepsToTrainWith = 2;
    const size_t numMinibatchesToTrain = (numSamplesPerSweep * numSweepsToTrainWith) / minibatchSize;

    auto featureStreamName = L"features";
    auto labelsStreamName = L"labels";
    auto minibatchSource = TextFormatMinibatchSource(trainingSet, { { featureStreamName, inputDim },{ labelsStreamName, numOutputClasses } });

    auto featureStreamInfo = minibatchSource->StreamInfo(featureStreamName);
    auto labelStreamInfo = minibatchSource->StreamInfo(labelsStreamName);

    LearningRateSchedule learningRatePerSample = TrainingParameterPerSampleSchedule<double>(0.003125);
    auto trainer = CreateTrainer(classifierOutput, trainingLoss, prediction, { SGDLearner(classifierOutput->Parameters(), learningRatePerSample) });

    size_t outputFrequencyInMinibatches = 20;
    for (size_t i = 0; i < numMinibatchesToTrain; ++i)
    {
        auto minibatchData = minibatchSource->GetNextMinibatch(minibatchSize, device);
        trainer->TrainMinibatch({ { input, minibatchData[featureStreamInfo] },{ labels, minibatchData[labelStreamInfo] } }, device);
        PrintTrainingProgress(trainer, i, outputFrequencyInMinibatches);

        size_t trainingCheckpointFrequency = 100;
        if ((i % trainingCheckpointFrequency) == (trainingCheckpointFrequency - 1))
        {
            const wchar_t* ckpName = L"feedForward.net";
            //trainer->SaveCheckpoint(ckpName);
            //trainer->RestoreFromCheckpoint(ckpName);
        }
    }

    combinedNet->Save(g_dnnFile);
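
One thing worth noting about this training code: combinedNet is built from trainingLoss, prediction, and classifierOutput, and both the loss and the error node depend on the labels input, so the function saved to g_dnnFile has two arguments ("features" and "labels"), not one. A quick sanity check after loading (a hypothetical snippet, reusing the same Function API as the evaluation code below) would make this visible:

    // Hypothetical check: list the arguments and outputs of the saved model.
    // Because the combined function includes the loss and error nodes, it
    // exposes two arguments ("features" and "labels") and three outputs.
    FunctionPtr loaded = Function::Load(g_dnnFile, device);
    for (const Variable& arg : loaded->Arguments())
        std::wcout << L"argument: " << arg.Name() << L" (size " << arg.Shape().TotalSize() << L")" << std::endl;
    for (const Variable& out : loaded->Outputs())
        std::wcout << L"output: " << out.Name() << L" (size " << out.Shape().TotalSize() << L")" << std::endl;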
The training part works fine: the model trains, and I save it to a model file. However, when I try to evaluate a simple image to test the model, something seems to go wrong inside the model:

// Load the model.
    // The model is trained by <CNTK>/Examples/Image/Classification/ResNet/Python/TrainResNet_CIFAR10.py
    // Please see README.md in <CNTK>/Examples/Image/Classification/ResNet about how to train the model.
    FunctionPtr modelFunc = Function::Load(modelFile, device);

    // Get input variable. The model has only one single input.
    std::vector<Variable> inputs = modelFunc->Arguments();
    Variable inputVar = modelFunc->Arguments()[0];

    // The model has only one output.
    // If the model has more than one output, use modelFunc->Outputs to get the list of output variables.
    std::vector<Variable> outputs = modelFunc->Outputs();
    Variable outputVar = outputs[0];

    // Prepare input data.
    // For evaluating an image, you first need to perform some image preprocessing to make sure that the input image has the correct size and layout
    // that match the model inputs.
    // Please note that the model used by this example expects the CHW image layout.
    // inputVar.Shape[0] is image width, inputVar.Shape[1] is image height, and inputVar.Shape[2] is channels.
    // For simplicity and avoiding external dependencies, we skip the preprocessing step here, and just use some artificially created data as input.
    Mat image = imread(".....");
    uint8_t* imagePtr = (uint8_t*)(image).data;
    auto width = image.cols;
    auto height = image.rows;


    std::vector<float> inputData(inputVar.Shape().TotalSize());
    for (size_t i = 0; i < inputData.size(); ++i)
    {
        auto curChVal = imagePtr[(i)];
        inputData[i] = curChVal;
    }

    // Create input value and input data map
    ValuePtr inputVal = Value::CreateBatch(inputVar.Shape(), inputData, device);
    std::unordered_map<Variable, ValuePtr> inputDataMap = { { inputVar, inputVal } };

    // Create output data map. Using null as Value to indicate using system allocated memory.
    // Alternatively, create a Value object and add it to the data map.
    std::unordered_map<Variable, ValuePtr> outputDataMap = { { outputVar, nullptr } };

    // Start evaluation on the device
    modelFunc->Evaluate(inputDataMap, outputDataMap, device);

    // Get evaluate result as dense output
    ValuePtr outputVal = outputDataMap[outputVar];
    std::vector<std::vector<float>> outputData;
    outputVal->CopyVariableValueTo(outputVar, outputData);

    PrintOutput<float>(outputVar.Shape().TotalSize(), outputData);
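
For what it's worth, once outputData has been filled, reading the prediction is just an arg-max over the ten activations of classifierOutput. A minimal sketch, assuming a single image in the batch (so outputData has exactly one inner vector; requires <algorithm>):

    // Minimal sketch: the predicted digit is the index of the largest
    // activation in the single sequence returned for this one-sample batch.
    const std::vector<float>& scores = outputData[0];
    size_t predictedClass = std::distance(scores.begin(), std::max_element(scores.begin(), scores.end()));
    std::wcout << L"predicted class: " << predictedClass << std::endl;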
I ran the same code in C# and it works fine. What I found is:
Variable inputVar;
GetInputVariableByName(modelFunc, L"features", inputVar);

Variable outputVar;
GetOutputVaraiableByName(modelFunc, L"classifierOutput", outputVar);
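
GetInputVariableByName and GetOutputVaraiableByName are not part of the CNTK library itself; they are helpers from the CNTK C++ evaluation examples (EvalMultithreads.cpp). Roughly, they look a variable up by name instead of by position, which matters here precisely because the saved model has two arguments, so Arguments()[0] is not guaranteed to be "features":

    // Helpers along the lines of the CNTK C++ evaluation examples: find a
    // variable by name among a function's arguments or outputs.
    inline bool GetVariableByName(std::vector<Variable> variableLists, std::wstring varName, Variable& var)
    {
        for (const Variable& v : variableLists)
        {
            if (v.Name().compare(varName) == 0)
            {
                var = v;
                return true;
            }
        }
        return false;
    }

    inline bool GetInputVariableByName(FunctionPtr evalFunc, std::wstring varName, Variable& var)
    {
        return GetVariableByName(evalFunc->Arguments(), varName, var);
    }

    inline bool GetOutputVaraiableByName(FunctionPtr evalFunc, std::wstring varName, Variable& var)
    {
        return GetVariableByName(evalFunc->Outputs(), varName, var);
    }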