
Java OpenImaj - Recognizing Shapes


My goal is to recognize a shape after training a classifier, similar to what is done in Chapter 12 of the OpenIMAJ tutorial. Chapter 12 uses the Caltech101 class, which does not help me because I want to train the classifier on my own set of images. Based on Chapter 12, I created this working code:

package com.mycompany.video.analytics;

import de.bwaldvogel.liblinear.SolverType;
import org.openimaj.data.DataSource;
import org.openimaj.data.dataset.Dataset;
import org.openimaj.data.dataset.GroupedDataset;
import org.openimaj.data.dataset.ListDataset;
import org.openimaj.data.dataset.VFSGroupDataset;
import org.openimaj.experiment.dataset.sampling.GroupSampler;
import org.openimaj.experiment.dataset.sampling.GroupedUniformRandomisedSampler;
import org.openimaj.experiment.dataset.split.GroupedRandomSplitter;
import org.openimaj.experiment.evaluation.classification.ClassificationEvaluator;
import org.openimaj.experiment.evaluation.classification.ClassificationResult;
import org.openimaj.experiment.evaluation.classification.analysers.confusionmatrix.CMAnalyser;
import org.openimaj.experiment.evaluation.classification.analysers.confusionmatrix.CMResult;
import org.openimaj.feature.DoubleFV;
import org.openimaj.feature.FeatureExtractor;
import org.openimaj.feature.SparseIntFV;
import org.openimaj.feature.local.data.LocalFeatureListDataSource;
import org.openimaj.feature.local.list.LocalFeatureList;
import org.openimaj.image.FImage;
import org.openimaj.image.ImageUtilities;
import org.openimaj.image.feature.dense.gradient.dsift.ByteDSIFTKeypoint;
import org.openimaj.image.feature.dense.gradient.dsift.DenseSIFT;
import org.openimaj.image.feature.dense.gradient.dsift.PyramidDenseSIFT;
import org.openimaj.image.feature.local.aggregate.BagOfVisualWords;
import org.openimaj.image.feature.local.aggregate.BlockSpatialAggregator;
import org.openimaj.io.IOUtils;
import org.openimaj.ml.annotation.ScoredAnnotation;
import org.openimaj.ml.annotation.linear.LiblinearAnnotator;
import org.openimaj.ml.clustering.ByteCentroidsResult;
import org.openimaj.ml.clustering.assignment.HardAssigner;
import org.openimaj.ml.clustering.kmeans.ByteKMeans;
import org.openimaj.util.pair.IntFloatPair;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;

/**
 * Created by yschondorf on 5/29/2018.
 */
public class Chapter12Generic {
    private static String IMAGES_PATH = "C:\\Development\\Video Analytics\\tpImages";

    public static void main(String[] args) {

        try {
            LiblinearAnnotator<FImage, String> trainer = null;
            VFSGroupDataset<FImage> allData = null;
            allData = new VFSGroupDataset<FImage>(
                    IMAGES_PATH,
                    ImageUtilities.FIMAGE_READER);

            GroupedDataset<String, ListDataset<FImage>, FImage> data =
                    GroupSampler.sample(allData, 1, false);

            GroupedRandomSplitter<String, FImage> splits =
                    new GroupedRandomSplitter<String, FImage>(data, 15, 0, 15); // 15 training, 0 validation, 15 testing


            DenseSIFT denseSIFT = new DenseSIFT(5, 7);
            PyramidDenseSIFT<FImage> pyramidDenseSIFT = new PyramidDenseSIFT<FImage>(denseSIFT, 6f, 7);

            GroupedDataset<String, ListDataset<FImage>, FImage> sample =
                    GroupedUniformRandomisedSampler.sample(splits.getTrainingDataset(), 15);

            HardAssigner<byte[], float[], IntFloatPair> assigner = trainQuantiser(sample, pyramidDenseSIFT);

            FeatureExtractor<DoubleFV, FImage> extractor = new PHOWExtractor(pyramidDenseSIFT, assigner);

            //
            // Now we’re ready to construct and train a classifier
            //
            trainer = new LiblinearAnnotator<FImage, String>(
                    extractor, LiblinearAnnotator.Mode.MULTICLASS, SolverType.L2R_L2LOSS_SVC, 1.0, 0.00001);

            Date start = new Date();
            System.out.println("Classifier training: start");
            trainer.train(splits.getTrainingDataset());
            System.out.println("Classifier training: end");
            Date end = new Date();
            long durationSec = (end.getTime() - start.getTime()) / 1000;
            System.out.println("Classifier training duration: " + durationSec + " seconds");

            final GroupedDataset<String, ListDataset<FImage>, FImage> testDataSet = splits.getTestDataset();

            ClassificationEvaluator<CMResult<String>, String, FImage> eval =
                    new ClassificationEvaluator<CMResult<String>, String, FImage>(
                            trainer, testDataSet, new CMAnalyser<FImage, String>(CMAnalyser.Strategy.SINGLE));

            start = new Date();
            System.out.println("Classifier evaluation: start");
            Map<FImage, ClassificationResult<String>> guesses = eval.evaluate();
            System.out.println("Classifier evaluation - tp: end");
            end = new Date();
            durationSec = (end.getTime() - start.getTime()) / 1000;
            System.out.println("Classifier evaluation duration: " + durationSec + " seconds");

            CMResult<String> result = eval.analyse(guesses);
            System.out.println("Result - tp: " + result);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Extracts dense SIFT features from the images in the given dataset (keeping at most the first 10000
     * feature lists), then clusters them into 300 visual words. Returns a HardAssigner that can be used to
     * assign SIFT features to visual-word identifiers.
     *
     * @param sample           the dataset of images to build the vocabulary from
     * @param pyramidDenseSIFT the dense SIFT feature extractor to apply to each image
     * @return a HardAssigner mapping SIFT features to visual words
     */
    static HardAssigner<byte[], float[], IntFloatPair> trainQuantiser(
            Dataset<FImage> sample,
            PyramidDenseSIFT<FImage> pyramidDenseSIFT)
    {
        System.out.println("trainQuantiser: start");
        Date start = new Date();
        List<LocalFeatureList<ByteDSIFTKeypoint>> allKeys = new ArrayList<LocalFeatureList<ByteDSIFTKeypoint>>();

        int i = 0;
        int total = sample.numInstances();
        for (FImage rec : sample) {
            i++;
            System.out.println(String.format("Analysing image %d out of %d", i, total));
            FImage img = rec.getImage();

            pyramidDenseSIFT.analyseImage(img);
            allKeys.add(pyramidDenseSIFT.getByteKeypoints(0.005f));
        }
        final int numberOfDenseSiftFeaturesToExtract = 10000;
        final int numberOfClassesInCluster = 300;
        if (allKeys.size() > numberOfDenseSiftFeaturesToExtract)
            allKeys = allKeys.subList(0, numberOfDenseSiftFeaturesToExtract);

        ByteKMeans km = ByteKMeans.createKDTreeEnsemble(numberOfClassesInCluster);
        DataSource<byte[]> dataSource = new LocalFeatureListDataSource<ByteDSIFTKeypoint, byte[]>(allKeys);
        System.out.println(String.format(
                "Clustering %d image features into %d classes...",
                numberOfDenseSiftFeaturesToExtract, numberOfClassesInCluster));
        ByteCentroidsResult result = km.cluster(dataSource);
        Date end = new Date();
        System.out.println("trainQuantiser: end");
        System.out.println("trainQuantiser duration: " + (end.getTime() - start.getTime())/1000 + " seconds");
        return result.defaultHardAssigner();
    }

    static class PHOWExtractor implements FeatureExtractor<DoubleFV, FImage> {
        PyramidDenseSIFT<FImage> pdsift;
        HardAssigner<byte[], float[], IntFloatPair> assigner;

        public PHOWExtractor(PyramidDenseSIFT<FImage> pdsift, HardAssigner<byte[], float[], IntFloatPair> assigner)
        {
            this.pdsift = pdsift;
            this.assigner = assigner;
        }

        public DoubleFV extractFeature(FImage object) {
            FImage image = object.getImage();
            pdsift.analyseImage(image);

            BagOfVisualWords<byte[]> bovw = new BagOfVisualWords<byte[]>(assigner);

            BlockSpatialAggregator<byte[], SparseIntFV> spatial = new BlockSpatialAggregator<byte[], SparseIntFV>(
                    bovw, 2, 2);

            return spatial.aggregate(pdsift.getByteKeypoints(0.015f), image.getBounds()).normaliseFV();
        }
    }
}
I do not know how to proceed from here. What I really want is not to evaluate the classifier's accuracy as in Chapter 12, but to use the classifier to determine whether a new image contains the shape I am interested in. I have not found any documentation or examples showing how to do this. Any help would be greatly appreciated.

Apart from the tutorial, I have not found any substantial documentation. Can anyone point me to some? In the meantime I am just guessing. I cannot use the testDataset from the splits, because training the classifier and using it need to be separated: I want to train the classifier once (it takes a long time, many minutes), then save the result, say by serializing the trainer object above to disk, and deserializing it on later invocations. When I add code to do this and try to use a testDataset of new images, I get a NullPointerException. The exception is unrelated to deserializing the object, because I also get it when the object is not yet on disk.
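The save/load caching I have in mind uses OpenIMAJ's IOUtils, as in the full listing below. A minimal sketch of just that caching logic, where the TrainerFactory hook and the model file path are hypothetical stand-ins for the actual training code:

import java.io.File;
import java.io.IOException;

import org.openimaj.image.FImage;
import org.openimaj.io.IOUtils;
import org.openimaj.ml.annotation.linear.LiblinearAnnotator;

class TrainerCache {

    /** Hypothetical hook standing in for the training code in the listing below. */
    interface TrainerFactory {
        LiblinearAnnotator<FImage, String> train() throws IOException;
    }

    /**
     * Loads a previously trained annotator from disk if one exists;
     * otherwise trains a fresh one and persists it for future runs.
     */
    static LiblinearAnnotator<FImage, String> loadOrTrain(File modelFile, TrainerFactory factory)
            throws IOException {
        if (modelFile.isFile()) {
            return IOUtils.readFromFile(modelFile); // skip the expensive training step
        }
        LiblinearAnnotator<FImage, String> trainer = factory.train();
        IOUtils.writeToFile(trainer, modelFile);    // cache the model for the next run
        return trainer;
    }
}

The new code: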

package com.mycompany.video.analytics;

import de.bwaldvogel.liblinear.SolverType;
import org.apache.commons.vfs2.FileSystemException;
import org.openimaj.data.DataSource;
import org.openimaj.data.dataset.Dataset;
import org.openimaj.data.dataset.GroupedDataset;
import org.openimaj.data.dataset.ListDataset;
import org.openimaj.data.dataset.VFSGroupDataset;
import org.openimaj.experiment.dataset.sampling.GroupSampler;
import org.openimaj.experiment.dataset.sampling.GroupedUniformRandomisedSampler;
import org.openimaj.experiment.dataset.split.GroupedRandomSplitter;
import org.openimaj.experiment.evaluation.classification.ClassificationEvaluator;
import org.openimaj.experiment.evaluation.classification.ClassificationResult;
import org.openimaj.experiment.evaluation.classification.analysers.confusionmatrix.CMAnalyser;
import org.openimaj.experiment.evaluation.classification.analysers.confusionmatrix.CMResult;
import org.openimaj.feature.DoubleFV;
import org.openimaj.feature.FeatureExtractor;
import org.openimaj.feature.SparseIntFV;
import org.openimaj.feature.local.data.LocalFeatureListDataSource;
import org.openimaj.feature.local.list.LocalFeatureList;
import org.openimaj.image.FImage;
import org.openimaj.image.ImageUtilities;
import org.openimaj.image.feature.dense.gradient.dsift.ByteDSIFTKeypoint;
import org.openimaj.image.feature.dense.gradient.dsift.DenseSIFT;
import org.openimaj.image.feature.dense.gradient.dsift.PyramidDenseSIFT;
import org.openimaj.image.feature.local.aggregate.BagOfVisualWords;
import org.openimaj.image.feature.local.aggregate.BlockSpatialAggregator;
import org.openimaj.io.IOUtils;
import org.openimaj.ml.annotation.ScoredAnnotation;
import org.openimaj.ml.annotation.linear.LiblinearAnnotator;
import org.openimaj.ml.clustering.ByteCentroidsResult;
import org.openimaj.ml.clustering.assignment.HardAssigner;
import org.openimaj.ml.clustering.kmeans.ByteKMeans;
import org.openimaj.util.pair.IntFloatPair;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;


public class Chapter12Generic {
    private static String IMAGES_PATH = "C:\\Development\\Video Analytics\\tpImages";
    private static String TEST_IMAGES_PATH = "C:\\Development\\Video Analytics\\testImages";
    private static String TRAINER_DATA_FILE_PATH = "C:\\Development\\Video Analytics\\out\\trainer.dat";

    public static void main(String[] args) throws Exception {

        LiblinearAnnotator<FImage, String> trainer = null;
        File inputDataFile = new File(TRAINER_DATA_FILE_PATH);
        if (inputDataFile.isFile()) {
            trainer = IOUtils.readFromFile(inputDataFile);
        } else {
            VFSGroupDataset<FImage> allData = null;
            allData = new VFSGroupDataset<FImage>(
                    IMAGES_PATH,
                    ImageUtilities.FIMAGE_READER);

            GroupedDataset<String, ListDataset<FImage>, FImage> data =
                    GroupSampler.sample(allData, 1, false);

            GroupedRandomSplitter<String, FImage> splits =
                    new GroupedRandomSplitter<String, FImage>(data, 15, 0, 15); // 15 training, 0 validation, 15 testing


            DenseSIFT denseSIFT = new DenseSIFT(5, 7);
            PyramidDenseSIFT<FImage> pyramidDenseSIFT = new PyramidDenseSIFT<FImage>(denseSIFT, 6f, 7);

            GroupedDataset<String, ListDataset<FImage>, FImage> sample =
                    GroupedUniformRandomisedSampler.sample(splits.getTrainingDataset(), 15);

            HardAssigner<byte[], float[], IntFloatPair> assigner = trainQuantiser(sample, pyramidDenseSIFT);

            FeatureExtractor<DoubleFV, FImage> extractor = new PHOWExtractor(pyramidDenseSIFT, assigner);

            //
            // Now we’re ready to construct and train a classifier
            //
            trainer = new LiblinearAnnotator<FImage, String>(
                    extractor, LiblinearAnnotator.Mode.MULTICLASS, SolverType.L2R_L2LOSS_SVC, 1.0, 0.00001);

            Date start = new Date();
            System.out.println("Classifier training: start");
            trainer.train(splits.getTrainingDataset());
            IOUtils.writeToFile(trainer, inputDataFile);

            System.out.println("Classifier training: end");
            Date end = new Date();
            long durationSec = (end.getTime() - start.getTime()) / 1000;
            System.out.println("Classifier training duration: " + durationSec + " seconds");
        }

//        final GroupedDataset<String, ListDataset<FImage>, FImage> testDataSet = splits.getTestDataset();
        VFSGroupDataset<FImage> testDataSet = new VFSGroupDataset<FImage>(
                TEST_IMAGES_PATH,
                ImageUtilities.FIMAGE_READER);

        ClassificationEvaluator<CMResult<String>, String, FImage> eval =
                new ClassificationEvaluator<CMResult<String>, String, FImage>(
                        trainer, testDataSet, new CMAnalyser<FImage, String>(CMAnalyser.Strategy.SINGLE));

        Date start = new Date();
        System.out.println("Classifier evaluation: start");
        Map<FImage, ClassificationResult<String>> guesses = eval.evaluate();
        System.out.println("Classifier evaluation - tp: end");
        Date end = new Date();
        long durationSec = (end.getTime() - start.getTime()) / 1000;
        System.out.println("Classifier evaluation duration: " + durationSec + " seconds");

        CMResult<String> result = eval.analyse(guesses);
        System.out.println("Result - tp: " + result);      
    }

    /**
     * Extracts dense SIFT features from the images in the given dataset (keeping at most the first 10000
     * feature lists), then clusters them into 300 visual words. Returns a HardAssigner that can be used to
     * assign SIFT features to visual-word identifiers.
     *
     * @param sample           the dataset of images to build the vocabulary from
     * @param pyramidDenseSIFT the dense SIFT feature extractor to apply to each image
     * @return a HardAssigner mapping SIFT features to visual words
     */
    static HardAssigner<byte[], float[], IntFloatPair> trainQuantiser(
            Dataset<FImage> sample,
            PyramidDenseSIFT<FImage> pyramidDenseSIFT)
    {
        System.out.println("trainQuantiser: start");
        Date start = new Date();
        List<LocalFeatureList<ByteDSIFTKeypoint>> allKeys = new ArrayList<LocalFeatureList<ByteDSIFTKeypoint>>();

        int i = 0;
        int total = sample.numInstances();
        for (FImage rec : sample) {
            i++;
            System.out.println(String.format("Analysing image %d out of %d", i, total));
            FImage img = rec.getImage();

            pyramidDenseSIFT.analyseImage(img);
            allKeys.add(pyramidDenseSIFT.getByteKeypoints(0.005f));
        }
        final int numberOfDenseSiftFeaturesToExtract = 10000;
        final int numberOfClassesInCluster = 300;
        if (allKeys.size() > numberOfDenseSiftFeaturesToExtract)
            allKeys = allKeys.subList(0, numberOfDenseSiftFeaturesToExtract);

        ByteKMeans km = ByteKMeans.createKDTreeEnsemble(numberOfClassesInCluster);
        DataSource<byte[]> dataSource = new LocalFeatureListDataSource<ByteDSIFTKeypoint, byte[]>(allKeys);
        System.out.println(String.format(
                "Clustering %d image features into %d classes...",
                numberOfDenseSiftFeaturesToExtract, numberOfClassesInCluster));
        ByteCentroidsResult result = km.cluster(dataSource);
        Date end = new Date();
        System.out.println("trainQuantiser: end");
        System.out.println("trainQuantiser duration: " + (end.getTime() - start.getTime())/1000 + " seconds");
        return result.defaultHardAssigner();
    }

    static class PHOWExtractor implements FeatureExtractor<DoubleFV, FImage> {
        PyramidDenseSIFT<FImage> pdsift;
        HardAssigner<byte[], float[], IntFloatPair> assigner;

        public PHOWExtractor(PyramidDenseSIFT<FImage> pdsift, HardAssigner<byte[], float[], IntFloatPair> assigner)
        {
            this.pdsift = pdsift;
            this.assigner = assigner;
        }

        public DoubleFV extractFeature(FImage object) {
            FImage image = object.getImage();
            pdsift.analyseImage(image);

            BagOfVisualWords<byte[]> bovw = new BagOfVisualWords<byte[]>(assigner);

            BlockSpatialAggregator<byte[], SparseIntFV> spatial = new BlockSpatialAggregator<byte[], SparseIntFV>(
                    bovw, 2, 2);

            return spatial.aggregate(pdsift.getByteKeypoints(0.015f), image.getBounds()).normaliseFV();
        }
    }
}
The exception is thrown on this call:

CMResult<String> result = eval.analyse(guesses);
Is there a way to fix this?

Version 3 of the code, added following @Jon's answer. The problem now is that it classifies false images as real ones:

public class Chapter12Generic_v3 {

    // contains an accordion folder with images from caltech101
    private static String TRAINING_IMAGES_PATH = "C:\\Development\\Video Analytics\\images";

    // contains 1 airplane image from caltech101
    private static String TEST_IMAGE = "C:\\Development\\Video Analytics\\testImages\\falseImages\\image_0001.jpg";

    private static String TRAINER_DATA_FILE_PATH = "C:\\Development\\Video Analytics\\out\\trainer.dat";

    public static void main(String[] args) throws Exception {
        LiblinearAnnotator<FImage, String> trainer = null;
        File inputDataFile = new File(TRAINER_DATA_FILE_PATH);
        if (inputDataFile.isFile()) {
            trainer = IOUtils.readFromFile(inputDataFile);
        } else {
            VFSGroupDataset<FImage> allData = null;
            allData = new VFSGroupDataset<FImage>(
                    TRAINING_IMAGES_PATH,
                    ImageUtilities.FIMAGE_READER);

            GroupedDataset<String, ListDataset<FImage>, FImage> data =
                    GroupSampler.sample(allData, 1, false);

            GroupedRandomSplitter<String, FImage> splits =
                    new GroupedRandomSplitter<String, FImage>(data, 15, 0, 15); // 15 training, 0 validation, 15 testing


            DenseSIFT denseSIFT = new DenseSIFT(5, 7);
            PyramidDenseSIFT<FImage> pyramidDenseSIFT = new PyramidDenseSIFT<FImage>(denseSIFT, 6f, 7);

            GroupedDataset<String, ListDataset<FImage>, FImage> sample =
                    GroupedUniformRandomisedSampler.sample(splits.getTrainingDataset(), 15);

            HardAssigner<byte[], float[], IntFloatPair> assigner = trainQuantiser(sample, pyramidDenseSIFT);

            FeatureExtractor<DoubleFV, FImage> extractor = new PHOWExtractor(pyramidDenseSIFT, assigner);

            //
            // Now we’re ready to construct and train a classifier
            //
            trainer = new LiblinearAnnotator<FImage, String>(
                    extractor, LiblinearAnnotator.Mode.MULTICLASS, SolverType.L2R_L2LOSS_SVC, 1.0, 0.00001);

            Date start = new Date();
            System.out.println("Classifier training: start");
            trainer.train(splits.getTrainingDataset());
            IOUtils.writeToFile(trainer, new File(TRAINER_DATA_FILE_PATH));
            System.out.println("Classifier training: end");
            Date end = new Date();
            long durationSec = (end.getTime() - start.getTime()) / 1000;
            System.out.println("Classifier training duration: " + durationSec + " seconds");
        }


        FImage query = ImageUtilities.readF(new File(TEST_IMAGE));

        final List<ScoredAnnotation<String>> scoredAnnotations = trainer.annotate(query);
        final ClassificationResult<String> classificationResult = trainer.classify(query);
        System.out.println("scoredAnnotations: " + scoredAnnotations);
        System.out.println("classificationResult: " + classificationResult);
    }

    /**
     * Extracts dense SIFT features from the images in the given dataset (keeping at most the first 10000
     * feature lists), then clusters them into 300 visual words. Returns a HardAssigner that can be used to
     * assign SIFT features to visual-word identifiers.
     *
     * @param sample           the dataset of images to build the vocabulary from
     * @param pyramidDenseSIFT the dense SIFT feature extractor to apply to each image
     * @return a HardAssigner mapping SIFT features to visual words
     */
    static HardAssigner<byte[], float[], IntFloatPair> trainQuantiser(
            Dataset<FImage> sample,
            PyramidDenseSIFT<FImage> pyramidDenseSIFT)
    {
        System.out.println("trainQuantiser: start");
        Date start = new Date();
        List<LocalFeatureList<ByteDSIFTKeypoint>> allKeys = new ArrayList<LocalFeatureList<ByteDSIFTKeypoint>>();

        int i = 0;

        int total = sample.numInstances();
        for (FImage rec : sample) {
            i++;
            System.out.println(String.format("Analysing image %d out of %d", i, total));
            FImage img = rec.getImage();

            pyramidDenseSIFT.analyseImage(img);
            allKeys.add(pyramidDenseSIFT.getByteKeypoints(0.005f));
        }
        final int numberOfDenseSiftFeaturesToExtract = 10000;
        final int numberOfClassesInCluster = 300;
        if (allKeys.size() > numberOfDenseSiftFeaturesToExtract)
            allKeys = allKeys.subList(0, numberOfDenseSiftFeaturesToExtract);

        ByteKMeans km = ByteKMeans.createKDTreeEnsemble(numberOfClassesInCluster);
        DataSource<byte[]> dataSource = new LocalFeatureListDataSource<ByteDSIFTKeypoint, byte[]>(allKeys);
        System.out.println(String.format(
                "Clustering %d image features into %d classes...",
                numberOfDenseSiftFeaturesToExtract, numberOfClassesInCluster));
        ByteCentroidsResult result = km.cluster(dataSource);
        Date end = new Date();
        System.out.println("trainQuantiser: end");
        System.out.println("trainQuantiser duration: " + (end.getTime() - start.getTime())/1000 + " seconds");
        return result.defaultHardAssigner();
    }

    static class PHOWExtractor implements FeatureExtractor<DoubleFV, FImage> {
        PyramidDenseSIFT<FImage> pdsift;
        HardAssigner<byte[], float[], IntFloatPair> assigner;

        public PHOWExtractor(PyramidDenseSIFT<FImage> pdsift, HardAssigner<byte[], float[], IntFloatPair> assigner)
        {
            this.pdsift = pdsift;
            this.assigner = assigner;
        }

        public DoubleFV extractFeature(FImage object) {
            FImage image = object.getImage();
            pdsift.analyseImage(image);

            BagOfVisualWords<byte[]> bovw = new BagOfVisualWords<byte[]>(assigner);

            BlockSpatialAggregator<byte[], SparseIntFV> spatial = new BlockSpatialAggregator<byte[], SparseIntFV>(
                    bovw, 2, 2);

            return spatial.aggregate(pdsift.getByteKeypoints(0.015f), image.getBounds()).normaliseFV();
        }
    }
}

If you just want to use your trained model to classify things, then ignore all the ClassificationEvaluator stuff - that is only for computing accuracy and the like.


Look at the type of your trainer object. As your trainer instance is typed on FImage and String, both its annotate and classify methods will accept an FImage you provide as input and give you the classification result as output in slightly different forms; you have to decide which one best fits your needs.
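For illustration, a minimal sketch of both calls against a trained annotator; the helper class, the trainer argument, and the image file are placeholders, and it assumes the ClassificationResult accessors getPredictedClasses() and getConfidence() from the OpenIMAJ API:

import java.io.File;
import java.io.IOException;
import java.util.List;

import org.openimaj.experiment.evaluation.classification.ClassificationResult;
import org.openimaj.image.FImage;
import org.openimaj.image.ImageUtilities;
import org.openimaj.ml.annotation.ScoredAnnotation;
import org.openimaj.ml.annotation.linear.LiblinearAnnotator;

class ClassifySingleImage {

    // `trainer` is assumed to be an already-trained (or deserialized)
    // LiblinearAnnotator<FImage, String>, as in the question's code.
    static void classify(LiblinearAnnotator<FImage, String> trainer, File imageFile)
            throws IOException {
        FImage query = ImageUtilities.readF(imageFile);

        // annotate(): returns the predicted class labels with confidence scores
        List<ScoredAnnotation<String>> annotations = trainer.annotate(query);
        System.out.println("annotations: " + annotations);

        // classify(): returns a ClassificationResult keyed by class label
        ClassificationResult<String> result = trainer.classify(query);
        for (String clazz : result.getPredictedClasses()) {
            System.out.println(clazz + " -> confidence " + result.getConfidence(clazz));
        }
    }
}

Either call works for deciding whether a new image contains the shape of interest; with classify() you can threshold the confidence of the class you care about.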

You have a test dataset right there - I don't know if that is where your test images live - so you have to put your images there - and you have to go through the documentation.

@gpasch I updated the question based on your comment. I have not found any substantial documentation beyond the tutorial. I cannot use the testDataset because I want to separate the training part from the detection part, and the new code throws a NullPointerException. I am following this: ClassificationEvaluator eval = new ClassificationEvaluator(trainer, testDataSet, new CMAnalyser(CMAnalyser.Strategy.SINGLE));

You have to look at the documentation for ClassificationEvaluator - do not expect everything to be in the tutorial. So yes, you need to train and store the result, then test with whatever you want - replace the whole test set if you have to.

The only documentation I could find is the API reference. eval has two methods I can use, evaluate and analyse(Map predicted), so I should execute them in order: Map guesses = eval.evaluate(); CMResult result = eval.analyse(guesses); but the second call throws an exception, as I already mentioned...

I added version 3 of the code based on your answer, @Jon. The main change is instantiating a single FImage and calling trainer.annotate. The problem now is that I supply an airplane test image and it is classified as an accordion. TRAINING_IMAGES_PATH contains an accordion folder with accordion images from caltech101, and TEST_IMAGE is an airplane image from caltech101. What is still incorrect?

You cannot train it on just one class - during training you need at least one negative class with example images.

You are right! I added another folder for airplanes and changed the line GroupedDataset data = GroupSampler.sample(allData, 1, false); to GroupedDataset data = GroupSampler.sample(allData, 2, false); and the airplane image is now classified correctly.
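For reference, the fix from the last two comments as a minimal sketch (the directory path is a placeholder): the training directory now contains two class subfolders, and GroupSampler pulls both groups so training includes negative examples.

import org.openimaj.data.dataset.GroupedDataset;
import org.openimaj.data.dataset.ListDataset;
import org.openimaj.data.dataset.VFSGroupDataset;
import org.openimaj.experiment.dataset.sampling.GroupSampler;
import org.openimaj.image.FImage;
import org.openimaj.image.ImageUtilities;

class TwoClassTrainingSetup {
    public static void main(String[] args) throws Exception {
        // The training directory now contains at least two subfolders,
        // e.g. "accordion" (the shape of interest) and "airplanes" (negatives).
        VFSGroupDataset<FImage> allData = new VFSGroupDataset<FImage>(
                "C:\\Development\\Video Analytics\\images", // placeholder path
                ImageUtilities.FIMAGE_READER);

        // Sample 2 groups instead of 1 so training includes a negative class.
        GroupedDataset<String, ListDataset<FImage>, FImage> data =
                GroupSampler.sample(allData, 2, false);

        System.out.println("Training groups: " + data.getGroups());
    }
}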