Java: cannot use string attributes in an ARFF file with Weka when building a classifier


Hi, I am using Weka for machine learning, and my ARFF file looks like this:

@relation datastest

@attribute fwoh {what, when, where, how, who, why}
@attribute parameter {color, performance}
@attribute object { power, cost}
@attribute model {x,y,z}
@attribute question String
I tried J48, PART, DecisionTable, ZeroR and SMO; every one of them throws an exception when I build the classifier:

weka.core.UnsupportedAttributeTypeException: weka.classifiers.rules.ZeroR: Cannot handle string class!
    at weka.core.Capabilities.test(Capabilities.java:1164)
    at weka.core.Capabilities.test(Capabilities.java:1303)
    at weka.core.Capabilities.test(Capabilities.java:1208)
    at weka.core.Capabilities.testWithFail(Capabilities.java:1506)
    at weka.classifiers.rules.ZeroR.buildClassifier(ZeroR.java:122)
    at wekaproject.TextCategorizationTest.main(TextCategorizationTest.java:66)
I build the classifier like this:

final Instances data = new Instances(readDataFile("questions.txt"));
final Classifier classifier = new SMO();
classifier.buildClassifier(data);
Can anyone tell me which classifier I should use? Should I use StringToWordVector? I tried StringToWordVector, but it did not help. Can anyone show me how to use StringToWordVector, if it is needed?
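
For reference, a common way to feed string attributes to a learner is to wrap StringToWordVector in a FilteredClassifier, so that exactly the same filter is applied to training and test instances. Below is a minimal sketch of that idea, not a verified fix: the file name questions.arff, the choice of the first attribute as class, and J48 as the learner are all assumptions for illustration.

import weka.classifiers.meta.FilteredClassifier;
import weka.classifiers.trees.J48;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;
import weka.filters.unsupervised.attribute.StringToWordVector;

public class StringAttributeSketch {
    public static void main(String[] args) throws Exception {
        // The class attribute must be nominal, so do not use the string attribute as the class.
        Instances data = DataSource.read("questions.arff"); // assumed file name
        data.setClassIndex(0);                              // e.g. predict fwoh (assumption)

        // StringToWordVector turns string attributes into numeric word attributes.
        StringToWordVector s2wv = new StringToWordVector();
        s2wv.setOptions(new String[] { "-R", "last" });     // convert only the last (string) attribute

        // FilteredClassifier applies the same filter to training and test data automatically.
        FilteredClassifier fc = new FilteredClassifier();
        fc.setFilter(s2wv);
        fc.setClassifier(new J48());                        // placeholder learner
        fc.buildClassifier(data);

        // fc.classifyInstance(...) can now be called on raw, unfiltered instances.
    }
}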

Update: here is the input ARFF file:

@relation 'text_files_in_C:\\Desktop\\test'

@attribute id {a,b,c}
@attribute ids {g,h,i}
@attribute idss {k,l,m}
@attribute contents string

@data
a,g,k,'x'
b,h,l,'y'
c,i,m,'z'
And here is the filtered output ARFF file:

@relation 'text_files_in_C:\\Desktop\\test-weka.filters.unsupervised.attribute.StringToWordVector-D.,:\\\'\\\"()?!-R4-W1000000-C-T-N1-L-stemmerweka.core.stemmers.NullStemmer-M1'

@attribute id {a,b,c}
@attribute ids {g,h,i}
@attribute idss {k,l,m}
@attribute x numeric
@attribute y numeric
@attribute z numeric

@data
{3 0.693147}
{0 b,1 h,2 l,4 0.693147}
{0 c,1 i,2 m,5 0.693147}
The instances I am trying to test:

@relation 'text_files_in_C:\\Desktop\\test'

@attribute id {a,b,c}
@attribute ids {g,h,i}
@attribute idss {k,l,m}
@attribute contents string

@data
b,h,l,'x'
c,i,m,'y'
Here is my Java code:

package wekaproject;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;

import weka.classifiers.bayes.NaiveBayes;
import weka.classifiers.functions.SMO;
import weka.classifiers.rules.DecisionTable;
import weka.classifiers.rules.PART;
import weka.classifiers.rules.ZeroR;
import weka.classifiers.trees.J48;
import weka.core.Instances;
import weka.core.SerializationHelper;
import weka.core.converters.ArffSaver;
import weka.core.stemmers.LovinsStemmer;
import weka.core.stemmers.Stemmer;
import weka.core.stopwords.WordsFromFile;
import weka.core.tokenizers.NGramTokenizer;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.StringToNominal;
import weka.filters.unsupervised.attribute.StringToWordVector;

public class TestWeka {


    public static BufferedReader readDataFile(String filename) {
        BufferedReader inputReader = null;

        try {
            inputReader = new BufferedReader(new FileReader(filename));
        } catch (FileNotFoundException ex) {
            System.err.println("File not found: " + filename);
        }

        return inputReader;
    }

    public static void main(final String [] args) throws Exception {
        System.out.println("Running");

        final StringToWordVector filter = new StringToWordVector();
        final ZeroR classifier = new ZeroR(); 
        final Instances data = new Instances(readDataFile("test.arff"));
        data.setClassIndex(data.numAttributes() - 1);

        // Use filter.
        String[] options = new String[2];
        options[0] = "-R";                                    // "range"
        options[1] = "4";  
        filter.setOptions(options);


        filter.setInputFormat(data);
        Instances filteredData = Filter.useFilter(data, filter);
        filteredData.setClassIndex(0);
        // Rebuild classifier.
        classifier.buildClassifier(filteredData);
        ArffSaver saver = new ArffSaver();
        saver.setInstances(data);
        saver.setFile(new File("input_test_filtered.arff"));
        saver.writeBatch();

        Instances testInstances=new Instances(readDataFile("test2.arff"));
        testInstances.setClassIndex(testInstances.numAttributes()-1);
        Instances filteredTestData=Filter.useFilter(testInstances, filter);
        filteredTestData.setClassIndex(data.numAttributes()-1);
        saver = new ArffSaver();
        saver.setInstances(testInstances);
        saver.setFile(new File("output_test_filtered.arff"));
        saver.writeBatch();

        for (int j = 0; j < filteredTestData.numInstances(); j++) {
            double value = classifier.classifyInstance(filteredTestData.instance(j));
            System.out.println("value::" + value);
            // get the prediction percentage or distribution
            double[] percentage = classifier.distributionForInstance(filteredTestData.instance(j));

            String prediction = data.classAttribute().value((int) value);

            for (int i = 0; i < percentage.length; i = i + 1) {
                System.out.println("Probability of class " + data.classAttribute().value(i)
                        + " : " + Double.toString(percentage[i]));
            }
            System.out.println("The predicted value of instance " + Integer.toString(j) + ": " + prediction);
        }
    }

} // End of the class //
When I classify the instances, I always get X as the result. Any help would be greatly appreciated.
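
Two things stand out in the code above, offered as guesses rather than verified fixes: the filtered training data gets setClassIndex(0) while the filtered test data gets setClassIndex(data.numAttributes() - 1), so the two sets disagree about which attribute is the class; and ZeroR by definition ignores every attribute and always predicts the majority class, so constant predictions are expected from it. Below is a minimal sketch that keeps the class attribute consistent, reusing the test.arff / test2.arff file names from the code above and assuming id is the intended class.

import weka.classifiers.rules.ZeroR;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.StringToWordVector;

public class ConsistentFilteringSketch {
    public static void main(String[] args) throws Exception {
        Instances train = DataSource.read("test.arff");
        train.setClassIndex(0);                                   // pick a nominal class, e.g. id (assumption)

        StringToWordVector filter = new StringToWordVector();
        filter.setOptions(new String[] { "-R", "4" });            // convert only the string attribute
        filter.setInputFormat(train);                             // initialise the filter on the training data
        Instances filteredTrain = Filter.useFilter(train, filter);

        Instances test = DataSource.read("test2.arff");
        test.setClassIndex(0);                                    // same class attribute as the training set
        Instances filteredTest = Filter.useFilter(test, filter);  // reuse the SAME filter instance

        // Locate the class attribute by name in both filtered sets so they cannot disagree.
        filteredTrain.setClassIndex(filteredTrain.attribute("id").index());
        filteredTest.setClassIndex(filteredTest.attribute("id").index());

        // Note: ZeroR ignores all attributes and always predicts the majority class value,
        // so identical predictions for every instance are expected with it.
        ZeroR classifier = new ZeroR();
        classifier.buildClassifier(filteredTrain);

        for (int j = 0; j < filteredTest.numInstances(); j++) {
            double value = classifier.classifyInstance(filteredTest.instance(j));
            System.out.println(filteredTrain.classAttribute().value((int) value));
        }
    }
}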

Updated code:

package wekaproject;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;

import weka.classifiers.functions.LibLINEAR;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.converters.ArffSaver;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.StringToNominal;

public class demo1 {
    public demo1() throws Exception {
        // TODO Auto-generated constructor stub
        BufferedReader breader = null;
        breader = new BufferedReader(new FileReader(
                "test.arff"));
        Instances Train = new Instances(breader);
        //Train.setClassIndex(Train.numAttributes() - 1); // comment out this line
        LibLINEAR kMeans = new LibLINEAR();


        StringToNominal  filter=new StringToNominal();
        String options[]=new String[2];
        options[0]="-R";
        options[1]="4";
        filter.setInputFormat(Train);

        Instances traineData=Filter.useFilter(Train, filter);
        traineData.setClassIndex(4);
        kMeans.buildClassifier(traineData);

        breader.close();

        ArffSaver saver = new ArffSaver();
        saver.setInstances(traineData);
        saver.setFile(new File("output_test_filtered2.arff"));
        saver.writeBatch();

        Instance instance = new DenseInstance(4);
        instance.setDataset(traineData);
        instance.setValue(0, "what");
        instance.setValue(1, "car");
        instance.setValue(2, "green");
        instance.setValue(3, "y");

        double value = kMeans.classifyInstance(instance);
        System.out.println("value::" + value);

        double[] percentage = kMeans.distributionForInstance(instance);

        String prediction = traineData.classAttribute().value((int) value);

        for (int i = 0; i < percentage.length; i = i + 1) {
            System.out.println("Probability of class " + traineData.classAttribute().value(i)
                    + " : " + Double.toString(percentage[i]));
        }
        System.out.println("The predicted value of instance " + Integer.toString(0) + ": " + prediction);

    }
    public static void main(String[] args) throws Exception {
        // TODO Auto-generated method stub
        new demo1();
    }
}
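
A small remark on the updated code, again as a hedged sketch rather than a verified fix: the options array is filled in but filter.setOptions(options) is never called, so StringToNominal runs with its defaults, and the hand-built DenseInstance(4) does not necessarily match the number of attributes of the dataset it is attached to. The sketch below wires those two pieces up, assuming test.arff is the 4-attribute file shown in the update above, keeping the -R 4 range from the code, and using labels (a, g, k) taken from that file.

import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.StringToNominal;

public class StringToNominalSketch {
    public static void main(String[] args) throws Exception {
        // Assumes test.arff is the 4-attribute file (id, ids, idss, contents) shown above.
        Instances train = DataSource.read("test.arff");

        StringToNominal filter = new StringToNominal();
        filter.setOptions(new String[] { "-R", "4" });           // pass the range to the filter ...
        filter.setInputFormat(train);                            // ... BEFORE setInputFormat
        Instances filtered = Filter.useFilter(train, filter);
        filtered.setClassIndex(filtered.numAttributes() - 1);    // contents (now nominal) as class (assumption)

        // Size a hand-built instance to the dataset it will belong to.
        Instance inst = new DenseInstance(filtered.numAttributes());
        inst.setDataset(filtered);
        inst.setValue(0, "a");   // string values passed to setValue(int, String) must be existing
        inst.setValue(1, "g");   // labels of the corresponding nominal attribute, otherwise an
        inst.setValue(2, "k");   // exception is thrown; the class value is left missing here
        System.out.println(inst);
    }
}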