
Cannot run a Java program that imports org.apache.lucene.analysis.Analyzer

Tags: apache, maven, hadoop, lucene, mahout

I cannot run a Java program that imports org.apache.lucene.analysis.Analyzer:

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.StringReader;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.mahout.classifier.naivebayes.BayesUtils;
import org.apache.mahout.classifier.naivebayes.NaiveBayesModel;
import org.apache.mahout.classifier.naivebayes.StandardNaiveBayesClassifier;
import org.apache.mahout.common.Pair;
import org.apache.mahout.common.iterator.sequencefile.SequenceFileIterable;
import org.apache.mahout.math.RandomAccessSparseVector;
import org.apache.mahout.math.Vector;
import org.apache.mahout.math.Vector.Element;
import org.apache.mahout.vectorizer.DefaultAnalyzer;
import org.apache.mahout.vectorizer.TFIDF;

import com.google.common.collect.ConcurrentHashMultiset;
import com.google.common.collect.Multiset;


public class Classifier {

 public static Map<String, Integer> readDictionnary(Configuration conf, Path dictionnaryPath) {
  Map<String, Integer> dictionnary = new HashMap<String, Integer>();
  for (Pair<Text, IntWritable> pair : new SequenceFileIterable<Text, IntWritable>(dictionnaryPath, true, conf)) {
   dictionnary.put(pair.getFirst().toString(), pair.getSecond().get());
  }
  return dictionnary;
 }

 public static Map<Integer, Long> readDocumentFrequency(Configuration conf, Path documentFrequencyPath) {
  Map<Integer, Long> documentFrequency = new HashMap<Integer, Long>();
  for (Pair<IntWritable, LongWritable> pair : new SequenceFileIterable<IntWritable, LongWritable>(documentFrequencyPath, true, conf)) {
   documentFrequency.put(pair.getFirst().get(), pair.getSecond().get());
  }
  return documentFrequency;
 }

 public static void main(String[] args) throws Exception {

  System.out.println("Start time :" + System.currentTimeMillis());
  if (args.length < 5) {
   System.out.println("Arguments: [model] [label index] [dictionnary] [document frequency] [tweet file]");
   return;
  }
  String modelPath = args[0];
  String labelIndexPath = args[1];
  String dictionaryPath = args[2];
  String documentFrequencyPath = args[3];
  String testFilePath = args[4];

  Configuration configuration = new Configuration();

  // model is a matrix (wordId, labelId) => probability score
  NaiveBayesModel model = NaiveBayesModel.materialize(new Path(modelPath), configuration);

  StandardNaiveBayesClassifier classifier = new StandardNaiveBayesClassifier(model);

  // labels is a map label => classId
  Map<Integer, String> labels = BayesUtils.readLabelIndex(configuration, new Path(labelIndexPath));
  Map<String, Integer> dictionary = readDictionnary(configuration, new Path(dictionaryPath));
  Map<Integer, Long> documentFrequency = readDocumentFrequency(configuration, new Path(documentFrequencyPath));


  // analyzer used to extract word from tweet
  Analyzer analyzer = new DefaultAnalyzer();

  int labelCount = labels.size();
  int documentCount = documentFrequency.get(-1).intValue();

  System.out.println("Number of labels: " + labelCount);
  System.out.println("Number of documents in training set: " + documentCount);
  BufferedReader reader = new BufferedReader(new FileReader(testFilePath));

  String outputFile = "/home/hduser/result.txt";
  FileWriter f1 = new FileWriter(outputFile,true); 
  BufferedWriter out = new BufferedWriter(f1);



  int correctCounter=0;
  int totalCounter=0;
  while(true)
  {
   String line = reader.readLine();
   if (line == null) {
    break;
   }

   String[] arr = line.split(" ");
   String catId = arr[0];
   String label = arr[1];

   String msg = line.substring(arr[0].length() + arr[1].length() + 2);


   Multiset<String> words = ConcurrentHashMultiset.create();

   // extract words from Msg
   TokenStream ts = analyzer.reusableTokenStream("text", new StringReader(msg));
   CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
   ts.reset();
   int wordCount = 0;
   while (ts.incrementToken()) {
    if (termAtt.length() > 0) {
     String word = ts.getAttribute(CharTermAttribute.class).toString();
     Integer wordId = dictionary.get(word);
     // if the word is not in the dictionary, skip it
     if (wordId != null) {
      words.add(word);
      wordCount++;
     }
    }
   }

   // create vector wordId => weight using tfidf
   Vector vector = new RandomAccessSparseVector(10000);
   TFIDF tfidf = new TFIDF();
   for (Multiset.Entry<String> entry:words.entrySet()) {
    String word = entry.getElement();
    int count = entry.getCount();
    Integer wordId = dictionary.get(word);
    Long freq = documentFrequency.get(wordId);
    double tfIdfValue = tfidf.calculate(count, freq.intValue(), wordCount, documentCount);
    vector.setQuick(wordId, tfIdfValue);
   }
   // With the classifier, we get one score for each label 
   // The label with the highest score is the one the tweet is more likely to
   // be associated to
   Vector resultVector = classifier.classifyFull(vector);
   //double bestScore = -Double.MAX_VALUE;
   double bestScore =Double.MAX_VALUE;
   int bestCategoryId = -1;
   String resultStr=catId+" ";
   for(Element element: resultVector) 
   {
    int categoryId = element.index();
    double score = -1 * element.get();
    if (score < bestScore) {
     bestScore = score;
     bestCategoryId = categoryId;
    }
    //System.out.print("  " + labels.get(categoryId) + ": " + score);
    if(resultStr.equalsIgnoreCase(catId + " "))
    {
     resultStr=resultStr + labels.get(categoryId) + " " + score;
    }
    else
    {
     resultStr=resultStr + "   " + labels.get(categoryId) + " " + score;
    }
   }
   try
   {

     out.write(resultStr);
     out.write("\n");

   }
   catch(Exception e)
   {
    // don't silently swallow write failures
    e.printStackTrace();
   }

   //System.out.println(label + " => " + labels.get(bestCategoryId));
   out.write(label + " => " + labels.get(bestCategoryId));
   out.write("\n");
   totalCounter++;
   if (label.equalsIgnoreCase(labels.get(bestCategoryId)))
   {
    correctCounter++;
    System.out.println("correctCounter : " + correctCounter);
   }
  }
  System.out.println("correctCounter : " + correctCounter + " TotalCounter :" + totalCounter);
  System.out.println("End time :" + System.currentTimeMillis());
  System.out.println("Accuracy : " + (double) correctCounter / totalCounter);
  // Close the input and output streams
  reader.close();
  out.close();
 }
}
When I try to execute this jar with Hadoop, I get the following error:

hadoop jar Classify.jar {input arguments}
Exception in thread "main" java.lang.NoClassDefFoundError: org/apache/lucene/analysis/Analyzer
    at java.lang.Class.forName0(Native Method)
    at java.lang.Class.forName(Class.java:270)
    at org.apache.hadoop.util.RunJar.main(RunJar.java:153)
Caused by: java.lang.ClassNotFoundException: org.apache.lucene.analysis.Analyzer
    at java.net.URLClassLoader$1.run(URLClassLoader.java:217)
    at java.security.AccessController.doPrivileged(Native Method)
    at java.net.URLClassLoader.findClass(URLClassLoader.java:205)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:323)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:268)
    ... 3 more

Your problem is that when your application's jar is built, the Maven dependency on Lucene (its .jar files) is not packaged into it, so Java cannot find the Lucene classes your application uses. I had the same problem with the same stack trace.
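A quick way to confirm this is to list the contents of the jar you pass to hadoop; if the build did not bundle its dependencies, the Lucene class from the stack trace simply is not there. A minimal check, assuming the jar is named Classify.jar as in the command above:

    jar tf Classify.jar | grep org/apache/lucene/analysis/Analyzer
    # no output means the class is not packaged inside the jar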

You are better off using Maven to build the project.

What you need to do is build the sources together with their dependencies, so that the compiled application includes the Lucene JARs.

To do that, just add these lines to your Maven project's pom.xml:

    <build>
        <plugins>
            <plugin>
                <artifactId>maven-assembly-plugin</artifactId>
                <version>2.4</version>
                <configuration>
                    <descriptorRefs>
                        <descriptorRef>jar-with-dependencies</descriptorRef>
                    </descriptorRefs>
                </configuration>
                <executions>
                    <execution>
                        <id>make-assembly</id>
                        <phase>package</phase>
                        <goals>
                            <goal>single</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
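With the plugin in place, an ordinary package build should produce the self-contained jar next to the regular artifact. A sketch of the build step; the exact file name depends on your project's artifactId and version:

    mvn clean package
    ls target/
    # expect something like APP_NAME-jar-with-dependencies.jar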

Now run your application:
hadoop jar APP_NAME-jar-with-dependencies.jar


All of this should solve your problem.
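If you want to double-check before submitting the job, the same jar listing as before should now show the Lucene classes inside the assembled jar (the jar name is a placeholder, as above):

    jar tf APP_NAME-jar-with-dependencies.jar | grep org/apache/lucene/analysis/Analyzer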

Try giving the jar file name with the -classpath option, e.g. /home/root/lib/lucene-api.jar.

I have already tried including the classpath in the command hadoop -cp /usr/local/mahout/lib/lucene-core-4.10.1.jar -jar /usr/local/hadoop/Classify.jar, but the problem is still the same.

The hadoop script does not take -cp either, so it is better to define a variable named CLASSPATH, or HADOOP_CLASSPATH if you use Hadoop, and then run the java or hadoop script.

I am using Hadoop; please tell me how to add HADOOP_CLASSPATH. For your reference, I have already defined HADOOP_HOME in my bashrc file.

Define HADOOP_CLASSPATH and CLASSPATH in your bashrc file, source it, and then re-run the hadoop command.
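For the HADOOP_CLASSPATH suggestion in the comments, a minimal sketch of the .bashrc addition, using the Lucene jar path mentioned above (adjust the paths to your installation):

    # make the Lucene jar visible to the hadoop launcher (path is an assumption)
    export HADOOP_CLASSPATH=/usr/local/mahout/lib/lucene-core-4.10.1.jar:$HADOOP_CLASSPATH

Then reload the file and re-run the job:

    source ~/.bashrc
    hadoop jar /usr/local/hadoop/Classify.jar {input arguments}

Note that HADOOP_CLASSPATH only extends the classpath of the client JVM started by the hadoop script; for classes needed inside the MapReduce tasks themselves, packaging the dependencies into the jar as shown above is the more robust fix.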