./bin/hadoop jar 出现:线程 "main" 中的 java.lang.ClassNotFoundException: org.apache.hadoop.examples.WordCount2

./bin/hadoop jar 出现:线程 "main" 中的 java.lang.ClassNotFoundException: org.apache.hadoop.examples.WordCount2,java,hadoop,Java,Hadoop,我将 hadoop 源代码 WordCount.java 复制到我自己创建的目录中,编译并生成一个 JAR,然后传递给 ./bin/hadoop,可以正常运行。然后我修改了 WordCount.java,并在我自己的目录中保存为 WordCount2.java,编译通过后生成一个 JAR 文件,但在使用 ./bin/hadoop jar 运行时报错:Exception in thread "main" java.lang.ClassNotFoundException: org.apache.hadoop.ex

我将hadoop源代码wordcount.Java复制到我自己创建的目录中,编译并生成一个JAR,然后传递到./bin/hadoop。然后我修改了wordcount.Java,并在我自己的目录中保存为wordcount2.Java,编译通过后,生成一个JAR文件,但在使用./bin/hadoop JAR运行时。这使我犯了错误

Exception in thread "main" java.lang.ClassNotFoundException: org.apache.hadoop.examples.WordCount2
这就是我运行hadoop作业的方式

./usr/local/hadoop/bin/hadoop jar ./usr/local/hadoop/bin/hadoop/playground/wordcount2.jar org.apache.hadoop.examples.wordcount2 ./usr/local/hadoop/bin/hadoop/input ./usr/local/hadoop/bin/hadoop/output
这是我的代码:

package org.apache.hadoop.examples;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class WordCount2 {

  public static class TokenizerMapper 
       extends Mapper<Object, Text, Text, IntWritable>{

    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();

    public void map(Object key, Text value, Context context
                ) throws IOException, InterruptedException {
      StringTokenizer itr = new StringTokenizer(value.toString(),"       \t\n\r\f,.:;?![]'");
      while (itr.hasMoreTokens()) {
        word.set(itr.nextToken().toLowerCase());
        context.write(word, one);
      }
    }
  }

  public static class IntSumReducer 
       extends Reducer<Text,IntWritable,Text,IntWritable> {
    private IntWritable result = new IntWritable();

    public void reduce(Text key, Iterable<IntWritable> values, 
                   Context context
                   ) throws IOException, InterruptedException {
      int sum = 0;
      for (IntWritable val : values) {
        sum += val.get();
      }
      result.set(sum);
      if (sum > 4) context.write(key, result);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    String[] otherArgs = new GenericOptionsParser(conf,     args).getRemainingArgs();
    if (otherArgs.length < 2) {
      System.err.println("Usage: wordcount <in> [<in>...] <out>");
      System.exit(2);
    }
    Job job = Job.getInstance(conf, "word count");
    job.setJarByClass(WordCount2.class);
    job.setMapperClass(TokenizerMapper.class);
    job.setCombinerClass(IntSumReducer.class);
    job.setReducerClass(IntSumReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    for (int i = 0; i < otherArgs.length - 1; ++i) {
       FileInputFormat.addInputPath(job, new Path(otherArgs[i]));
    }
    FileOutputFormat.setOutputPath(job,
      new Path(otherArgs[otherArgs.length - 1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
package org.apache.hadoop.examples;
导入java.io.IOException;
导入java.util.StringTokenizer;
导入org.apache.hadoop.conf.Configuration;
导入org.apache.hadoop.fs.Path;
导入org.apache.hadoop.io.IntWritable;
导入org.apache.hadoop.io.Text;
导入org.apache.hadoop.mapreduce.Job;
导入org.apache.hadoop.mapreduce.Mapper;
导入org.apache.hadoop.mapreduce.Reducer;
导入org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
导入org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
导入org.apache.hadoop.util.GenericOptionsParser;
公共类WordCount2{
公共静态类令牌映射器
扩展映射器{
私有最终静态IntWritable one=新的IntWritable(1);
私有文本字=新文本();
公共无效映射(对象键、文本值、上下文
)抛出IOException、InterruptedException{
StringTokenizer itr=新的StringTokenizer(value.toString(),“\t\n\r\f,.:;?![]”;
而(itr.hasMoreTokens()){
set(itr.nextToken().toLowerCase());
上下文。写(单词,一);
}
}
}
公共静态类IntSumReducer
伸缩减速机{
私有IntWritable结果=新的IntWritable();
public void reduce(文本键、Iterable值、,
语境
)抛出IOException、InterruptedException{
整数和=0;
for(可写入值:值){
sum+=val.get();
}
结果集(总和);
如果(总和>4)上下文。写入(键、结果);
}
}
公共静态void main(字符串[]args)引发异常{
Configuration conf=新配置();
String[]otherArgs=新的GenericOptionsParser(conf,args);
如果(其他参数长度<2){
System.err.println(“用法:wordcount[…]);
系统出口(2);
}
Job Job=Job.getInstance(conf,“字数”);
job.setJarByClass(WordCount2.class);
setMapperClass(TokenizerMapper.class);
job.setCombinerClass(IntSumReducer.class);
job.setReducerClass(IntSumReducer.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
对于(int i=0;i
您正在使用 eclipse 吗?如果是,您是否将 jar 的主类名正确设置为 WordCount2?非常感谢您的帮助!我没有使用 eclipse。事实上,我没有使用任何 IDE,我只是使用 ubuntu 终端来做这些。jar 的主类名自动为 WordCount2。这就是我生成 jar 的方式:usr/lib/jvm/java-7-openjdk-i386/bin/javac -classpath ... -d usr/local/hadoop/playground/classes usr/local/hadoop/playground/src/WordCount2.java 和 jar -cvf playground/wordcount2.jar playground/classes/*