Java: getCredentials method error when trying to run the "Word Count" program in Eclipse with all JAR files imported

Tags: java, eclipse, apache, hadoop

Error: Exception in thread "main" java.lang.NoSuchMethodError: org.apache.hadoop.security.UserGroupInformation.getCredentials()Lorg/apache/hadoop/security/Credentials;
    at org.apache.hadoop.mapreduce.Job.&lt;init&gt;(Job.java:135)
    at org.apache.hadoop.mapreduce.Job.getInstance(Job.java:176)
    at org.apache.hadoop.mapreduce.Job.getInstance(Job.java:195)
    at WordCount.main(WordCount.java:20)
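A NoSuchMethodError at this point usually means JARs from two different Hadoop versions are mixed on the classpath (pre-2.x versions of UserGroupInformation lack getCredentials()). One quick way to check which JAR the conflicting class is actually loaded from is a diagnostic sketch like this:

// Prints the JAR that UserGroupInformation was loaded from; a path to an old
// (pre-2.x) Hadoop JAR here would explain the missing getCredentials() method
System.out.println(org.apache.hadoop.security.UserGroupInformation.class
        .getProtectionDomain().getCodeSource().getLocation());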

Hadoop version: 2.2.0

The full source of WordCount.java, WordMapper.java, and SumReducer.java is listed at the end of this post.

Please let me know what can be done. The program uses the new MapReduce API, and all the JARs shipped with Hadoop 2.2.0 have been imported into Eclipse.


Thanks!

Are you using the Eclipse plugin for Hadoop? If not, that is the problem. Without the plugin, if you just run the WordCount class from Eclipse, Hadoop cannot find the necessary JARs. Bundle all the JARs, including WordCount, into a single JAR and run it on the cluster.
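If you go the bundling route, it also helps to have the driver wait for the job instead of returning right after submit(); a minimal sketch of how main typically ends (job is the object from the question's code, and the JAR name below is just an example):

// Block until the job finishes and exit with its status, so failures show up
// when launched with e.g.: hadoop jar wordcount.jar WordCount <input> <output>
System.exit(job.waitForCompletion(true) ? 0 : 1);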


If you want to run it from Eclipse, you need the Eclipse plugin. If you don't have it, you can follow this to build the plugin.

Hi, I made some changes and now get more error lines, like the following: …java:24 at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:70) at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:84) at WordCount.main(WordCount.java:47). I imported all the Hadoop JARs, and Eclipse shows no errors when I save the program. I also want to mention: 1. I have not set up a Hadoop installation on Windows; I just downloaded Hadoop 2.2.0 and loaded its JARs into Eclipse. 2. The main purpose of this application is to build a custom JAR from it to run on Amazon EMR, not to run it locally. So I want to build a WordCount JAR, and I shouldn't actually need a Hadoop installation for that, just the imported JARs to write the application against. Now, should I go ahead and install the Eclipse plugin? Or can I build the JAR, knowing the application doesn't run inside Eclipse, and put it on EMR to see?

I suggest testing it on EMR.

It works on EMR! But to run it on Windows, should I install Hadoop, or is it enough to build the plugin, import the JARs, and run the application?
WordCount.java:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class WordCount {
    public static void main(String[] args) throws Exception {
        if (args.length != 2) {
            System.out.println("usage: [input] [output]");
            System.exit(-1);
        }

        // Line 20 in the stack trace: Job.getInstance fails here with
        // NoSuchMethodError when mismatched Hadoop JARs are on the classpath
        Job job = Job.getInstance(new Configuration(), "word count");
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.setMapperClass(WordMapper.class);
        job.setReducerClass(SumReducer.class);

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // Tells Hadoop which JAR to ship to the cluster
        job.setJarByClass(WordCount.class);
        job.setJobName("WordCount"); // overrides the name passed to getInstance

        // Submits the job and returns immediately without waiting for completion
        job.submit();
    }
}
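The ToolRunner frames in the later comment's stack trace suggest the driver was reworked to the standard Tool pattern; a minimal sketch of that pattern (the class name WordCountTool and the elided wiring are illustrative, not the asker's actual code):

import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class WordCountTool extends Configured implements Tool {

    @Override
    public int run(String[] args) throws Exception {
        // Same setup as WordCount.main, but using the Configuration
        // injected by ToolRunner (so -D options on the command line work)
        Job job = Job.getInstance(getConf(), "word count");
        job.setJarByClass(WordCountTool.class);
        // ... mapper/reducer/format/path wiring as in WordCount.main ...
        return job.waitForCompletion(true) ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        // ToolRunner parses generic options before delegating to run()
        System.exit(ToolRunner.run(new WordCountTool(), args));
    }
}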
WordMapper.java:

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class WordMapper extends Mapper<Object, Text, Text, IntWritable> {

    private Text word = new Text();
    private final static IntWritable one = new IntWritable(1);

    @Override
    public void map(Object key, Text value, Context context)
            throws IOException, InterruptedException {
        // Break the input line into words and emit (word, 1) for each
        StringTokenizer wordList = new StringTokenizer(value.toString());
        while (wordList.hasMoreTokens()) {
            word.set(wordList.nextToken());
            context.write(word, one);
        }
    }
}
SumReducer.java:

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class SumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    private IntWritable totalWordCount = new IntWritable();

    @Override
    public void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        // Sum all partial counts for this word and emit the total
        int wordCount = 0;
        for (IntWritable value : values) {
            wordCount += value.get();
        }
        totalWordCount.set(wordCount);
        context.write(key, totalWordCount);
    }
}
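Because summing counts is associative and commutative, SumReducer can double as a combiner to pre-aggregate on the map side and shrink the shuffle; a one-line addition to the driver (a sketch, using the job object from WordCount.main):

// Optional: run SumReducer map-side as a combiner to reduce shuffle volume
job.setCombinerClass(SumReducer.class);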