
Java MapReduce doesn't produce output


I want to run a simple MapReduce job on a text file, but it produces no output. Here is my code:

import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCount {

  public static class TokenizerMapper
      extends Mapper<Object, Text, Text, IntWritable> {

    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();

    public void map(Object key, Text value, Context context)
        throws IOException, InterruptedException {
      StringTokenizer itr = new StringTokenizer(value.toString());
      while (itr.hasMoreTokens()) {
        word.set(itr.nextToken());
        context.write(word, one);
      }
    }
  }

  public static class IntSumReducer
      extends Reducer<Text, IntWritable, Text, IntWritable> {

    private IntWritable result = new IntWritable();

    public void reduce(Text key, Iterable<IntWritable> values, Context context)
        throws IOException, InterruptedException {
      int sum = 0;
      for (IntWritable val : values) {
        sum += val.get();
      }
      result.set(sum);
      context.write(key, result);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf, "word count");
    job.setJarByClass(WordCount.class);
    job.setMapperClass(TokenizerMapper.class);
    job.setCombinerClass(IntSumReducer.class);
    job.setReducerClass(IntSumReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
I get the following error while executing the jar file:

17/05/07 23:10:53 WARN mapred.LocalJobRunner: job_local973452829_0001
java.lang.Exception:     org.apache.hadoop.mapreduce.task.reduce.Shuffle$ShuffleError: error in shuffle in localfetcher#1
at org.apache.hadoop.mapred.LocalJobRunner$Job.runTasks(LocalJobRunner.java:462)
at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:529)
Caused by: org.apache.hadoop.mapreduce.task.reduce.Shuffle$ShuffleError: error in shuffle in localfetcher#1
at org.apache.hadoop.mapreduce.task.reduce.Shuffle.run(Shuffle.java:134)
at org.apache.hadoop.mapred.ReduceTask.run(ReduceTask.java:376)
at org.apache.hadoop.mapred.LocalJobRunner$Job$ReduceTaskRunnable.run(LocalJobRunner.java:319)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:748)
Caused by: java.io.FileNotFoundException: /app/hadoop/tmp%20/mapred/local/localRunner/hduser/jobcache/job_local973452829_0001/attempt_local973452829_0001_m_000000_0/output/file.out.index
at org.apache.hadoop.fs.RawLocalFileSystem.open(RawLocalFileSystem.java:193)
at org.apache.hadoop.fs.FileSystem.open(FileSystem.java:764)
at org.apache.hadoop.io.SecureIOUtils.openFSDataInputStream(SecureIOUtils.java:156)
at org.apache.hadoop.mapred.SpillRecord.<init>(SpillRecord.java:70)
at org.apache.hadoop.mapred.SpillRecord.<init>(SpillRecord.java:62)
at org.apache.hadoop.mapred.SpillRecord.<init>(SpillRecord.java:57)
at org.apache.hadoop.mapreduce.task.reduce.LocalFetcher.copyMapOutput(LocalFetcher.java:123)
at org.apache.hadoop.mapreduce.task.reduce.LocalFetcher.doCopy(LocalFetcher.java:101)
at org.apache.hadoop.mapreduce.task.reduce.LocalFetcher.run(LocalFetcher

What is wrong with my code? I am using Hadoop 2.4 on Ubuntu 14.04.

Try writing the command as follows:

hadoop jar (jar file name) (input path) (output path)
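
For example, the invocation could look like this (a sketch with hypothetical names; wordcount.jar and the input/output paths are placeholders, and the explicit WordCount class argument is only needed if the jar's manifest does not declare a main class):

hadoop jar wordcount.jar WordCount /user/hduser/input /user/hduser/output

Also note that the output directory must not already exist; the job fails at startup if it does.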


Also, did you check for warnings before exporting the jar file? Did you get any warnings?

Here is the relevant part of the error:

Caused by: java.io.FileNotFoundException: /app/hadoop/tmp%20/mapred/local/localRunner/hduser/jobcache/job_local973452829_0001/attempt_local973452829_0001_m_000000_0/output/file.out.index

My guess is that the configuration property hadoop.tmp.dir in your core-site.xml is the problem, because Hadoop cannot store the temporary output files (from the mapper) on disk. Note the %20 (a URL-encoded space) in the path above: it suggests the property's value contains a stray space after /app/hadoop/tmp.
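
For reference, the property in core-site.xml normally looks like the following (a minimal sketch; the directory path is taken from the error message, and the surrounding XML is the standard Hadoop configuration layout):

<configuration>
  <property>
    <name>hadoop.tmp.dir</name>
    <!-- the value must not contain leading or trailing whitespace -->
    <value>/app/hadoop/tmp</value>
  </property>
</configuration>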

You can either remove that property, so that Hadoop creates its own temporary directory for the intermediate output, or set it to a directory that exists and has the appropriate permissions.
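
If you keep the property, you could create the directory and hand it to the user that runs the job, along these lines (a sketch; hduser is taken from the paths in the stack trace, while the hadoop group is an assumption based on a typical single-node setup):

sudo mkdir -p /app/hadoop/tmp
sudo chown hduser:hadoop /app/hadoop/tmp
sudo chmod 750 /app/hadoop/tmp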