
java.io.IOException: Type mismatch in value from map: expected org.apache.hadoop.io.IntWritable, received org.apache.hadoop.io.Text


I have configured the Mapper and Reducer classes and the map output key/value classes in the main block. I don't understand what is wrong with this code; it throws:
Type mismatch in value from map: expected org.apache.hadoop.io.IntWritable, received org.apache.hadoop.io.Text
Can anyone help? Thank you very much.

The code is:

import java.io.IOException;
import java.lang.String;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Mapper.Context;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.util.StringTokenizer;

public class Alphabet {

    public static class AlphabetMapper
            extends Mapper<IntWritable, Text, LongWritable, IntWritable> {
        private Text word = new Text();

        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();

            StringTokenizer tokenizer = new StringTokenizer(line);

            while (tokenizer.hasMoreTokens()) {
                word.set(tokenizer.nextToken());
                context.write(new LongWritable(word.getLength()), new IntWritable(1));
            }
        }
    }
    public static class AlphabetReducer
            extends Reducer<LongWritable, IntWritable, LongWritable, IntWritable> {
        public void reduce(LongWritable key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {

            int sum = 0;

            for (IntWritable val : values) {
                sum += val.get();
            }

            context.write(key, new IntWritable(sum));
        }
    }
public static void main(String[] args) throws Exception {

        if (args.length!=2){
            System.err.println("Usage:Alphabet <input path> <output path>");
            System.exit(-1);
        }

        Job job =new Job();
        job.setJarByClass(Alphabet.class);
        job.setJobName("Word Char Count");

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        job.setMapperClass(AlphabetMapper.class);
        job.setReducerClass(AlphabetReducer.class);

        job.setMapOutputKeyClass(LongWritable.class);
        job.setMapOutputValueClass(IntWritable.class);

        job.setOutputKeyClass(LongWritable.class);
        job.setOutputValueClass(IntWritable.class);

        System.exit(job.waitForCompletion(true)?0:1);

    }

}

If you are using FileInputFormat with the default TextInputFormat, the mapper's default input key/value types are LongWritable (the byte offset of the line) and Text (the line itself), so the mapper's first two generic parameters must be those types.

Solved: the mapper's first input type parameter was IntWritable (changed it to LongWritable) to match the map function's signature; the map method was also missing @Override. Because map(LongWritable, Text, Context) did not match the declared Mapper<IntWritable, ...> type parameters, it never overrode Mapper.map; the framework therefore ran the default identity map, which emitted the input Text as the value and produced the "expected IntWritable, received Text" error.
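For reference, a sketch of the corrected mapper (the rest of the class is unchanged from the question): the first type parameter becomes LongWritable, and @Override makes the compiler verify that map actually overrides Mapper.map instead of silently adding an overload.

    public static class AlphabetMapper
            extends Mapper<LongWritable, Text, LongWritable, IntWritable> {
        private Text word = new Text();

        @Override
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();

            StringTokenizer tokenizer = new StringTokenizer(line);

            while (tokenizer.hasMoreTokens()) {
                word.set(tokenizer.nextToken());
                // Emit (word length, 1); the reducer sums the counts per length.
                context.write(new LongWritable(word.getLength()), new IntWritable(1));
            }
        }
    }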