Does reduce() only accept reduce(Text, Text)?


I am new to MapReduce. Here is my problem.

In the hdfs://centmaster/input directory, I have 2 files:

file1.txt:

2012-3-1 a
2012-3-2 b
2012-3-3 c
2012-3-4 d
2012-3-5 a
2012-3-6 b
2012-3-7 c
2012-3-3 c
and file2.txt:

2012-3-1 b
2012-3-2 a
2012-3-3 b
2012-3-4 d
2012-3-5 a
2012-3-6 c
2012-3-7 d
2012-3-3 c
I ran a data deduplication MapReduce job:

package Hadoop_for_jar;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class Dedup2 {

    public static class Map extends Mapper<Object,Text,Text,Text>{
        private static Text line=new Text();
        public void map(Object key,Text value,Context context)
                throws IOException,InterruptedException{
            // Emit each input line as the key; duplicate lines collapse
            // onto the same key during the shuffle
            line=value;
            context.write(line, new Text(""));
        }

    }

    public static class Reduce extends Reducer<Text,Text,Text,Text>{
        public void reduce(Text key,Iterable<Text> values,Context context)
                throws IOException,InterruptedException{
            // Write each distinct line exactly once, ignoring the values
            context.write(key, new Text(""));
        }
    }

    public static void main(String[] args) throws Exception{
        Configuration conf = new Configuration();
        System.setProperty("HADOOP_USER_NAME", "root");
        String[] otherArgs = {"hdfs://centmaster:9000/input", "hdfs://centmaster:9000/output/debup1"};
        Job job = new Job(conf, "Data Deduplication");
        job.setJarByClass(Dedup2.class);

        job.setMapperClass(Map.class);
        job.setCombinerClass(Reduce.class);
        job.setReducerClass(Reduce.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
     }
}
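
For reference, this first version works. Since Text keys sort lexicographically, the output should contain each distinct line exactly once, like so:

2012-3-1 a
2012-3-1 b
2012-3-2 a
2012-3-2 b
2012-3-3 b
2012-3-3 c
2012-3-4 d
2012-3-5 a
2012-3-6 b
2012-3-6 c
2012-3-7 c
2012-3-7 d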
Now, I noticed that the Reducer never actually uses its input values (the Mapper's output values); they are useless for this program. So I figured I could change that Text to IntWritable and still get the same result, and I made the following changes:

package Hadoop_for_jar;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class Dedup2 {

    public static class Map extends Mapper<Object,Text,Text,IntWritable>{   //Change Text to IntWritable
        private static Text line=new Text();
        public void map(Object key,Text value,Context context)
                throws IOException,InterruptedException{
            line=value;
            context.write(line, new IntWritable(0));    //Change Text("") to IntWritable(0)
        }

    }

    public static class Reduce extends Reducer<Text,IntWritable,Text,Text>{ //Change Text to IntWritable
        public void reduce(Text key,Iterable<IntWritable> values,Context context)   //Change Text to IntWritable
                throws IOException,InterruptedException{
            context.write(key, new Text(""));
        }

    }

    public static void main(String[] args) throws Exception{
        Configuration conf = new Configuration();
        System.setProperty("HADOOP_USER_NAME", "root");
        String[] otherArgs = {"hdfs://centmaster:9000/input", "hdfs://centmaster:9000/output/debup2"};
        Job job = new Job(conf, "Data Deduplication");
        job.setJarByClass(Dedup2.class);

        job.setMapperClass(Map.class);
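        // Combiner whose output types (Text, Text) differ from the
        // map output types (Text, IntWritable)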
        job.setCombinerClass(Reduce.class);
        job.setReducerClass(Reduce.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
     }
}
The /output/debup2 directory was created, but it is empty, so my assumption was wrong. My question is: does the Reducer only accept (Text, Text) as input?
Thanks, everyone!

Problem solved. According to the link below, the Mapper's output types and the Reducer's output types should be the same; if you want the Mapper and the Reducer to have different output types, you should avoid using a Combiner. A Combiner is just a Reducer run on the map side, so its input and output types must both match the map output types; here Reduce emits (Text, Text) while the map output value type is IntWritable, which is why the job produced nothing. The working code:

package Hadoop_for_jar;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class Dedup2 {

    public static class Map extends Mapper<Object,Text,Text,IntWritable>{   //Change Text to IntWritable
        private static Text line=new Text();
        public void map(Object key,Text value,Context context)
                throws IOException,InterruptedException{
            line=value;
            context.write(line, new IntWritable(0));    //Change Text("") to IntWritable(0)
        }

    }

    public static class Reduce extends Reducer<Text,IntWritable,Text,Text>{ //Change Text to IntWritable
        public void reduce(Text key,Iterable<IntWritable> values,Context context)   //Change Text to IntWritable
                throws IOException,InterruptedException{
            context.write(key, new Text(""));
        }

    }

    public static void main(String[] args) throws Exception{
        Configuration conf = new Configuration();
        System.setProperty("HADOOP_USER_NAME", "root");
        String[] otherArgs = {"hdfs://centmaster:9000/input", "hdfs://centmaster:9000/output/debup3"};  // fresh output dir; /output/debup2 already exists
        Job job = new Job(conf, "Data Deduplication");
        job.setJarByClass(Dedup2.class);

        job.setMapperClass(Map.class);
        // No combiner here: Reduce emits (Text, Text), which does not match the
        // map output types (Text, IntWritable), so it cannot serve as a combiner
        job.setReducerClass(Reduce.class);

        job.setMapOutputKeyClass(Text.class);           // map output types now differ
        job.setMapOutputValueClass(IntWritable.class);  // from the job output types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
     }
}
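
As a follow-up: if you do want to keep a combiner with this (Text, IntWritable) map output, the combiner's input and output types must both match the map output types. A minimal sketch (a hypothetical Combine class, not part of the original code):

    public static class Combine extends Reducer<Text,IntWritable,Text,IntWritable>{
        public void reduce(Text key,Iterable<IntWritable> values,Context context)
                throws IOException,InterruptedException{
            // Same (Text, IntWritable) in and out, so it is safe as a combiner
            context.write(key, new IntWritable(0));
        }
    }

With that in place, job.setCombinerClass(Combine.class) would be type-safe, and the final Reducer can still emit (Text, Text).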