CombineFileInputFormat always launches only one map on Hadoop 1.2.1
I am trying to test CombineFileInputFormat to process several small files of 8 MB each (20 files). I followed the sample given in this article and was able to implement and test it; the final result is correct. To my surprise, however, the job always ends up with just one map task. I tried setting the property "mapred.max.split.size" to various values such as 16 MB and 32 MB (in bytes, of course), but without success. Is there anything else I need to do, or is this the correct behavior?

I am running a two-node cluster with a default replication factor of 2. The code developed is given below. Any help is greatly appreciated.
package inverika.test.retail;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
import org.apache.hadoop.mapreduce.Reducer;

public class CategoryCount {

    public static class CategoryMapper
            extends Mapper<LongWritable, Text, Text, IntWritable> {
        private final static IntWritable one = new IntWritable(1);
        private String[] columns = new String[8];

        @Override
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            columns = value.toString().split(",");
            context.write(new Text(columns[4]), one);
        }
    }

    public static class CategoryReducer
            extends Reducer<Text, IntWritable, Text, IntWritable> {
        @Override
        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable value : values) {
                sum += value.get();
            }
            context.write(key, new IntWritable(sum));
        }
    }

    public static void main(String args[]) throws Exception {
        if (args.length != 2) {
            System.err.println("Usage: CategoryCount <input Path> <output Path>");
            System.exit(-1);
        }

        Configuration conf = new Configuration();
        conf.set("mapred.textoutputformat.separator", ",");
        conf.set("mapred.max.split.size", "16777216");   // 16 MB

        Job job = new Job(conf, "Retail Category Count");
        job.setJarByClass(CategoryCount.class);
        job.setMapperClass(CategoryMapper.class);
        job.setReducerClass(CategoryReducer.class);
        job.setInputFormatClass(CombinedInputFormat.class);
        //CombineFileInputFormat.setMaxInputSplitSize(job, 16777216);
        CombinedInputFormat.setMaxInputSplitSize(job, 16777216);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        //job.submit();
        //System.exit(job.waitForCompletion(false) ? 0 : 1);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
Below is the CombineFileInputFormat implementation:
package inverika.test.retail;

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.input.LineRecordReader;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;

public class CombinedInputFormat extends CombineFileInputFormat<LongWritable, Text> {

    @Override
    public RecordReader<LongWritable, Text>
            createRecordReader(InputSplit split, TaskAttemptContext context)
            throws IOException {
        CombineFileRecordReader<LongWritable, Text> reader =
                new CombineFileRecordReader<LongWritable, Text>(
                        (CombineFileSplit) split, context, myCombineFileRecordReader.class);
        return reader;
    }

    public static class myCombineFileRecordReader extends RecordReader<LongWritable, Text> {
        private LineRecordReader lineRecordReader = new LineRecordReader();

        public myCombineFileRecordReader(CombineFileSplit split,
                TaskAttemptContext context, Integer index) throws IOException {
            FileSplit fileSplit = new FileSplit(split.getPath(index),
                                                split.getOffset(index),
                                                split.getLength(index),
                                                split.getLocations());
            lineRecordReader.initialize(fileSplit, context);
        }

        @Override
        public void initialize(InputSplit inputSplit, TaskAttemptContext context)
                throws IOException, InterruptedException {
            //linerecordReader.initialize(inputSplit, context);
        }

        @Override
        public void close() throws IOException {
            lineRecordReader.close();
        }

        @Override
        public float getProgress() throws IOException {
            return lineRecordReader.getProgress();
        }

        @Override
        public LongWritable getCurrentKey() throws IOException,
                InterruptedException {
            return lineRecordReader.getCurrentKey();
        }

        @Override
        public Text getCurrentValue() throws IOException, InterruptedException {
            return lineRecordReader.getCurrentValue();
        }

        @Override
        public boolean nextKeyValue() throws IOException, InterruptedException {
            return lineRecordReader.nextKeyValue();
        }
    }
}
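The number of planned map tasks can also be checked without submitting the job, by calling getSplits() on the input format directly. The following is a minimal sketch (SplitCountCheck is a hypothetical helper, not part of the original post; it assumes the input path is reachable from the local configuration):

package inverika.test.retail;

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

public class SplitCountCheck {
    public static void main(String[] args) throws Exception {
        // In the Hadoop 1.x new API, Job extends JobContext,
        // so it can be passed straight to getSplits().
        Job job = new Job(new Configuration(), "Split count check");
        FileInputFormat.addInputPath(job, new Path(args[0]));

        // Each element of this list becomes one map task.
        List<InputSplit> splits = new CombinedInputFormat().getSplits(job);
        System.out.println("Planned map tasks: " + splits.size());
    }
}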
The problem is the property name. The new-API CombineFileInputFormat bundled with Hadoop 1.2.1 reads its maximum split size from "mapreduce.input.fileinputformat.split.maxsize", not from "mapred.max.split.size", which is the key written both by the conf.set(...) call and, apparently, by the inherited FileInputFormat.setMaxInputSplitSize() helper. With no effective maximum, the format is free to pack all twenty files into a single split, hence the single map. Setting the limit under the new-style name on the job configuration fixes it:

job.getConfiguration().setLong("mapreduce.input.fileinputformat.split.maxsize", (long)(256*1024*1024));
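Applied to the driver above, the deprecated settings can be replaced with one line right after the Job is created (a minimal sketch; the 16 MB value mirrors the question's intent, and with twenty 8 MB inputs it should yield several combined splits instead of one, the exact count depending on how the blocks are spread across the two nodes):

Job job = new Job(conf, "Retail Category Count");
// New-style key consulted by CombineFileInputFormat.getSplits() in 1.2.1
job.getConfiguration().setLong("mapreduce.input.fileinputformat.split.maxsize", 16L * 1024 * 1024); // 16 MB
job.setInputFormatClass(CombinedInputFormat.class);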