Hadoop: modifying Grep to parse Sequence/Snappy files


I am trying to get the Grep example bundled with CDH to read Sequence/Snappy files.

By default, the program throws an error when it tries to read Sequence/Snappy files:

    java.io.EOFException: Unexpected end of block in input stream
        at org.apache.hadoop.io.compress.BlockDecompressorStream.getCompressedData(BlockDecompressorStream.java:121)
        at org.apache.hadoop.io.compress.BlockDecompressorStream.decompress(BlockDecompressorStream.java:95)
        at org.apache.hadoop.io.compress.DecompressorStream.read(DecompressorStream.java:83)
        at java.io.InputStream.read(InputStream.java:82)
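
One quick sanity check (my addition, not part of the original thread) is to confirm that the inputs really are sequence files rather than raw Snappy streams: every SequenceFile starts with the ASCII magic bytes "SEQ". A minimal sketch for a locally copied file (fetch one with hadoop fs -get first; the class name MagicCheck is illustrative):

import java.io.DataInputStream;
import java.io.FileInputStream;

/* Prints a file's first three bytes; sequence files begin with "SEQ". */
public class MagicCheck {
    public static void main(String[] args) throws Exception {
        DataInputStream in = new DataInputStream(new FileInputStream(args[0]));
        byte[] magic = new byte[3];
        in.readFully(magic); // EOFException here means the file is essentially empty
        in.close();
        System.out.println(new String(magic, "US-ASCII"));
    }
}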

So I edited the code to read the input as a sequence file.

Changed:

    FileInputFormat.setInputPaths(grepJob, args[0]);

to:

    FileInputFormat.setInputPaths(grepJob, args[0]);
    grepJob.setInputFormatClass(SequenceFileAsTextInputFormat.class);

But I still get the same error.

1) Do I need to set the input compression codec manually? I thought the SequenceFile reader detected the compression automatically.

2) If I do need to set the compression manually, do I use setInputFormatClass, or do I set it on the conf object? (See the sketch after these two questions.)
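
On 1): a SequenceFile records its compression type and codec in its own file header, so a SequenceFile-aware reader detects Snappy automatically; the codec class just has to be available on the cluster. Nothing codec-related goes through setInputFormatClass. As a minimal sketch (my own, not from the thread; CodecCheck is an illustrative name), the header can be inspected directly:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.compress.CompressionCodec;

/* Prints the compression settings recorded in a SequenceFile header. */
public class CodecCheck {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        SequenceFile.Reader reader =
            new SequenceFile.Reader(conf, SequenceFile.Reader.file(new Path(args[0])));
        CompressionCodec codec = reader.getCompressionCodec(); // read from the header
        System.out.println("compressed:       " + reader.isCompressed());
        System.out.println("block compressed: " + reader.isBlockCompressed());
        System.out.println("codec:            "
            + (codec == null ? "none" : codec.getClass().getName()));
        reader.close();
    }
}

On 2): codec availability is a cluster-side configuration concern (the io.compression.codecs list in the Hadoop configuration), not something selected per job via setInputFormatClass; the input format only controls how records are presented to the mapper.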

I got the code working. What confuses me a little is that I did not have to specify the compression codec anywhere in the code. Here is the full code:

package org.myorg;

import java.util.Random;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileAsTextInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.map.InverseMapper;
import org.apache.hadoop.mapreduce.lib.map.RegexMapper;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/* Extracts matching regexes from input files and counts them. */
public class Grep extends Configured implements Tool {

    private Grep() {} // singleton

    public int run(String[] args) throws Exception {
        if (args.length < 3) {
            System.out.println("Grep <inDir> <outDir> <regex> [<group>]");
            ToolRunner.printGenericCommandUsage(System.out);
            return 2;
        }

        Path tempDir =
            new Path("grep-temp-" +
                Integer.toString(new Random().nextInt(Integer.MAX_VALUE)));

        Configuration conf = getConf();
        conf.set(RegexMapper.PATTERN, args[2]);
        if (args.length == 4)
            conf.set(RegexMapper.GROUP, args[3]);

        Job grepJob = new Job(conf);

        try {
            grepJob.setJobName("grep-search");

            FileInputFormat.setInputPaths(grepJob, args[0]);
            // Present each sequence-file record to the mapper as Text;
            // the input format decompresses blocks transparently using the
            // codec recorded in the file header.
            grepJob.setInputFormatClass(SequenceFileAsTextInputFormat.class);

            grepJob.setMapperClass(RegexMapper.class);

            grepJob.setCombinerClass(LongSumReducer.class);
            grepJob.setReducerClass(LongSumReducer.class);
            FileOutputFormat.setOutputPath(grepJob, tempDir);
            grepJob.setOutputFormatClass(SequenceFileOutputFormat.class);
            grepJob.setOutputKeyClass(Text.class);
            grepJob.setOutputValueClass(LongWritable.class);

            grepJob.waitForCompletion(true);

            // Second job: invert the (match, count) pairs and sort them
            // by decreasing count.
            Job sortJob = new Job(conf);
            sortJob.setJobName("grep-sort");

            FileInputFormat.setInputPaths(sortJob, tempDir);
            sortJob.setInputFormatClass(SequenceFileInputFormat.class);

            sortJob.setMapperClass(InverseMapper.class);

            sortJob.setNumReduceTasks(1); // write a single file
            FileOutputFormat.setOutputPath(sortJob, new Path(args[1]));
            sortJob.setSortComparatorClass( // sort by decreasing freq
                LongWritable.DecreasingComparator.class);

            sortJob.waitForCompletion(true);
        }
        finally {
            FileSystem.get(conf).delete(tempDir, true);
        }
        return 0;
    }

    public static void main(String[] args) throws Exception {
        int res = ToolRunner.run(new Configuration(), new Grep(), args);
        System.exit(res);
    }
}
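
Assuming the class is packaged into a jar (grep.jar is an illustrative name), the job is run with the usage shown in the code:

    hadoop jar grep.jar org.myorg.Grep <inDir> <outDir> <regex> [<group>]

For checking the input files outside of MapReduce, the small standalone reader below dumps a sequence file's values to stdout; note that it never specifies a codec either: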
package org.myorg;

import java.io.IOException;
import java.nio.charset.Charset;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.ReflectionUtils;

/* Dumps the values of a sequence file to stdout as UTF-8 text. */
public class seqcat {

    public static void main(String[] args) throws IOException {
        String uri = args[0];
        Configuration conf = new Configuration();
        Path seqFilePath = new Path(uri);

        // The reader picks up the compression codec (e.g. Snappy) from the
        // file header; no codec has to be configured here.
        SequenceFile.Reader reader =
            new SequenceFile.Reader(conf, SequenceFile.Reader.file(seqFilePath));

        // Instantiate key/value holders of whatever types the file declares.
        Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
        Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);

        while (reader.next(key, value)) {
            byte[] strBytes = ((BytesWritable) value).getBytes();
            int len = ((BytesWritable) value).getLength();
            String val = new String(strBytes, 0, len, Charset.forName("UTF-8"));
            System.out.println(val);
        }

        reader.close();
    }
}
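
Assuming it is packaged the same way (seqcat.jar is an illustrative name), it can be pointed straight at a sequence file:

    hadoop jar seqcat.jar org.myorg.seqcat /path/to/file.seq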