Hadoop: error when using a custom FileInputFormat
Hello, I am new to MapReduce programming. I am trying to read the contents of a PDF file so that I can later extend the program to do a word count. Below is my program:
package com.pdfreader;
import java.io.IOException;
import java.util.HashSet;
import java.util.StringTokenizer;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextOutputFormat;
import com.itextpdf.text.pdf.PdfReader;
import com.itextpdf.text.pdf.parser.PdfTextExtractor;
import com.pdfreader.newPDFReader.PDFInputFormat;
public class PDFReader1 {

    static class PDFInputFormat extends FileInputFormat<Text, Text> {
        // TODO Auto-generated method stub
        @Override
        public RecordReader<Text, Text> getRecordReader(InputSplit arg0,
                JobConf arg1, Reporter arg2) throws IOException {
            // TODO Auto-generated method stub
            HashSet<String> hset = new HashSet<String>();
            PdfReader reader = new PdfReader("/home/a/Desktop/a.pdf");
            Integer pagecount = reader.getNumberOfPages();
            for (int i = 1; i <= pagecount; i++) {
                String page = PdfTextExtractor.getTextFromPage(reader, i);
                StringTokenizer tokenizer = new StringTokenizer(page);
                while (tokenizer.hasMoreTokens()) {
                    String word = tokenizer.nextToken();
                    hset.add(word);
                }
            }
            return null;
        }
    }

    class WordCountMapper extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> {
        @Override
        public void map(LongWritable key, Text value,
                OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException {
            // TODO Auto-generated method stub
            String line = value.toString();
            StringTokenizer tokenizer = new StringTokenizer(line);
        }
    }

    public static void main(String[] args) throws IOException {
        // TODO Auto-generated method stub
        JobConf conf = new JobConf(PDFReader1.class);
        conf.setJobName("PDFInputFormat");
        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(IntWritable.class);
        conf.setMapperClass(WordCountMapper.class);
        conf.setInputFormat(PDFInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);
        FileInputFormat.setInputPaths(conf, new Path("/home/a/Desktop/a.pdf"));
        FileOutputFormat.setOutputPath(conf, new Path("/home/a/Desktop/Hadoop"));
        JobClient.runJob(conf);
    }
}
Your mapper class is currently a non-static inner class of PDFReader1, so its default constructor depends on an enclosing PDFReader1 instance. You cannot see this from the code as written, but if you run javap on the compiled class you will see that the constructor is generated with a single PDFReader1 parameter.
The stack trace you are seeing is related to this: Hadoop uses reflection to instantiate the mapper class, but it requires the mapper to have a no-argument default constructor.
This is very simple to fix - just add the static keyword before the mapper class name:
public static class WordCountMapper extends MapReduceBase
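
For what it is worth, the effect of the missing no-arg constructor is easy to reproduce outside Hadoop. The following is a small, self-contained sketch (hypothetical class names, not taken from the post above) that instantiates a static nested class reflectively, the way Hadoop instantiates the mapper, and then shows the same reflective lookup failing for a non-static inner class, because its only generated constructor takes the enclosing instance, which is also what javap would show.

import java.lang.reflect.Constructor;

public class InnerClassReflectionDemo {

    // Non-static inner class: the compiler generates a constructor
    // InnerMapper(InnerClassReflectionDemo), so there is no no-arg constructor.
    class InnerMapper { }

    // Static nested class: gets a real no-arg constructor.
    static class NestedMapper { }

    public static void main(String[] args) throws Exception {
        // Works: Hadoop-style reflective instantiation needs exactly this.
        Object ok = NestedMapper.class.getDeclaredConstructor().newInstance();
        System.out.println("Instantiated " + ok.getClass().getSimpleName());

        // Fails with NoSuchMethodException: the only declared constructor
        // takes an InnerClassReflectionDemo argument.
        try {
            InnerMapper.class.getDeclaredConstructor().newInstance();
        } catch (NoSuchMethodException e) {
            System.out.println("No no-arg constructor: " + e.getMessage());
        }

        // For reference, the constructors the compiler actually generated:
        for (Constructor<?> c : InnerMapper.class.getDeclaredConstructors()) {
            System.out.println(c);
        }
    }
}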
Thanks for the response. I tried changing it to a public static class, but I got the same error again.
Did you recompile and rebuild the job jar? That is indeed your problem.
I created a new project and that error seems to be gone, but now I am getting a null pointer error, and debug mode does not show anything.
package com.pdfreader;
import java.io.IOException;
import java.util.HashSet;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TaskAttemptContext;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.record.Buffer;
import com.itextpdf.text.pdf.PdfReader;
import com.itextpdf.text.pdf.parser.PdfTextExtractor;
public class PDFReader1 {

    public static class PDFInputFormat extends FileInputFormat<Text, Text> {
        // TODO Auto-generated method stub
        @Override
        public RecordReader<Text, Text> getRecordReader(InputSplit arg0,
                JobConf arg1, Reporter arg2) throws IOException {
            // TODO Auto-generated method stub
            return new PDFRecordReader();
        }

        public static class PDFRecordReader implements RecordReader<Text, Text> {
            /*HashSet<String> hset=new HashSet<String>();
            PdfReader reader=new PdfReader("/home/vaibhavsrivastava/Desktop/a.pdf");
            Integer pagecount=reader.getNumberOfPages();
            for(int i=1;i<=pagecount;i++)
            {
                String page=PdfTextExtractor.getTextFromPage(reader, i);
                StringTokenizer tokenizer=new StringTokenizer(page);
                while(tokenizer.hasMoreTokens())
                {
                    String word=tokenizer.nextToken();
                    hset.add("/home/vaibhavsrivastava/Desktop/a.pdf"+" "+word);
                }
            }*/
            private FSDataInputStream fileIn;
            public String fileName = null;
            HashSet<String> hset = new HashSet<String>();
            private Text key = null;
            private Text value = null;

            @Override
            public void close() throws IOException {
                // TODO Auto-generated method stub
            }

            @Override
            public Text createKey() {
                // TODO Auto-generated method stub
                return null;
            }

            @Override
            public Text createValue() {
                // TODO Auto-generated method stub
                return value;
            }

            @Override
            public long getPos() throws IOException {
                // TODO Auto-generated method stub
                return 0;
            }

            @Override
            public float getProgress() throws IOException {
                // TODO Auto-generated method stub
                return 0;
            }

            @Override
            public boolean next(Text arg0, Text arg1) throws IOException {
                // TODO Auto-generated method stub
                return false;
            }

            public void initialize(InputSplit genericSplit, TaskAttemptContext job) throws IOException {
                FileSplit split = (FileSplit) genericSplit;
                Configuration conf = job.getConfiguration();
                Path file = split.getPath();
                FileSystem fs = file.getFileSystem(conf);
                fileIn = fs.open(split.getPath());
                fileName = split.getPath().getName().toString();
                PdfReader reader = new PdfReader(fileName);
                Integer pagecount = reader.getNumberOfPages();
                for (int i = 1; i <= pagecount; i++) {
                    String page = PdfTextExtractor.getTextFromPage(reader, i);
                    StringTokenizer tokenizer = new StringTokenizer(page);
                    while (tokenizer.hasMoreTokens()) {
                        String word = tokenizer.nextToken();
                        value = new Text("a");
                    }
                }
            }
        }
    }

    public static class WordCountMapper extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> {
        @Override
        public void map(LongWritable key, Text value,
                OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException {
            // TODO Auto-generated method stub
            String line = value.toString();
            System.out.println(line);
            StringTokenizer tokenizer = new StringTokenizer(line);
            while (tokenizer.hasMoreTokens()) {
                output.collect(new Text(tokenizer.nextToken()), new IntWritable(1));
            }
        }
    }

    public static void main(String[] args) throws IOException {
        // TODO Auto-generated method stub
        JobConf conf = new JobConf(PDFReader1.class);
        conf.setJobName("PDFReader1");
        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(IntWritable.class);
        conf.setMapperClass(WordCountMapper.class);
        conf.setInputFormat(PDFInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);
        FileInputFormat.setInputPaths(conf, new Path("/home/a/Desktop/test"));
        FileOutputFormat.setOutputPath(conf, new Path("/home/a/Desktop/Hadoop"));
        JobClient.runJob(conf);
    }
}
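
A note on the follow-up null pointer error mentioned in the comments: in the old org.apache.hadoop.mapred API the framework never calls an initialize(InputSplit, TaskAttemptContext) method (that hook belongs to the newer org.apache.hadoop.mapreduce API), so in the code above the PDF is never opened, createKey() returns null, and next() always returns false. With the mapred API, the framework calls createKey() and createValue() once and then calls next(key, value) repeatedly, expecting the reader to fill in those reused objects. The following is only a rough sketch of a reader wired for that contract, with illustrative class and variable names and the same iText calls used above; it is not the original poster's code.

package com.pdfreader;

import java.io.IOException;
import java.util.LinkedList;
import java.util.Queue;
import java.util.StringTokenizer;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;

import com.itextpdf.text.pdf.PdfReader;
import com.itextpdf.text.pdf.parser.PdfTextExtractor;

// Sketch only: emits one (fileName, word) pair per word of the PDF.
public class SketchPdfRecordReader implements RecordReader<Text, Text> {

    private final String fileName;
    private final Queue<String> words = new LinkedList<String>();
    private final long totalWords;
    private long consumed = 0;

    // All setup happens in the constructor, because the mapred API has no
    // initialize() hook; getRecordReader(split, conf, reporter) would call this.
    public SketchPdfRecordReader(FileSplit split, JobConf conf) throws IOException {
        Path path = split.getPath();
        fileName = path.getName();
        FileSystem fs = path.getFileSystem(conf);
        FSDataInputStream in = fs.open(path);
        try {
            // iText 5 PdfReader can read from any InputStream, so the file
            // is read from HDFS rather than from a local path.
            PdfReader pdf = new PdfReader(in);
            for (int i = 1; i <= pdf.getNumberOfPages(); i++) {
                StringTokenizer tok = new StringTokenizer(PdfTextExtractor.getTextFromPage(pdf, i));
                while (tok.hasMoreTokens()) {
                    words.add(tok.nextToken());
                }
            }
            pdf.close();
        } finally {
            in.close();
        }
        totalWords = words.size();
    }

    // The framework calls these once and then reuses the objects in next().
    @Override
    public Text createKey() {
        return new Text();
    }

    @Override
    public Text createValue() {
        return new Text();
    }

    // Fill the reused key/value objects; return false when the PDF is exhausted.
    @Override
    public boolean next(Text key, Text value) throws IOException {
        String word = words.poll();
        if (word == null) {
            return false;
        }
        key.set(fileName);
        value.set(word);
        consumed++;
        return true;
    }

    @Override
    public long getPos() throws IOException {
        // Record count stands in for a byte offset in this sketch.
        return consumed;
    }

    @Override
    public float getProgress() throws IOException {
        return totalWords == 0 ? 1.0f : (float) consumed / totalWords;
    }

    @Override
    public void close() throws IOException {
        // Nothing left open; the PDF was fully read in the constructor.
    }
}

With a reader along these lines, getRecordReader(InputSplit split, JobConf job, Reporter reporter) could simply return new SketchPdfRecordReader((FileSplit) split, job), and PDFInputFormat could also override isSplitable to return false so that each PDF file is handled by a single reader.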