WholeFileRecordReader cannot be cast to org.apache.hadoop.mapred.RecordReader


I want to create a new data type in Hadoop, but I am getting the following error from my custom InputFormat class. Here is my code:

Error - WholeFileRecordReader cannot be cast to org.apache.hadoop.mapred.RecordReader

Code -

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TaskAttemptContext;



 public class wholeFileInputFormat extends FileInputFormat<Text, apriori>{

public RecordReader<Text, apriori> getRecordReader(
          InputSplit input, JobConf job, Reporter reporter)
          throws IOException {

        reporter.setStatus(input.toString());

    return (RecordReader<Text, apriori>) new WholeFileRecordReader(job, (FileSplit) input);

      }

}
My custom RecordReader is as follows:

import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

 class WholeFileRecordReader extends RecordReader<Text, apriori> {


private FileSplit fileSplit;
private Configuration conf;
private InputStream in;
private Text key = new Text("");
private apriori value = new apriori();
private boolean processed = false;


public void initialize( JobConf job, FileSplit split)
        throws IOException {

    this.fileSplit = split;
    this.conf = job;
    final Path file = fileSplit.getPath();
    String StringPath = new String(fileSplit.getPath().toString());
    String StringPath2 = new String();
    StringPath2 = StringPath.substring(5);
    System.out.println(StringPath2);
    in = new FileInputStream(StringPath2);

    FileSystem fs = file.getFileSystem(conf);
    in = fs.open(file);
    }


public boolean nextKeyValue() throws IOException, InterruptedException {
    if (!processed) {
        byte[] contents = new byte[(int) fileSplit.getLength()];
        Path file = fileSplit.getPath();
        key.set(file.getName());

        try {
            IOUtils.readFully(in, contents, 0, contents.length);
            value.set(contents, 0, contents.length);
        } finally {
            IOUtils.closeStream(in);
        }

        processed = true;
        return true;
    }

    return false;
}

@Override
public Text getCurrentKey() throws IOException, InterruptedException {
    return key;
}

@Override
public apriori getCurrentValue() throws IOException, InterruptedException {
    return value;
}

@Override
public float getProgress() throws IOException {
    return processed ? 1.0f : 0.0f;
}

@Override
public void close() throws IOException {
    // Do nothing
}

@Override
public void initialize(InputSplit arg0, TaskAttemptContext arg1)
        throws IOException, InterruptedException {
    // TODO Auto-generated method stub

}

} 

The WholeFileRecordReader class is a subclass of org.apache.hadoop.mapreduce.RecordReader. That class cannot be cast to org.apache.hadoop.mapred.RecordReader. Could you try to use the same API in both classes?


According to the rules of the Java programming language, only classes or interfaces (collectively called types) from the same type hierarchy can be cast or converted to one another. If you try to cast two objects that do not share the same type hierarchy, i.e. there is no parent-child relationship between them, you will get a compile-time error. You can refer to this.
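
A toy example of that rule, unrelated to Hadoop:

class Animal { }
class Dog extends Animal { }
class Car { }

public class CastDemo {
    public static void main(String[] args) {
        Animal a = new Dog();
        Dog d = (Dog) a;      // fine: Dog and Animal are in the same type hierarchy
        // Car c = (Car) a;   // rejected by the compiler: Animal and Car are unrelated types
    }
}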

This error occurs because of a package mismatch.

In your code you have combined MRv1 and MRv2, and that is why you get the error.

The package org.apache.hadoop.mapred is MRv1 (MapReduce version 1).

The package org.apache.hadoop.mapreduce is MRv2 (MapReduce version 2).

Since your code mixes the two, pick one API and stick to it: take all imports from either org.apache.hadoop.mapred (MRv1) or org.apache.hadoop.mapreduce (MRv2).
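
If you go the MRv1 route, the record reader would implement the old org.apache.hadoop.mapred.RecordReader interface instead of extending the mapreduce class. A minimal sketch, assuming your apriori Writable exposes the set(byte[], int, int) method used in your question:

import java.io.IOException;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;

class WholeFileRecordReader implements RecordReader<Text, apriori> {

    private final FileSplit fileSplit;
    private final JobConf conf;
    private boolean processed = false;

    WholeFileRecordReader(JobConf conf, FileSplit fileSplit) {
        this.conf = conf;
        this.fileSplit = fileSplit;
    }

    @Override
    public boolean next(Text key, apriori value) throws IOException {
        if (processed) {
            return false;       // only one record per file
        }
        Path file = fileSplit.getPath();
        key.set(file.getName());

        byte[] contents = new byte[(int) fileSplit.getLength()];
        FileSystem fs = file.getFileSystem(conf);
        FSDataInputStream in = fs.open(file);
        try {
            IOUtils.readFully(in, contents, 0, contents.length);
            value.set(contents, 0, contents.length); // assumes apriori has set(byte[], int, int)
        } finally {
            IOUtils.closeStream(in);
        }
        processed = true;
        return true;
    }

    @Override
    public Text createKey() { return new Text(); }

    @Override
    public apriori createValue() { return new apriori(); }

    @Override
    public long getPos() { return processed ? fileSplit.getLength() : 0; }

    @Override
    public float getProgress() { return processed ? 1.0f : 0.0f; }

    @Override
    public void close() { /* stream is closed in next() */ }
}

With this, your existing getRecordReader can simply return new WholeFileRecordReader(job, (FileSplit) input) without any cast.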


Hope this helps.

Can you share the fully qualified name of the WholeFileRecordReader class? — Hi sir, I have edited my question..
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
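
These imports show the mix directly: FileSplit and JobConf come from the old mapred package while the rest come from mapreduce. For reference, a fully consistent MRv2 set would look roughly like this (FileSplit then comes from the lib.input subpackage, and Configuration replaces JobConf):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;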