The method file(FSDataOutputStream) is undefined for the type SequenceFile.Writer


I am using Hadoop 1.0.4 with Eclipse, and when using the SequenceFile writer, Eclipse shows the error above. Here is my code:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.Writer;
import org.apache.hadoop.io.compress.GzipCodec;



public class filemerge {

    public static void main(String[] args) throws IOException {

        Configuration conf = new Configuration();
        FileSystem hdfs  = FileSystem.getLocal(conf);
        FileSystem local = FileSystem.getLocal(conf);

        Path inputDir = new Path(args[0]);
        Path hdfsFile = new Path(args[1]);

        IntWritable key = null;
        BytesWritable value = null;



        try {
            FileStatus[] inputFiles = local.listStatus(inputDir);
            FSDataOutputStream out = hdfs.create(hdfsFile);
            if ((conf != null) && (args[0] != null)) {
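                // Eclipse flags the next call: Writer.file(FSDataOutputStream) is reported as undefined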
                SequenceFile.Writer writer = SequenceFile.createWriter(conf, Writer.file(out),
                        Writer.compression(SequenceFile.CompressionType.RECORD, new GzipCodec()),
                        Writer.keyClass(IntWritable.class), Writer.valueClass(BytesWritable.class));

                for (int i = 0; i < inputFiles.length; i++) {
                    System.out.println(inputFiles[i].getPath().getName());
                    FSDataInputStream in = local.open(inputFiles[i].getPath());
                    value = new BytesWritable(in.read());
                    key = new IntWritable(i);
                    writer.append(key, value);
                    in.close();
                }
                out.close();
                IOUtils.closeStream(writer);
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
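
The option-style factory used here (SequenceFile.createWriter(Configuration, Writer.Option...) together with Writer.file, Writer.compression, Writer.keyClass and Writer.valueClass) is not part of Hadoop 1.0.4; it arrived with the 2.x line, which is why Eclipse reports the method as undefined. On 1.0.4 the writer is created with the older overload that takes the FileSystem and Path directly. A minimal sketch against the 1.x API, reusing the hdfs, conf and hdfsFile variables from the code above (the codec is built through ReflectionUtils so it picks up the configuration):

    // Requires: import org.apache.hadoop.util.ReflectionUtils;
    GzipCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
    SequenceFile.Writer writer = SequenceFile.createWriter(
            hdfs, conf, hdfsFile,                    // target file system and path
            IntWritable.class, BytesWritable.class,  // key and value classes
            SequenceFile.CompressionType.RECORD,     // per-record compression
            codec);

Note that this overload creates and opens the file itself, so the separate hdfs.create(hdfsFile) stream is not needed. Unrelated to the reported error, new BytesWritable(in.read()) also looks like it will not compile: BytesWritable takes a byte[], so each input file would have to be read into a byte array first.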
I think you can drop the import of SequenceFile.Writer, since SequenceFile is already imported. @GV: Tried that, same problem.
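
For reference, even on a Hadoop 2.x classpath, where the option-style factory does exist, this call would still fail to compile, because Writer.file(...) expects a Path naming the file to create; an already-opened FSDataOutputStream is passed with Writer.stream(...) instead. A minimal sketch, assuming Hadoop 2.x:

    // Hadoop 2.x option-style API: pass the open stream via Writer.stream(...).
    // Requires: import org.apache.hadoop.util.ReflectionUtils;
    SequenceFile.Writer writer = SequenceFile.createWriter(conf,
            Writer.stream(out),
            Writer.compression(SequenceFile.CompressionType.RECORD,
                    ReflectionUtils.newInstance(GzipCodec.class, conf)),
            Writer.keyClass(IntWritable.class),
            Writer.valueClass(BytesWritable.class));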