Apache Spark: programmatically untar files containing multiple unrelated CSV files with Hadoop


I have several compressed files (.tar.gz) in my HDFS, each containing unrelated TSV files (similar to the list below). I would like to untar these archives programmatically, potentially leveraging an MPP architecture (e.g. Hadoop or Spark), and save the extracted files back to HDFS.

- browser.tsv
- connection_type.tsv
- country.tsv
- color_depth.tsv
- javascript_version.tsv
- languages.tsv
- operating_systems.tsv
- plugins.tsv
- referrer_type.tsv
- resolution.tsv
- search_engine.tsv
So far I have only been able to come up with a bash script that downloads each file from HDFS, untars it and saves the folder back to HDFS. I could even parallelize the script, but I'm not happy with that solution either.

Thanks :)

Edit:

It would be interesting to see a solution using any of the following:

  • Spark 2.4.5
  • Hive 2.3.6
  • Pig 0.17.0
  • Hadoop 2.8.5

The only way I can see is to iterate over each file, for example reading it with Spark and then writing it back uncompressed to HDFS using Spark itself. Something like this (using PySpark):
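A rough, untested sketch of that idea (the paths below are placeholders, and reading each archive with binaryFiles and then unpacking it with Python's tarfile module is just one possible approach):

import tarfile
from io import BytesIO

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("untar-tsv").getOrCreate()
sc = spark.sparkContext

input_path = "hdfs:///data/tarballs/*.tar.gz"   # placeholder
output_dir = "hdfs:///data/extracted"           # placeholder

def untar(record):
    path, content = record
    # use the tarball name (without extension) as the parent folder of its entries
    folder = path.split("/")[-1].split(".")[0]
    with tarfile.open(fileobj=BytesIO(content), mode="r:gz") as tar:
        for member in tar.getmembers():
            if member.isfile():
                text = tar.extractfile(member).read().decode("utf-8")
                yield folder + "/" + member.name, text

# one (path, bytes) record per tarball; each archive is unpacked in parallel
members = sc.binaryFiles(input_path).flatMap(untar)

# write every extracted tsv to its own directory under output_dir
# note: this loop pulls each file's content to the driver, which is a simplification
for name, text in members.toLocalIterator():
    sc.parallelize(text.splitlines(), 1).saveAsTextFile(output_dir + "/" + name)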

Note: I haven't tested this code; replicating this setup with HDFS and tar files is complicated, and some extra parameters might be needed to parse the tar files properly, but I hope the idea is clear.


IMHO it is not possible to read all of these files in a single pass, because they have different structures (and represent different data).

I finally found the solution to my problem. It consists of a map-only Hadoop job: each mapper gets one uncompressed file from the tar archive and writes it to a specific path using Hadoop's MultipleOutputs utility.

I also implemented a custom, non-splittable Hadoop input format to handle the tarball extraction, called TarballInputFormat (shown in the full code below).

Thanks for your answer @Vzzarr. My goal is to untar the folders and save them on HDFS without processing them. It would be great if you could provide a simple example of how you would use Spark for this.
Thanks @Vzzarr, I'm trying to extract these files with MapReduce. If I manage to solve it, I'll post the solution.

public class TarballInputFormat extends FileInputFormat<Text, Text> {

    @Override
    protected boolean isSplitable(JobContext context, Path filename) {
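        // a tarball must be read sequentially from start to end, so each file is handled whole by a single mapper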
        return false;
    }

    @Override
    public RecordReader<Text, Text> createRecordReader(InputSplit inputSplit,
                                                       TaskAttemptContext taskAttemptContext) {
        TarballRecordReader recordReader = new TarballRecordReader();
        recordReader.initialize(inputSplit, taskAttemptContext);
        return recordReader;
    }

}

public class TarballRecordReader extends RecordReader<Text, Text> {

    private static final Log log = LogFactory.getLog(TarballRecordReader.class);

    private TarInputStream tarInputStream;
    private Text key;
    private Text value;
    private boolean finished = false;
    private String folderName;

    @Override
    public void initialize(InputSplit inputSplit, TaskAttemptContext taskAttemptContext) {
        key = new Text();
        value = new Text();

        try {
            FileSplit split = (FileSplit) inputSplit;
            Configuration conf = taskAttemptContext.getConfiguration();

            Path tarballPath = split.getPath();
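            // the tarball name without extension becomes the parent folder of its extracted entries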
            folderName = tarballPath.getName().split("\\.")[0];
            FileSystem fs = tarballPath.getFileSystem(conf);
            FSDataInputStream fsInputStream = fs.open(tarballPath);

            CompressionCodecFactory compressionCodecs = new CompressionCodecFactory(conf);
            CompressionCodec codec = compressionCodecs.getCodec(tarballPath);

            tarInputStream = new TarInputStream(codec.createInputStream(fsInputStream));
        }
        catch (IOException ex) {
            log.error(ex.getMessage());
        }
    }

    @Override
    public boolean nextKeyValue() throws IOException {

        TarEntry tarEntry = tarInputStream.getNextEntry();
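        // skip directory entries; every regular file in the archive becomes one key/value pair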
        while (tarEntry != null && tarEntry.isDirectory())
            tarEntry = tarInputStream.getNextEntry();

        finished = tarEntry == null;
        if (finished) {
            return false;
        }

        key.clear();
        value.clear();

        long tarSize = tarEntry.getSize();

        int read;
        int offset = 0;
        int bufSize = (int) tarSize;
        byte[] buffer = new byte[bufSize];
        // read the whole entry into the buffer, tracking how much is still missing
        while (offset < bufSize && (read = tarInputStream.read(buffer, offset, bufSize - offset)) > 0) {
            offset += read;
        }

        value.set(buffer);
        key.set(folderName + "/" + tarEntry.getName());

        return true;
    }

    @Override
    public Text getCurrentKey() {
        return key;
    }

    @Override
    public Text getCurrentValue() {
        return value;
    }

    @Override
    public float getProgress() {
        return finished? 1: 0;
    }

    @Override
    public void close() throws IOException {
        if (tarInputStream != null) {
            tarInputStream.close();
        }
    }
}

public class ExtractTarball extends Configured implements Tool {

    public static final Log log = LogFactory.getLog(ExtractTarball.class);
    private static final String LOOKUP_OUTPUT = "lookup";

    public static class MapClass extends Mapper<Text, Text, Text, Text> {

        private MultipleOutputs<Text, Text> mos;

        @Override
        protected void map(Text key, Text value, Context context) throws IOException, InterruptedException {
            String filename = key.toString();

            int length = value.getBytes().length;
            System.out.printf("%s: %s%n", filename, length);

            // the key (folder/filename) is passed as the base output path, so each entry gets its own file
            mos.write(LOOKUP_OUTPUT, "", value, key.toString());
        }

        public void setup(Context context) {
            mos = new MultipleOutputs<>(context);
        }

        protected void cleanup(Context context) throws IOException, InterruptedException {
            mos.close();
        }
    }

    public int run(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "ExtractTarball");
        job.setJarByClass(this.getClass());
        job.setMapperClass(MapClass.class);

        job.setInputFormatClass(TarballInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
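        // map-only job: the mappers write the extracted files directly, no reduce phase is needed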
        job.setNumReduceTasks(0);

        MultipleOutputs.addNamedOutput(job, LOOKUP_OUTPUT, TextOutputFormat.class, Text.class, Text.class);

        log.isDebugEnabled();
        return job.waitForCompletion(true) ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        int exitCode = ToolRunner.run(new ExtractTarball(), args);
        System.out.println(exitCode);
        System.exit(exitCode);
    }
}

This is how the output looks:

- output
    - lookup_data
        - .browser.tsv-m-00000.crc
        - .browser_type.tsv-m-00000.crc
        - .color_depth.tsv-m-00000.crc
        - .column_headers.tsv-m-00000.crc
        - .connection_type.tsv-m-00000.crc
        - .country.tsv-m-00000.crc
        - .event.tsv-m-00000.crc
        - .javascript_version.tsv-m-00000.crc
        - .languages.tsv-m-00000.crc
        - .operating_systems.tsv-m-00000.crc
        - .plugins.tsv-m-00000.crc
        - .referrer_type.tsv-m-00000.crc
        - .resolution.tsv-m-00000.crc
        - .search_engines.tsv-m-00000.crc
        - browser.tsv-m-00000
        - browser_type.tsv-m-00000
        - color_depth.tsv-m-00000
        - column_headers.tsv-m-00000
        - connection_type.tsv-m-00000
        - country.tsv-m-00000
        - event.tsv-m-00000
        - javascript_version.tsv-m-00000
        - languages.tsv-m-00000
        - operating_systems.tsv-m-00000
        - plugins.tsv-m-00000
        - referrer_type.tsv-m-00000
        - resolution.tsv-m-00000
        - search_engines.tsv-m-00000
    - ._SUCCESS.crc
    - .part-m-00000.crc
    - _SUCCESS
    - part-m-00000