Java: trying to use LZO compression with MapReduce

I want to use LZO compression in MapReduce, but I get an error when I run my MapReduce job. I am using Ubuntu with a Java program, and I am only trying to run this on my local machine. My initial error is

ERROR lzo.GPLNativeCodeLoader: Could not load native gpl library
followed by

ERROR lzo.LzoCodec: Cannot load native-lzo without native-hadoop
and then

java.lang.RuntimeException: native-lzo library not available
I have followed many online and written instructions on how to download and configure the files for LZO compression. Here you can see my hadoop-lzo jar file in the lib folder.
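A quick way to narrow down where the load fails is a check along these lines. This is only a sketch: it assumes the hadoop-lzo jar is on the classpath, and it uses the two static probes that hadoop-lzo exposes, GPLNativeCodeLoader.isNativeCodeLoaded() and LzoCodec.isNativeLzoLoaded(Configuration).

import org.apache.hadoop.conf.Configuration;

import com.hadoop.compression.lzo.GPLNativeCodeLoader;
import com.hadoop.compression.lzo.LzoCodec;

public class NativeLzoCheck {
    public static void main(String[] args) {
        // The libgplcompression and liblzo2 natives must be on java.library.path
        System.out.println("java.library.path = " + System.getProperty("java.library.path"));
        System.out.println("GPL natives loaded: " + GPLNativeCodeLoader.isNativeCodeLoaded());
        System.out.println("native LZO usable:  " + LzoCodec.isNativeLzoLoaded(new Configuration()));
    }
}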

I have also changed my configuration. Here is my core-site.xml:

<configuration>
<property>
    <name>fs.default.name</name>
    <value>hdfs://localhost:9000</value>
</property>
<property>
    <name>io.compression.codecs</name>
        <value>com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec</value>
</property>

<property>
    <name>io.compression.codec.lzo.class</name>
    <value>com.hadoop.compression.lzo.LzoCodec</value>
</property>
<property>
    <name>mapred.output.compress</name>
    <value>true</value>
</property>

<property>
    <name>mapred.output.compression.codec</name>
    <value>com.hadoop.compression.lzo.LzopCodec</value>
</property>
</configuration>
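One quick way to confirm that this file is being picked up and that both codec classes resolve on the classpath is to ask Hadoop's codec factory what it sees; a minimal sketch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionCodecFactory;

public class ListCodecs {
    public static void main(String[] args) {
        // new Configuration() reads core-site.xml from the classpath;
        // getCodecClasses() fails if any entry in io.compression.codecs cannot load
        Configuration conf = new Configuration();
        System.out.println(CompressionCodecFactory.getCodecClasses(conf));
    }
}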
In case you are wondering, I also have the native libraries under /home/hadoop/lib/native/lib/lib.

For what it is worth, here is my driver class, which does all of the compression work:

//import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
//import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
//import org.apache.hadoop.io.compress.CompressionCodec;
import com.hadoop.compression.lzo.LzopCodec;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
//import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class FuzzyJoinDriver extends Configured implements Tool{

public static void main(String[] args) throws Exception {
    int exitCode = ToolRunner.run(new Configuration(), new FuzzyJoinDriver(), args);
    System.exit(exitCode);
}
@Override
public int run(String[] args) throws Exception {
    if (args.length != 2) {
          System.err.println("Usage: FuzzyJoinDriver <input path> <output path>");
          System.exit(-1);
        }

        Configuration conf = new Configuration();
        //Used to compress map output
        //conf.setBoolean("mapred.compress.map.output", true);
        //conf.setClass("mapred.map.output.compression.codec", GzipCodec.class, CompressionCodec.class);
        Job job = new Job(conf);
        job.setJarByClass(FuzzyJoinDriver.class);
        job.setJobName("Fuzzy Join");

        //Distributed Cache
        //DistributedCache.addCacheFile(new URI("/cache/ReducerCount.txt"),  job.getConfiguration());


        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        job.setMapperClass(FuzzyJoinMapper.class);


        job.setReducerClass(FuzzyJoinReducer.class);
        job.setMapOutputKeyClass(IntWritable.class);
        job.setMapOutputValueClass(RelationTuple.class);


        job.setPartitionerClass(JoinKeyPartitioner.class);


        job.setOutputKeyClass(NullWritable.class);
        job.setOutputValueClass(Text.class);

        //Used to compress output from reducer
        FileOutputFormat.setCompressOutput(job,  true);
        FileOutputFormat.setOutputCompressorClass(job, LzopCodec.class);
        //SequenceFileOutputFormat.setOutputCompressionType(job, org.apache.hadoop.io.SequenceFile.CompressionType.BLOCK);

        return(job.waitForCompletion(true) ? 0 : 1);
    }
}
It compiles without errors.
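Once the native-library problem is sorted out, one way to confirm that the reducer output really is LZOP-compressed is to read a part file back through the codec factory. A sketch, with a hypothetical output path:

import java.io.BufferedReader;
import java.io.InputStreamReader;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;

public class ReadLzoOutput {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path part = new Path("output/part-r-00000.lzo"); // hypothetical path
        // getCodec() resolves LzopCodec from the .lzo file extension
        CompressionCodec codec = new CompressionCodecFactory(conf).getCodec(part);
        try (BufferedReader in = new BufferedReader(new InputStreamReader(
                codec.createInputStream(FileSystem.get(conf).open(part))))) {
            System.out.println(in.readLine()); // first decompressed line
        }
    }
}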


I worry that my weak understanding of the configuration and the other steps has led me down the wrong path, and perhaps I am missing something simple that is obvious to people who understand this better than I do. Thank you for reading this far; I know it is a long post.

You need to set the following properties in mapred-site.xml:

<property>
    <name>mapred.compress.map.output</name>
    <value>true</value>
</property>

<property>
    <name>mapred.map.output.compression.codec</name>
    <value>com.hadoop.compression.lzo.LzoCodec</value>
</property>
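If you would rather not edit mapred-site.xml, the same pair of settings can be applied per job in the driver before the Job is constructed. A sketch; on Hadoop 2.x the canonical names are mapreduce.map.output.compress and mapreduce.map.output.compress.codec, while the older mapred.* names above still work as deprecated aliases:

Configuration conf = new Configuration();
// compress the intermediate map output with the hadoop-lzo codec
conf.setBoolean("mapreduce.map.output.compress", true);
conf.set("mapreduce.map.output.compress.codec",
        "com.hadoop.compression.lzo.LzoCodec");
Job job = new Job(conf);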


First and foremost: thanks for taking the time to answer; this question has not gotten the attention I hoped for. As you can see from my driver class, I am not even trying to compress the intermediate output, only the output of the reducer. Do you still think I need to add these properties?

I am not sure, but in my opinion yes, and do not forget to restart Hadoop after the change. Also make sure you have both the 32-bit and 64-bit liblzo2 installed.

Any details on how to install both the 32-bit and 64-bit liblzo2?

You must install both the 32-bit and 64-bit liblzo2.
For reference, the same settings in their older spellings, applied in code, plus the equivalent call on the old-API JobConf:

conf.set("mapred.compress.map.output", "true");
conf.set("mapred.map.output.compression.codec", "com.hadoop.compression.lzo.LzoCodec");

jobConf.setMapOutputCompressorClass(LzoCodec.class);