Hadoop 错误:java.lang.ClassNotFoundException:org.apache.commons.collections4.map.LinkedMap

Hadoop 错误:java.lang.ClassNotFoundException:org.apache.commons.collections4.map.LinkedMap,hadoop,mapreduce,Hadoop,Mapreduce,执行命令时出现错误: hadoop jar /home/edureka/Desktop/firstnlast.jar FirstandLast hdfs:/FirstnLast/first-last_sample hdfs:/FirstnLastoutput 代码如下: import java.io.IOException; /*import java.util.ArrayList; import java.util.Iterator; import java.util.StringToken

执行命令时出现错误:

hadoop jar /home/edureka/Desktop/firstnlast.jar FirstandLast hdfs:/FirstnLast/first-last_sample hdfs:/FirstnLastoutput
代码如下:

import java.io.IOException;
/*import java.util.ArrayList;
import java.util.Iterator;
import java.util.StringTokenizer;*/

import org.apache.commons.collections4.map.LinkedMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class FirstandLast {

    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        // TODO Auto-generated method stub


        Configuration conf = new Configuration();
        Path in=new Path(args[0]);
        FileSystem a=in.getFileSystem(conf);
        FileStatus s=a.getFileStatus(in);


        long len=s.getLen();

        if(len<=0){
            System.err.println("file size is empty");
            System.exit(-1);
        }
        Job j = new Job(conf,"First and last lines");
        j.setJarByClass(FirstandLast.class);
        j.setMapperClass(Map1.class);
        j.setReducerClass(Red1.class);
        j.setOutputKeyClass(IntWritable.class);
        j.setOutputValueClass(Text.class);
        j.setInputFormatClass(TextInputFormat.class);
        j.setOutputFormatClass(TextOutputFormat.class);

        Path outputPath = new Path(args[1]);

      FileInputFormat.addInputPath(j, new Path(args[0]));
      FileOutputFormat.setOutputPath(j,outputPath);
        //deleting the output path automatically from hdfs so that we don't have delete it explicitly

        outputPath.getFileSystem(conf).delete(outputPath);

            //exiting the job only if the flag value becomes false

      System.exit(j.waitForCompletion(true)?0:1);

    }

    public static class Map1 extends Mapper<LongWritable, Text, IntWritable, Text> {
        static int count=0;

        public void map(LongWritable key, Text value , Context con) throws IOException, InterruptedException{
            count++;
            String line=value.toString();

                con.write(new IntWritable(1), new Text(count+"\t"+line));



        }
    }

public static class Red1 extends Reducer<IntWritable, Text, IntWritable, Text> {

    @Override
    public  void reduce(IntWritable key, Iterable<Text> value,
            Reducer<IntWritable, Text, IntWritable, Text>.Context con)
            throws IOException, InterruptedException {

        LinkedMap<String,String> map=new LinkedMap<String, String>();
        String s="";
        for(Text a:value){
            s=a.toString();
            map.put(s, null);
        }
        String first[]=map.firstKey().split("\t");
        String last[]=map.lastKey().split("\t");
        con.write(new IntWritable(Integer.parseInt(last[0])),new Text(last[1]));
        con.write(new IntWritable(Integer.parseInt(first[0])),new Text(first[1]));


    }

    }

    }
import java.io.IOException;
/*导入java.util.ArrayList;
导入java.util.Iterator;
导入java.util.StringTokenizer*/
导入org.apache.commons.collections4.map.LinkedMap;
导入org.apache.hadoop.conf.Configuration;
导入org.apache.hadoop.fs.FileStatus;
导入org.apache.hadoop.fs.FileSystem;
导入org.apache.hadoop.fs.Path;
导入org.apache.hadoop.io.IntWritable;
导入org.apache.hadoop.io.LongWritable;
导入org.apache.hadoop.io.Text;
导入org.apache.hadoop.mapreduce.Job;
导入org.apache.hadoop.mapreduce.Mapper;
导入org.apache.hadoop.mapreduce.Reducer;
导入org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
导入org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
导入org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
导入org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
公共类 FirstandLast {
公共静态 void main(String[] args) 引发 IOException、InterruptedException、ClassNotFoundException{
//TODO自动生成的方法存根
Configuration conf=新配置();
路径输入=新路径(args[0]);
文件系统a=in.getFileSystem(conf);
FileStatus s=a.getFileStatus(in);
long len=s.getLen();

如果(len<=0){……}（其余代码与上文相同）

您的程序在我的本地计算机上运行时没有出现任何错误,我从6行文件中获得以下输出:

1   customer_id, server_id, code
6   11145456, 1436, 2
只需将
commons-collections4-4.1.jar
jar复制到
HADOOP_HOME/lib
:(对于
HADOOP-2.2.0
lib,路径是
$HADOOP_HOME/share/hadoop/common/lib

运行:


您的程序在我的本地计算机上运行时没有出现任何错误,我从6行文件中获得以下输出:

1   customer_id, server_id, code
6   11145456, 1436, 2
只需将
commons-collections4-4.1.jar
jar复制到
HADOOP_HOME/lib
:(对于
HADOOP-2.2.0
lib,路径是
$HADOOP_HOME/share/hadoop/common/lib

运行:


请参阅更新的答案:也将该 jar 添加到
HADOOP_CLASSPATH
您是否使用任何IDE来创建jar?我正在使用Eclipse。HADOOP_CLASSPATH 中更新的答案里的 /local/path/jar2.jar 是什么?如果您得到任何其他未找到的类异常,您需要添加更多jar,您可以添加它们,它们之间用
隔开:
-注意:
/local/PATH/to
您需要根据jar在您的环境中的位置进行编辑我刚才给了您一个在类路径中添加多个jar的示例参见更新的答案:还要将jar添加到
HADOOP_CLASSPATH 中
您是否使用任何IDE来创建jar?我正在使用Eclipse。在 HADOOP_CLASSPATH 中更新的答案里 /local/path/jar2.jar 是什么?如果您得到任何其他类找不到异常,您需要添加更多jar,您可以添加它们,它们之间用
分隔-注意:
/local/path/to
您需要根据jar在您的环境中的位置进行编辑我刚才给了您一个在CLASSPATH中添加多个jar的示例
hadoop jar /home/edureka/Desktop/firstnlast.jar FirstandLast hdfs:/FirstnLast/first-last_sample hdfs:/FirstnLastoutput