Hadoop:ClassNotFoundException


I followed this tutorial, which is quite simple (), and everything worked fine until I tried to run the Java file.

I got the following stack trace:

Exception in thread "main" java.lang.ClassNotFoundException: ProcessUnits
    at java.net.URLClassLoader.findClass(URLClassLoader.java:381)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:424)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
    at java.lang.Class.forName0(Native Method)
    at java.lang.Class.forName(Class.java:348)
    at org.apache.hadoop.util.RunJar.run(RunJar.java:214)
    at org.apache.hadoop.util.RunJar.main(RunJar.java:136)
I have tried many different things, and I always get the ClassNotFoundException. I have a feeling it is related to the Hadoop classpath or something similar. Please help.

import java.io.IOException;
import java.util.Iterator;
import java.util.StringTokenizer;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;

public class ProcessUnits 
{ 
   //Mapper class 
   public static class E_EMapper extends MapReduceBase implements 
   Mapper<LongWritable ,/*Input key Type */ 
   Text,                /*Input value Type*/ 
   Text,                /*Output key Type*/ 
   IntWritable>        /*Output value Type*/ 
   { 

      //Map function 
      public void map(LongWritable key, Text value, 
      OutputCollector<Text, IntWritable> output,   
      Reporter reporter) throws IOException 
      { 
         String line = value.toString(); 
         String lasttoken = null; 
         StringTokenizer s = new StringTokenizer(line,"\t"); 
         String year = s.nextToken(); 

         while(s.hasMoreTokens())
            {
               lasttoken=s.nextToken();
            } 
         int avgprice = Integer.parseInt(lasttoken.trim()); 
         output.collect(new Text(year), new IntWritable(avgprice)); 
      } 
   } 


   //Reducer class 
   public static class E_EReduce extends MapReduceBase implements 
   Reducer< Text, IntWritable, Text, IntWritable > 
   {  

      //Reduce function 
      public void reduce( Text key, Iterator <IntWritable> values, 
         OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException 
         { 
            int maxavg=30; 
            int val=Integer.MIN_VALUE; 

            while (values.hasNext()) 
            { 
               if((val=values.next().get())>maxavg) 
               { 
                  output.collect(key, new IntWritable(val)); 
               } 
            } 

         } 
   }  
   //Main function 
   public static void main(String args[])throws Exception 
   { 
      JobConf conf = new JobConf(ProcessUnits.class); 
      conf.setJobName("max_eletricityunits"); 
      conf.setOutputKeyClass(Text.class);
      conf.setOutputValueClass(IntWritable.class); 
      conf.setMapperClass(E_EMapper.class); 
      conf.setCombinerClass(E_EReduce.class); 
      conf.setReducerClass(E_EReduce.class); 
      conf.setInputFormat(TextInputFormat.class); 
      conf.setOutputFormat(TextOutputFormat.class); 

      FileInputFormat.setInputPaths(conf, new Path(args[0])); 
      FileOutputFormat.setOutputPath(conf, new Path(args[1])); 

      JobClient.runJob(conf); 
   } 
} 

This code runs fine. ProcessUnits is the main class here; I suspect you changed the name of your main class but did not change JobConf conf = new JobConf(ProcessUnits.class) to match it. Specify the class that contains main in the job configuration according to your program's actual main class.
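
As a minimal sketch of that point (using a hypothetical renamed class, MyProcessUnits): whatever the class containing main is actually called, the JobConf constructor must reference that same class.

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

public class MyProcessUnits                              // hypothetical renamed main class
{
   public static void main(String args[]) throws Exception
   {
      // must reference the enclosing main class, not the old name
      JobConf conf = new JobConf(MyProcessUnits.class);
      // ... same mapper/reducer/format setup as in the code above ...
      JobClient.runJob(conf);
   }
}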

Try adding the jar file to your HADOOP_CLASSPATH, and verify that you wrote the fully qualified name of the class, e.g. mypackage.MyClass.
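
To illustrate what a fully qualified name looks like here, assume (as the tutorial does) that the class is declared inside a package named hadoop; the class is then hadoop.ProcessUnits, not just ProcessUnits:

package hadoop;    // with this declaration, the fully qualified name is hadoop.ProcessUnits

public class ProcessUnits
{
   // ... mapper, reducer and main exactly as shown above ...
}

The job is then launched with the package-qualified name, e.g. $HADOOP_HOME/bin/hadoop jar units.jar hadoop.ProcessUnits input_dir output_dir, and the jar can be added to the classpath with something like export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/path/to/units.jar (the path is only a placeholder).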

Can you check that you are running the command correctly? Check the current directory and the class name: you should have hadoop.ProcessUnits, not just ProcessUnits, see below.

$HADOOP_HOME/bin/hadoop jar units.jar hadoop.ProcessUnits input_dir output_dir

If it still does not work, try the following command:


$HADOOP_HOME/bin/hadoop jar units.jar -cp ./* hadoop.ProcessUnits input_dir output_dir

This can also happen if something goes wrong while building the jar file that is passed as the "hadoop jar" argument. That can leave the Main-Class entry in the jar's manifest set to the literal string "null" rather than being unset, which prevents the hadoop jar command from picking up the main class from the command-line argument.
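
A quick way to check for that, assuming the jar is named units.jar as above, is to print the manifest and, if needed, rebuild the jar with an explicit entry point (standard jar tool flags; classes/ stands in for wherever your compiled .class files live):

unzip -p units.jar META-INF/MANIFEST.MF
jar cfe units.jar hadoop.ProcessUnits -C classes/ .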

As far as the Hadoop API being used goes, this is a very old tutorial; hardly anyone writes plain Java MapReduce code anymore.

I'm new to this and just want to learn about Hadoop and MapReduce. Can you point me to an example or tutorial?

I would probably point you to the official examples on the Apache Hadoop page...