
Java: Hadoop job submitted from Eclipse fails with "class not found"

Tags: java, eclipse, hadoop, mapreduce, cloudera

I am trying to submit a MapReduce job from Eclipse to the JobTracker, which in this case is running on my local machine.

Here is the code:

package org.myorg;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class WordCount {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        conf.set("fs.defaultFS", "hdfs://localhost:8020");
        conf.set("mapred.job.tracker", "localhost:8021");

        Job job = new Job(conf, "wordcount");

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        FileInputFormat.addInputPath(job, new Path("hdfs://localhost:8020/tmp/nages/cooper.txt"));
        FileOutputFormat.setOutputPath(job, new Path("hdfs://localhost:8020/tmp/nages/output"));

        job.setJarByClass(WordCount.class);     
        job.waitForCompletion(true);
    }

}
The mapper file:

package org.myorg;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class Map extends Mapper<LongWritable, Text, Text, IntWritable> {
    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();

    public void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        String line = value.toString();
        StringTokenizer tokenizer = new StringTokenizer(line);
        while (tokenizer.hasMoreTokens()) {
            word.set(tokenizer.nextToken());
            context.write(word, one);
        }
    }
}
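
The Reduce class that the driver registers with job.setReducerClass(Reduce.class) is not shown in the post. For completeness, here is a minimal sketch of a word-count reducer matching the driver's output types (Text, IntWritable); this is an assumption based on the driver, not code from the original question:

package org.myorg;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class Reduce extends Reducer<Text, IntWritable, Text, IntWritable> {

    public void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        // Sum the 1s that the mapper emitted for this word
        int sum = 0;
        for (IntWritable value : values) {
            sum += value.get();
        }
        context.write(key, new IntWritable(sum));
    }
}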

How can I fix this problem?

With localhost, it checks for the file in the local file system. Try giving the HDFS IP instead of localhost:

config.set("fs.defaultFS", "hdfs://HDFSclusterip:8020/");
config.set("hadoop.job.ugi", "hdfs");
Do the same for the input and output paths:

FileInputFormat.addInputPath(job, new Path("hdfs://HDFSclusterip:8020/tmp/nages/cooper.txt"));
FileOutputFormat.setOutputPath(job, new Path("hdfs://HDFSclusterip:8020/tmp/nages/output"));


Let me know if this works.

It is a jar problem: the Map class cannot be found. Add job.setJarByClass(WordCount.class); to the driver code.


Cheers.
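
Note that the driver above already calls job.setJarByClass(WordCount.class), yet the error log below still shows "No job jar file set". setJarByClass can only locate a jar if the class was actually loaded from one; when the job is launched from inside Eclipse, the classes are loaded from the workspace's bin directory, so no job jar is found. One common workaround, following the JobConf#setJar(String) hint in that warning, is to export the three classes to a jar and point the configuration at it explicitly. A minimal sketch, assuming a hypothetical jar path:

Configuration conf = new Configuration();

conf.set("fs.defaultFS", "hdfs://localhost:8020");
conf.set("mapred.job.tracker", "localhost:8021");

// Hypothetical path: a jar exported from Eclipse (File > Export > JAR file)
// containing WordCount, Map and Reduce. "mapred.jar" is the property that
// JobConf#setJar(String) writes to in MR1.
conf.set("mapred.jar", "/path/to/wordcount.jar");

Job job = new Job(conf, "wordcount");
// ... the rest of the driver is unchanged ...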

Do you want to run the program in local mode against the local FS, or on the cluster, accessing HDFS?

I am submitting the job to the Hadoop cluster. I am running the HDFS cluster on my local machine.

Have you included all the dependencies?

Yes, all the dependencies are configured in Eclipse. It is just a word-count program.

Just package these 3 classes on their own; all the other dependencies should already be available on the Hadoop cluster. Here is a similar question:

The error log from the failed run:
14/09/23 11:09:41 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
14/09/23 11:09:42 WARN mapred.JobClient: Use GenericOptionsParser for parsing the arguments. Applications should implement Tool for the same.
14/09/23 11:09:42 WARN mapred.JobClient: No job jar file set.  User classes may not be found. See JobConf(Class) or JobConf#setJar(String).
14/09/23 11:09:42 INFO input.FileInputFormat: Total input paths to process : 1
14/09/23 11:09:42 WARN conf.Configuration: fs.default.name is deprecated. Instead, use fs.defaultFS
14/09/23 11:09:43 INFO mapred.JobClient: Running job: job_201409221650_0012
14/09/23 11:09:44 INFO mapred.JobClient:  map 0% reduce 0%
14/09/23 11:09:50 INFO mapred.JobClient: Task Id : attempt_201409221650_0012_m_000000_0, Status : FAILED
java.lang.RuntimeException: java.lang.ClassNotFoundException: Class org.myorg.Map not found
    at org.apache.hadoop.conf.Configuration.getClass(Configuration.java:1774)
    at org.apache.hadoop.mapreduce.task.JobContextImpl.getMapperClass(JobContextImpl.java:191)
    at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:631)
    at org.apache.hadoop.mapred.MapTask.run(MapTask.java:330)
    at org.apache.hadoop.mapred.Child$4.run(Child.java:268)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:415)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1408)
    at org.apache.hadoop.mapred.Child.main(Child.java:262)
Caused by: java.lang.ClassNotFoundException: Class org.myorg.Map not found
    at org.apache.hadoop.conf.Configuration.getClassByName(Configuration.java:1680)
    at org.apache.hadoop.conf.Configuration.getClass(Configuration.java:1772)
    ... 8 more
config.set("fs.defaultFS", "hdfs://HDFSclusterip:8020/");
config.set("hadoop.job.ugi", "hdfs");