Hadoop MapReduce - java.io.IOException: Job failed!


I get the following exception when I try to run a Hadoop MapReduce program:

java.io.IOException: Job failed!
    at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:865)
    at com.vasa.books.BookDriver.main(BookDriver.java:37)

BookDriver.java

package com.vasa.books;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;

public class BookDriver {

    public static void main(String[] args) {
        JobClient client = new JobClient();
        JobConf conf = new JobConf(BookDriver.class);

        conf.setJobName("booknamefind");

        // Key/value types emitted to the job output
        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(IntWritable.class);

        conf.setMapperClass(BookMapper.class);
        conf.setReducerClass(BookReducer.class);

        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);

        // args[0] = input path, args[1] = output path
        FileInputFormat.setInputPaths(conf, new Path(args[0]));
        FileOutputFormat.setOutputPath(conf, new Path(args[1]));

        client.setConf(conf);
        try {
            JobClient.runJob(conf);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
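For reference, a driver like this takes the input and output paths as program arguments, e.g. hadoop jar books.jar com.vasa.books.BookDriver BX-Books.txt out_file (the jar name here is hypothetical; the paths match the ones mentioned in the comments below), or via an IDE run configuration that supplies args[0] and args[1].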
BookMapper.java

package com.vasa.books;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class BookMapper extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> {
    private final static IntWritable one = new IntWritable(1);

    public void map(LongWritable key, Text value,
            OutputCollector<Text, IntWritable> output, Reporter reporter)
            throws IOException {
        String tempString = value.toString();
        // Split the line on "," (quote-comma-quote)
        String[] singleBookData = tempString.split("\",\"");
        // NOTE: singleBookData[3] fails if a line splits into fewer than 4 fields;
        // this split turned out to be the cause of the failure (see comments below)
        output.collect(new Text(singleBookData[3]), one);
    }
}

Why does this exception occur?

According to the JobClient source code, JobClient.runJob() calls JobClient.monitorAndPrintJob(), which returns a boolean. If that boolean is false (meaning the job failed), it prints the unhelpful "Job failed!" message you are seeing.

To get around this, you have two options:

1 - (Faster) Check the logs. The RunningJob failure info should be printed to the logs.

2 - If you don't know where the logs are, don't have logging enabled, or don't want to dig through logs, you can rewrite a bit of code. Instead of using JobClient.runJob(), do the equivalent of what runJob() does in your own code, so that when the job fails you get a useful error message:

  public static RunningJob myCustomRunJob(JobConf job) throws Exception {
    JobClient jc = new JobClient(job);
    // Submit the job and keep a handle to it
    RunningJob rj = jc.submitJob(job);
    // monitorAndPrintJob() blocks until completion and returns false on failure
    if (!jc.monitorAndPrintJob(job, rj)) {
      // Surface the real failure info instead of a bare "Job failed!"
      throw new IOException("Job failed with info: " + rj.getFailureInfo());
    }
    return rj;
  }
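As a sketch of how the driver could use this (the helper name myCustomRunJob comes from the answer above), replace the JobClient.runJob(conf) call in BookDriver.main():

  try {
      // Replaces JobClient.runJob(conf); on failure the exception now
      // carries the failure info from the job instead of "Job failed!"
      myCustomRunJob(conf);
  } catch (Exception e) {
      e.printStackTrace();
  }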

My guess is that the underlying problem is that args[0] or args[1] (your input or output file) cannot be found.

Can you show the arguments you are passing?

@patrick In the run configuration I gave the following arguments as input and output: BX-Books.txt out_file

Check whether the path of that file is correct. In your arguments the path should be exactly where the file is stored.

Thanks for your suggestion @aguibert. In the end I found that the exception was caused by the way I split the text file in BookMapper.java.
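Given that closing comment, here is a minimal defensive sketch of BookMapper.map(), assuming the input may contain lines (e.g. headers or malformed rows) that do not split into four "," -delimited fields. The length guard and the MALFORMED_LINES counter are illustrative additions, not part of the original code:

    public void map(LongWritable key, Text value,
            OutputCollector<Text, IntWritable> output, Reporter reporter)
            throws IOException {
        String line = value.toString();
        String[] singleBookData = line.split("\",\"");
        // Guard against lines with fewer than 4 fields, which would otherwise
        // throw ArrayIndexOutOfBoundsException and fail the whole job
        if (singleBookData.length > 3) {
            output.collect(new Text(singleBookData[3]), one);
        } else {
            // Optionally count skipped lines so they show up in the job counters
            reporter.incrCounter("BookMapper", "MALFORMED_LINES", 1);
        }
    }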