
Java: Unable to run word count on Hadoop


I tried to run the Hadoop word count example in Eclipse. I simply added all the jar files from the hadoop directory and the hadoop/lib directory to the project's libraries, but I get the following error:

java.lang.Exception: java.lang.ArrayIndexOutOfBoundsException: 1
at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:400)
Caused by: java.lang.ArrayIndexOutOfBoundsException: 1
at org.orzota.bookx.mappers.MyHadoopMapper.map(MyHadoopMapper.java:23)
at org.orzota.bookx.mappers.MyHadoopMapper.map(MyHadoopMapper.java:1)
at org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:54)
at org.apache.hadoop.mapred.MapTask.runOldMapper(MapTask.java:400)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:335)
at org.apache.hadoop.mapred.LocalJobRunner$Job$MapTaskRunnable.run(LocalJobRunner.java:232)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471)
at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:334)
at java.util.concurrent.FutureTask.run(FutureTask.java:166)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1146)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:679)
2013-10-23 18:59:20,841 INFO  [main] mapreduce.Job (Job.java:monitorAndPrintJob(1288)) Job job_local_0001 running in uber mode : false
2013-10-23 18:59:20,843 INFO  [main] mapreduce.Job (Job.java:monitorAndPrintJob(1295)) map 0% reduce 0%
2013-10-23 18:59:20,847 INFO  [main] mapreduce.Job (Job.java:monitorAndPrintJob(1308)) Job job_local_0001 failed with state FAILED due to: NA
2013-10-23 18:59:20,866 INFO  [main] mapreduce.Job (Job.java:monitorAndPrintJob(1313)) Counters: 0
java.io.IOException: Job failed!
at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:891)
at org.orzota.bookx.mappers.MyHadoopDriver.main(MyHadoopDriver.java:46)
Can you help me fix this problem?

My MyHadoopMapper is:

package org.orzota.bookx.mappers;
import java.io.IOException;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class MyHadoopMapper extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> {
private final static IntWritable one = new IntWritable(1);

public void map(LongWritable _key, Text value, OutputCollector<Text, IntWritable> 
  output, Reporter reporter) throws IOException {
    String st = value.toString();
    String[] bookdata = st.split(",");
    //for (int i=0; i< bookdata.length; i++){
        //System.out.println(bookdata[i]);
    //}
    //if (bookdata.length!=8){
        //System.out.println("Warning, bad Entry.." + bookdata.length);
        //return;
    //}
    output.collect(new Text(bookdata[1]), one);
}

}

The error seems to be in this line:

output.collect(new Text(bookdata[1]), one);

So either of the following would explain the exception you are getting:

  • Your input file contains lines without a comma
  • Your input file contains empty lines

These correspond to the bookdata array having one element or no elements respectively, and hence lead to the ArrayIndexOutOfBoundsException.
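
As a minimal defensive sketch, keeping the asker's old-style mapred API, the mapper can skip such lines instead of crashing (the counter group/name are illustrative additions, not from the original post):

package org.orzota.bookx.mappers;

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class MyHadoopMapper extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> {

    private final static IntWritable one = new IntWritable(1);

    public void map(LongWritable key, Text value, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
        String[] bookdata = value.toString().split(",");
        // Lines with no comma (or empty lines) split into fewer than two fields,
        // so accessing bookdata[1] would throw ArrayIndexOutOfBoundsException.
        if (bookdata.length < 2) {
            reporter.incrCounter("MyHadoopMapper", "BadRecords", 1); // illustrative counter name
            return; // skip the malformed line instead of failing the task
        }
        output.collect(new Text(bookdata[1]), one);
    }
}

Counting the skipped records in a counter also makes it visible in the job output how many input lines were malformed.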

You have to change the map method's parameters to match this method:

public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
    String st = value.toString();
    String[] bookdata = st.split(",");
    context.write(new Text(bookdata[1]), one);
}
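
Note that this Context-based signature only compiles against the new org.apache.hadoop.mapreduce API, so the class must extend Mapper rather than implement the old mapred interface. A minimal sketch under that assumption (the length guard is an addition, mirroring the fix above):

package org.orzota.bookx.mappers;

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class MyHadoopMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    private final static IntWritable one = new IntWritable(1);

    @Override
    public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String[] bookdata = value.toString().split(",");
        if (bookdata.length > 1) { // same guard: skip lines without a comma
            context.write(new Text(bookdata[1]), one);
        }
    }
}

Switching APIs also means the driver has to be rewritten around org.apache.hadoop.mapreduce.Job instead of the old JobClient.runJob call shown in the stack trace.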

How about sharing your MyHadoopMapper?

Dear Amar, I changed the txt file, but now the error is java.io.IOException: Mkdirs failed to create file:/home/text/output/_temporary/0/_temporary/attempt_local_0001_r_000000

That could be a permissions problem, check out this question.

Permissions for what? For creating the directory to save the output? Or does that mean running wordcount.jar from the command line?
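
On the Mkdirs failure: Hadoop needs to create the output directory itself, so a stale directory left over from a previous run, or missing write permission for the user running Eclipse, will both break the job. A hedged driver-side sketch (prepareOutput is a hypothetical helper; the path comes from the comment thread, not the original driver code):

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;

public class OutputPathSetup {
    // Hypothetical helper: clear any stale output directory before submitting the job.
    public static void prepareOutput(JobConf conf, String dir) throws IOException {
        Path out = new Path(dir); // e.g. "/home/text/output" from the comment thread
        FileSystem fs = FileSystem.get(conf);
        if (fs.exists(out)) {
            fs.delete(out, true); // remove results from a previous run
        }
        FileOutputFormat.setOutputPath(conf, out);
    }
}

If the directory still cannot be created after this, check that the user who launched Eclipse has write permission on /home/text.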