
Java: Cannot run word count on Hadoop. Thanks


I am trying to run the Hadoop word count in Eclipse, but something is wrong with it; it cannot even be debugged.

package test;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

import test.test.Map2.Combine;

public class test {

    public static class Map2 extends Mapper<LongWritable, Text, Text, Text> {
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();
            String values = line.split(" ")[0] + "\\|" + line.split(" ")[1];
            context.write(new Text(" "), new Text(values));
        }

        // Combine is declared as a nested class inside Map2
        public static class Combine extends Reducer<Text, Text, Text, IntWritable> {
            ArrayList<String> top5array = new ArrayList<String>();

            // reduce method start
            public void reduce(Text key, Iterable<Text> values, Context context)
                    throws IOException, InterruptedException {

                // seed the arraylist
                while (top5array.get(4) == null) {
                    top5array.add(values.iterator().next().toString());
                }

                while (values.iterator().hasNext()) {
                    String currentValues = values.iterator().next().toString();
                    String currentkey = currentValues.split("\\|")[0];
                    Integer currentnum = Integer.parseInt(currentValues.split("\\|")[1]);

                    for (int i = 0; i < 5; i++) {
                        Integer numofArray = Integer.parseInt(top5array.get(i).split("\\|")[1]);
                        if (top5array.get(i) != null && currentnum < numofArray) {
                            break;
                        }
                        if (i == 4) {
                            String currentKeyValuePair = currentkey + currentnum.toString();
                            top5array.add(5, currentKeyValuePair);
                            Collections.sort(top5array);
                            top5array.remove(0);
                        }
                    } // for end
                } // while end
            } // reduce method end
        } // Combine end
    } // Map2 end

    public static class Reduce2 extends Reducer<Text, Text, Text, Text> {
        ArrayList<String> top5array = new ArrayList<String>();

        public void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {

            while (top5array.get(4) == null) {
                top5array.add(values.iterator().next().toString());
            }

            while (values.iterator().hasNext()) {
                String currentValues = values.iterator().next().toString();
                String currentkey = currentValues.split("\\|")[0];
                Integer currentnum = Integer.parseInt(currentValues.split("\\|")[1]);

                for (int i = 0; i < 5; i++) {
                    Integer numofArray = Integer.parseInt(top5array.get(i).split("\\|")[1]);
                    if (top5array.get(i) != null && currentnum < numofArray) {
                        break;
                    }
                    if (i == 4) {
                        String currentKeyValuePair = currentkey + currentnum.toString();
                        top5array.add(5, currentKeyValuePair);
                        Collections.sort(top5array);
                        top5array.remove(0);
                    }
                }
            }

            String top5StringConca = "";
            for (int i = 0; i < 5; i++) {
                top5StringConca = top5StringConca + top5array.get(i);
            }
            context.write(new Text(" "), new Text(top5StringConca));
        }
    } // Reduce2 end - the second stage of the MapReduce


 public static void main(String[] args) throws Exception {
   Configuration conf = new Configuration();
   Job job = Job.getInstance(conf);
   job.setOutputKeyClass(Text.class);
   job.setOutputValueClass(Text.class);
   job.setMapperClass(Map2.class);
   job.setReducerClass(Reduce2.class);
   job.setCombinerClass(Combine.class);
   job.setInputFormatClass(TextInputFormat.class);
   job.setOutputFormatClass(TextOutputFormat.class);

   FileInputFormat.addInputPath(job, new Path(args[0]));
   FileOutputFormat.setOutputPath(job, new Path(args[1]));
   job.waitForCompletion(true);

 }

}
When I run it, the problem shows up as the following exception:

WARN  [main] util.NativeCodeLoader (NativeCodeLoader.java:<clinit>(62))
 - Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
How can I solve this problem?

Add the Hadoop jars to your project. If you have already configured Hadoop, you can point your HDFS into Eclipse; for that you have to include the dependencies. If you use Maven, add the Hadoop dependencies in pom.xml, and also add the third-party plugin for Eclipse (this is the wizard). These enable the MapReduce perspective in Eclipse. I added the following dependencies to my project:

<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-core</artifactId>
    <version>1.2.1</version>
    <scope>compile</scope>
</dependency>

<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-common</artifactId>
    <version>2.6.0</version>
</dependency>

<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-client</artifactId>
    <version>2.6.0</version>
</dependency>
You will see that these dependencies themselves bring in the Hadoop jars. It is now up to you whether to use the existing configuration of your cluster or the defaults that ship with the jars.
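For example, to choose explicitly rather than rely on the jar defaults, the driver can set the file system itself. A minimal sketch, assuming a NameNode at hdfs://localhost:9000 (the address and the config file paths below are assumptions; check your own core-site.xml):

Configuration conf = new Configuration();
// Point the job at the cluster's HDFS instead of the jar defaults
// (hdfs://localhost:9000 is an assumed address; use your own).
conf.set("fs.defaultFS", "hdfs://localhost:9000");
// Or load the cluster's own config files (assumed install path):
// conf.addResource(new Path("/etc/hadoop/conf/core-site.xml"));
// conf.addResource(new Path("/etc/hadoop/conf/hdfs-site.xml"));
Job job = Job.getInstance(conf);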

Now try running your Hadoop driver class. You can debug the code easily in Eclipse, and now that your Hadoop perspective is enabled you can add the HDFS path there as well.
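For breakpoint debugging it usually helps to force the job into local mode first, so the mapper, combiner, and reducer all run inside the Eclipse JVM. A sketch under that assumption (both property keys are standard Hadoop 2.x names):

Configuration conf = new Configuration();
// Run the whole job in-process so Eclipse breakpoints in the
// mapper/combiner/reducer are actually hit.
conf.set("mapreduce.framework.name", "local");
// Read input from the local file system while debugging.
conf.set("fs.defaultFS", "file:///");
Job job = Job.getInstance(conf);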


You can also look into remote debugging.
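One common way to do that, sketched here for Hadoop 2.x (the port number is an arbitrary example), is to make the map task JVMs wait for a debugger and then attach Eclipse as a Remote Java Application:

// Make each map task JVM listen for a remote debugger on port 8000
// (example port) and block until Eclipse attaches via
// Run > Debug Configurations > Remote Java Application.
conf.set("mapreduce.map.java.opts",
        "-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=8000");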

Are you running this on Windows?

I am running it on Ubuntu. I would like to know how to debug a MapReduce program in Eclipse. Please take a look. Thanks for your help, but it doesn't work.

Thanks for your reply, but I can't find pom.xml. Can you tell me where it is?

Then you are not using a Maven-based project. Search for the jar files by their artifactId names, download them, and include them in the project, e.g. hadoop-client.jar.

How? Could you post those steps, so they are useful for the future!

My error was not where you suggested; it was just a mistake in my MapReduce, in my map. After modifying the \t I can run the mapper, but now I cannot run the combiner and the reducer either!