How to submit WordCount.jar to Hadoop from a Java servlet


I have a WordCount.jar stored on the local Linux file system, and a file containing a set of words stored in HDFS. How can I run this WordCount.jar from a servlet, and specify the input and output paths inside the servlet?

package org.apache.hadoop.examples;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class WordCount {

  public static class TokenizerMapper 
       extends Mapper<Object, Text, Text, IntWritable>{

    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();

    public void map(Object key, Text value, Context context
                    ) throws IOException, InterruptedException {
      StringTokenizer itr = new StringTokenizer(value.toString());
      while (itr.hasMoreTokens()) {
        word.set(itr.nextToken());
        context.write(word, one);
      }
    }
  }

  public static class IntSumReducer 
       extends Reducer<Text,IntWritable,Text,IntWritable> {
    private IntWritable result = new IntWritable();

    public void reduce(Text key, Iterable<IntWritable> values, 
                       Context context
                       ) throws IOException, InterruptedException {
      int sum = 0;
      for (IntWritable val : values) {
        sum += val.get();
      }
      result.set(sum);
      context.write(key, result);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
    if (otherArgs.length < 2) {
      System.err.println("Usage: wordcount <in> [<in>...] <out>");
      System.exit(2);
    }
    Job job = Job.getInstance(conf, "word count");
    job.setJarByClass(WordCount.class);
    job.setMapperClass(TokenizerMapper.class);
    job.setCombinerClass(IntSumReducer.class);
    job.setReducerClass(IntSumReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    for (int i = 0; i < otherArgs.length - 1; ++i) {
      FileInputFormat.addInputPath(job, new Path(otherArgs[i]));
    }
    FileOutputFormat.setOutputPath(job,
      new Path(otherArgs[otherArgs.length - 1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
The usual approach follows these steps:

  • Put the jar containing the WordCount classes, along with the necessary Hadoop client jars, on the server's classpath
  • Pass the input and output directories as HTTP request parameters
  • Parse those directories from the HTTP request in the servlet's doGet() method
  • Submit the job from there (see the sketch after this list)
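
A minimal sketch of these steps, assuming the javax.servlet API and that the WordCount classes from the question are on the web application's classpath. The servlet name, the "input"/"output" parameter names, and the cluster addresses (hdfs://namenode:9000, YARN) are illustrative placeholders, not values from the original post:

import java.io.IOException;
import java.io.PrintWriter;

import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.examples.WordCount;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCountServlet extends HttpServlet {

  @Override
  protected void doGet(HttpServletRequest req, HttpServletResponse resp)
      throws ServletException, IOException {
    // Read the HDFS input/output directories from the request parameters.
    String input = req.getParameter("input");
    String output = req.getParameter("output");

    Configuration conf = new Configuration();
    // Placeholder cluster settings; replace with your own values, or put
    // core-site.xml/mapred-site.xml on the classpath instead.
    conf.set("fs.defaultFS", "hdfs://namenode:9000");
    conf.set("mapreduce.framework.name", "yarn");

    try {
      Job job = Job.getInstance(conf, "word count");
      job.setJarByClass(WordCount.class);
      job.setMapperClass(WordCount.TokenizerMapper.class);
      job.setCombinerClass(WordCount.IntSumReducer.class);
      job.setReducerClass(WordCount.IntSumReducer.class);
      job.setOutputKeyClass(Text.class);
      job.setOutputValueClass(IntWritable.class);
      FileInputFormat.addInputPath(job, new Path(input));
      FileOutputFormat.setOutputPath(job, new Path(output));

      // Do NOT call System.exit() here: that would shut down the servlet
      // container's JVM (see the comment at the end of this thread).
      boolean ok = job.waitForCompletion(true);

      PrintWriter out = resp.getWriter();
      out.println(ok ? "Job succeeded" : "Job failed");
    } catch (ClassNotFoundException | InterruptedException e) {
      throw new ServletException(e);
    }
  }
}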

  • Hi, thanks for your answer. I just solved this problem by following an article: first add the Hadoop configuration files and jars to the classpath, then use the org.apache.hadoop.util.RunJar.main() method to submit the jar to the MapReduce framework. I ran into one problem: System.exit(job.waitForCompletion(true) ? 0 : 1); shuts down the JVM, taking Tomcat down with it. Once I removed the System.exit() call, it worked fine.
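
For completeness, a hedged sketch of the RunJar-based submission the comment describes; the local jar path, main class, and HDFS URLs are example values:

import org.apache.hadoop.util.RunJar;

public class SubmitWordCount {
  public static void main(String[] args) throws Throwable {
    // Equivalent to running: hadoop jar /path/to/WordCount.jar <mainClass> <in> <out>
    // All paths below are example values.
    String[] runJarArgs = {
        "/home/user/WordCount.jar",              // jar on the local Linux file system
        "org.apache.hadoop.examples.WordCount",  // main class inside the jar
        "hdfs://namenode:9000/user/input",       // HDFS input directory
        "hdfs://namenode:9000/user/output"       // HDFS output directory (must not already exist)
    };
    // RunJar unpacks the jar and invokes WordCount.main() in the current JVM,
    // which is why WordCount must not call System.exit() when run this way.
    RunJar.main(runJarArgs);
  }
}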