Warning: file_get_contents(/data/phpspider/zhask/data//catemap/8/sorting/2.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
Java 如何在mapReduce Hadoop框架中对值(及其对应的键)进行排序?_Java_Sorting_Hadoop_Parallel Processing_Mapreduce - Fatal编程技术网

Java 如何在mapReduce Hadoop框架中对值(及其对应的键)进行排序?

Java 如何在mapReduce Hadoop框架中对值(及其对应的键)进行排序?,java,sorting,hadoop,parallel-processing,mapreduce,Java,Sorting,Hadoop,Parallel Processing,Mapreduce,我正在尝试使用Hadoop mapReduce对输入数据进行排序。问题是我只能按键对键值对进行排序,而我试图按值对它们进行排序。每个值的键都是用一个计数器创建的,因此第一个值(234)有键1,第二个值(944)有键2,等等。你知道我该怎么做并按值排序输入吗 import java.io.IOException; import java.util.StringTokenizer; import java.util.ArrayList; import java.util.List; import

我正在尝试使用Hadoop mapReduce对输入数据进行排序。问题是我只能按键对键值对进行排序,而我试图按值对它们进行排序。每个值的键都是用一个计数器创建的,因此第一个值(234)有键1,第二个值(944)有键2,等等。你知道我该怎么做并按值排序输入吗


import java.io.IOException;
import java.util.StringTokenizer;
import java.util.ArrayList;
import java.util.List;
import java.util.Collections;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class Sortt {

  public static class TokenizerMapper
       extends Mapper<Object, Text, Text ,IntWritable >{
    int k=0;
    int v=0;
    int va=0;
    public Text ke = new Text();
   private final static IntWritable val = new IntWritable();

    public void map(Object key, Text value, Context context) throws 
    IOException, InterruptedException 
{
      StringTokenizer itr = new StringTokenizer(value.toString());


        while (itr.hasMoreTokens()) 
{
        val.set(Integer.parseInt(itr.nextToken()));
        v=val.get();
        k=k+1;
        ke.set(Integer.toString(k));

        context.write(ke, new IntWritable(v));}
}


    }


  public static class SortReducer
       extends Reducer<Text,IntWritable,Text,IntWritable> {
        int a=0;
        int v=0;
       private IntWritable va = new IntWritable();
    public void reduce(Text key, Iterable<IntWritable> values,
                       Context context
                       ) throws IOException, InterruptedException {
    List<Integer> sorted = new ArrayList<Integer>();

    for (IntWritable val : values) {
           a= val.get();
          sorted.add(a);

}
    Collections.sort(sorted);
    for(int i=0;i<sorted.size();i++) {
    v=sorted.get(i);
    va.set(v);

     context.write(key, va);
}
    }
  }

  public static void main(String[] args) throws Exception {
   long startTime=0;
   long Time=0;
   long duration=0;
Configuration conf = new Configuration();
    Job job = Job.getInstance(conf, "sort");
    job.setJarByClass(Sortt.class);
    job.setMapperClass(TokenizerMapper.class);
    job.setCombinerClass(SortReducer.class);
    job.setReducerClass(SortReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
    Time = System.currentTimeMillis();
  //duration = (endTime-startTime)/1000000;
    System.out.println("time="+Time+"MS");
  }
}

导入java.io.IOException;
导入java.util.StringTokenizer;
导入java.util.ArrayList;
导入java.util.List;
导入java.util.Collections;
导入org.apache.hadoop.conf.Configuration;
导入org.apache.hadoop.fs.Path;
导入org.apache.hadoop.io.IntWritable;
导入org.apache.hadoop.io.Text;
导入org.apache.hadoop.mapreduce.Job;
导入org.apache.hadoop.mapreduce.Mapper;
导入org.apache.hadoop.mapreduce.Reducer;
导入org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
导入org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
公共类索特{
公共静态类令牌映射器
扩展映射器{
int k=0;
int v=0;
int-va=0;
公共文本ke=新文本();
private final static intwriteable val=new intwriteable();
公共void映射(对象键、文本值、上下文)抛出
IOException,InterruptedException
{
StringTokenizer itr=新的StringTokenizer(value.toString());
而(itr.hasMoreTokens())
{
val.set(Integer.parseInt(itr.nextToken());
v=val.get();
k=k+1;
ke.set(Integer.toString(k));
write(ke,新的intwriteable(v));}
}
}
公共静态类排序器
伸缩减速机{
int a=0;
int v=0;
私有IntWritable va=新的IntWritable();
public void reduce(文本键、Iterable值、,
语境
)抛出IOException、InterruptedException{
列表排序=新建ArrayList();
for(可写入值:值){
a=val.get();
已排序。添加(a);
}
集合。排序(已排序);

对于(int i=0;i<sorted.size();i++) { ... }

MapReduce 输出默认情况下按键排序；要按值排序，可以使用二次排序（secondary sort）。
二次排序是根据值对减速机输出进行排序的最佳技术之一,这是一个完整的示例。

这称为“二次排序”,幸运的是,在线上有很多关于它的内容。这是一个好的开端什么将是第二个键?您需要根据排序值创建自定义键类作为键