Java Hadoop execution error: Type mismatch in key from map: expected org.apache.hadoop.io.Text, received org.apache.hadoop.io.LongWritable


I am implementing the PageRank algorithm on Hadoop and, as the title says, I get the following error when I try to run the code:

Type mismatch in key from map: expected org.apache.hadoop.io.Text, received org.apache.hadoop.io.LongWritable

In my input file I store graph node ids as keys and some information about them as values. The input file has the following format:

1\t 3.4,2,5,6,67

4\t 4.2,77,2,7,83

...

To understand what the error means, I tried using LongWritable as my main variable type, as you can see in the code below. That means I have:

map

reduce

However, I also tried:

map

reduce

And also:

map

reduce

I always get the same error. I think I am having trouble understanding what "expected" and "received" mean in the error. Does it mean that my map function expects a LongWritable from my input file and it received a Text instead? Is there a problem with the format of my input file or with the variable types I use?

Here is the complete code. Can you tell me what to change and where?

import java.io.IOException;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.lang.Object.*;

import org.apache.commons.cli.ParseException;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.configuration.Configuration;
import org.apache.hadoop.security.Credentials;
import org.apache.log4j.*;
import org.apache.commons.logging.*;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;



public class Pagerank
{   


public static class PRMap extends Mapper<LongWritable, LongWritable, LongWritable, LongWritable>
{

    public void map(LongWritable lineNum, LongWritable line, OutputCollector<LongWritable, LongWritable> outputCollector, Reporter reporter) throws IOException, InterruptedException
    {
        if (line.toString().length() == 0) {
            return;
        }

        Text key = new Text();
        Text value = new Text();
        LongWritable valuel = new LongWritable();
        StringTokenizer spline = new StringTokenizer(line.toString(),"\t");
        key.set(spline.nextToken());  
        value.set(spline.nextToken());  

        valuel.set(Long.parseLong(value.toString()));
        outputCollector.collect(lineNum,valuel);


        String info = value.toString();
        String splitter[] = info.split(",");

        if(splitter.length >= 3)
        {
            float f = Float.parseFloat(splitter[0]);
            float pagerank = f / (splitter.length - 2);

            for(int i=2;i<splitter.length;i++)
            {
                LongWritable key2 = new LongWritable();
                LongWritable value2 = new LongWritable();
                long l;

                l = Long.parseLong(splitter[i]);
                key2.set(l);
                //key2.set(splitter[i]);
                value2.set((long)f);

                outputCollector.collect(key2, value2);
            }
        }
    }
}

public static class PRReduce extends Reducer<LongWritable,LongWritable,LongWritable,LongWritable>
{
    private Text result = new Text();
    public void reduce(LongWritable key, Iterator<LongWritable> values,OutputCollector<LongWritable, LongWritable> results, Reporter reporter) throws IOException, InterruptedException
    {

        float pagerank = 0;
        String allinone = ",";
        while(values.hasNext())
        {
            LongWritable temp = values.next();
            String converted = temp.toString();
            String[] splitted = converted.split(",");

            if(splitted.length > 1)
            {                   
                for(int i=1;i<splitted.length;i++)
                {
                    allinone = allinone.concat(splitted[i]);
                    if(i != splitted.length - 1)
                        allinone = allinone.concat(",");
                }
            }
            else
            {
                float f = Float.parseFloat(splitted[0]);
                pagerank = pagerank + f;
            }
        }
        String last = Float.toString(pagerank);
        last = last.concat(allinone);

        LongWritable value = new LongWritable();
        value.set(Long.parseLong(last));

        results.collect(key, value);
    }      
}



public static void main(String[] args) throws Exception
{      


    org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();

    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
    if (otherArgs.length != 2) {
      System.err.println("Usage: wordcount <in> <out>");
      System.exit(2);
    }

    Job job = new Job(conf, "pagerank_itr0");

    job.setJarByClass(Pagerank.class);      
    job.setMapperClass(Pagerank.PRMap.class);       
    job.setReducerClass(Pagerank.PRReduce.class);    


    job.setOutputKeyClass(LongWritable.class);            
    job.setOutputValueClass(LongWritable.class);            
    FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
    FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
    job.waitForCompletion(true);

}
}
You are not setting the Mapper's output classes in the job configuration. Try setting the key and value classes of the map output in the job with:

setMapOutputKeyClass()

setMapOutputValueClass()
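
For example, here is a minimal sketch of the driver with those two calls added. It reuses the Pagerank, PRMap and PRReduce classes from your code and assumes you keep LongWritable keys and values, as your current mapper emits; adjust the classes if you change the mapper's output types:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public static void main(String[] args) throws Exception
{
    Configuration conf = new Configuration();
    Job job = new Job(conf, "pagerank_itr0");

    job.setJarByClass(Pagerank.class);
    job.setMapperClass(Pagerank.PRMap.class);
    job.setReducerClass(Pagerank.PRReduce.class);

    // Declare the map-output types explicitly. When these are not set,
    // Hadoop assumes the map output types are the same as the job's
    // final output types, and any mismatch raises the
    // "Type mismatch in key from map" error.
    // Assumes the mapper emits LongWritable/LongWritable, as PRMap does.
    job.setMapOutputKeyClass(LongWritable.class);
    job.setMapOutputValueClass(LongWritable.class);

    // Types of the final (reducer) output.
    job.setOutputKeyClass(LongWritable.class);
    job.setOutputValueClass(LongWritable.class);

    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    job.waitForCompletion(true);
}

At runtime Hadoop checks every key/value pair the mapper emits against these declared classes, which is where the "expected X, received Y" message comes from: "expected" is the class declared in the job configuration, and "received" is the class of the object your mapper actually emitted.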