Cassandra Hadoop MapReduce:java.lang.ClassCastException:java.util.HashMap不能强制转换为java.nio.ByteBuffer

Cassandra Hadoop MapReduce:java.lang.ClassCastException:java.util.HashMap不能强制转换为java.nio.ByteBuffer,java,hadoop,mapreduce,cassandra,bytebuffer,Java,Hadoop,Mapreduce,Cassandra,Bytebuffer,我正在尝试使用ApacheCassandra创建mapreduce作业。输入日期来自cassandra,输出也来自cassandra 该程序尝试从名为tweetstore的表中选择所有数据,然后插入包含用户名的行数 这是mapreduce作业的主要类: package com.cassandra.hadoop; import java.io.*; import java.lang.*; import java.util.*; import java.nio.ByteBuffer; import

我正在尝试使用ApacheCassandra创建mapreduce作业。输入日期来自cassandra,输出也来自cassandra

该程序尝试从名为tweetstore的表中选择所有数据,然后插入包含用户名的行数

这是mapreduce作业的主要类:

package com.cassandra.hadoop;
import java.io.*;
import java.lang.*;
import java.util.*;
import java.nio.ByteBuffer;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.cassandra.hadoop.ColumnFamilyInputFormat;
import org.apache.cassandra.hadoop.ColumnFamilyOutputFormat;
import org.apache.cassandra.hadoop.ConfigHelper;
import org.apache.cassandra.hadoop.cql3.*;
import org.apache.cassandra.thrift.SlicePredicate;
import org.apache.cassandra.thrift.SliceRange;
import org.apache.cassandra.thrift.IndexExpression;
import org.apache.cassandra.thrift.IndexOperator;
import org.apache.cassandra.utils.ByteBufferUtil;

public class App 
{
static final String KEYSPACE_NAME = "tweet_cassandra_map_reduce";
static final String INPUT_COLUMN_FAMILY = "tweetstore";
static final String OUTPUT_COLUMN_FAMILY = "tweetcount";
static final String COLUMN_NAME = "user";

public static void main( String[] args ) throws IOException, InterruptedException, ClassNotFoundException 
{
    Configuration conf = new Configuration();
    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
    Job job = new Job(conf, "tweet count");
    job.setJarByClass(App.class);

    // mapper configuration.
    job.setMapperClass(TweetMapper.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(IntWritable.class);
    job.setInputFormatClass(ColumnFamilyInputFormat.class);

    // Reducer configuration
    job.setReducerClass(TweetAggregator.class);
    job.setOutputKeyClass(ByteBuffer.class);
    job.setOutputValueClass(List.class);
    job.setOutputFormatClass(ColumnFamilyOutputFormat.class);

    // Cassandra input column family configuration
    ConfigHelper.setInputRpcPort(job.getConfiguration(), "9160");
    ConfigHelper.setInputInitialAddress(job.getConfiguration(), "localhost");
    ConfigHelper.setInputPartitioner(job.getConfiguration(), "Murmur3Partitioner");
    ConfigHelper.setInputColumnFamily(job.getConfiguration(), KEYSPACE_NAME, INPUT_COLUMN_FAMILY);

    job.setInputFormatClass(ColumnFamilyInputFormat.class);
    SlicePredicate slicePredicate = new SlicePredicate();
    slicePredicate.setSlice_range(new SliceRange(ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, Integer.MAX_VALUE));

    // Prepare index expression.
    IndexExpression ixpr = new IndexExpression();ixpr.setColumn_name(ByteBufferUtil.bytes(COLUMN_NAME));
    ixpr.setOp(IndexOperator.EQ);
    ixpr.setValue(ByteBufferUtil.bytes(otherArgs.length > 0 && !StringUtils.isBlank(otherArgs[0])?otherArgs[0]: "mevivs"));

    List<IndexExpression> ixpressions = new ArrayList<IndexExpression>();
    ixpressions.add(ixpr);
    ConfigHelper.setInputRange(job.getConfiguration(), ixpressions);
    ConfigHelper.setInputSlicePredicate(job.getConfiguration(), slicePredicate);

    // Cassandra output family configuration.
    ConfigHelper.setOutputRpcPort(job.getConfiguration(), "9160");
    ConfigHelper.setOutputInitialAddress(job.getConfiguration(), "localhost");
    ConfigHelper.setOutputPartitioner(job.getConfiguration(), "Murmur3Partitioner");
    ConfigHelper.setOutputColumnFamily(job.getConfiguration(), KEYSPACE_NAME, OUTPUT_COLUMN_FAMILY);
    job.setOutputFormatClass(ColumnFamilyOutputFormat.class);
    job.getConfiguration().set("row_key", "key");
    job.waitForCompletion(true);
}
}

请帮忙!!谢谢

看起来减速器的作业设置将bytebuffer作为输出,而不是映射。尝试在作业设置中更改此设置

job.setOutputKeyClass(ByteBuffer.class);
对此

job.setOutputKeyClass(Map<String,ByteBuffer>.class);
job.setOutputKeyClass(Map.class);

无论如何,job.set中的泛型类型。。。。需要在映射器和reducer的泛型类型参数之间对齐,因此请检查以确保它们对齐。

请正确格式化代码,删除大量空行和导入。
15/02/14 16:53:13 WARN hadoop.AbstractColumnFamilyInputFormat: ignoring  jobKeyRange specified without start_key
15/02/14 16:53:14 INFO mapred.JobClient: Running job: job_201502141652_0001
15/02/14 16:53:15 INFO mapred.JobClient:  map 0% reduce 0%
15/02/14 16:53:20 INFO mapred.JobClient:  map 66% reduce 0%
15/02/14 16:53:22 INFO mapred.JobClient:  map 100% reduce 0%
15/02/14 16:53:28 INFO mapred.JobClient:  map 100% reduce 33%
15/02/14 16:53:30 INFO mapred.JobClient: Task Id : attempt_201502141652_0001_r_000000_0, Status : FAILED
java.lang.ClassCastException: java.util.HashMap cannot be cast to java.nio.ByteBuffer
at org.apache.cassandra.hadoop.ColumnFamilyRecordWriter.write(ColumnFamilyRecordWriter.java:50)
at org.apache.hadoop.mapred.ReduceTask$NewTrackingRecordWriter.write(ReduceTask.java:588)
at org.apache.hadoop.mapreduce.TaskInputOutputContext.write(TaskInputOutputContext.java:80)
at com.cassandra.hadoop.TweetAggregator.reduce(TweetAggregator.java:40)
at com.cassandra.hadoop.TweetAggregator.reduce(TweetAggregator.java:20)
at org.apache.hadoop.mapreduce.Reducer.run(Reducer.java:176)
at org.apache.hadoop.mapred.ReduceTask.runNewReducer(ReduceTask.java:650)
at org.apache.hadoop.mapred.ReduceTask.run(ReduceTask.java:418)
at org.apache.hadoop.mapred.Child$4.run(Child.java:255)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1136)
at org.apache.hadoop.mapred.Child.main(Child.java:249)
attempt_201502141652_0001_r_000000_0: writing
job.setOutputKeyClass(ByteBuffer.class);
job.setOutputKeyClass(Map<String,ByteBuffer>.class);