Exception while executing an HBase scan from Apache Spark


I am trying to read an HBase table into a Spark RDD with a distributed scan. My simple code is shown below:

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.spark.JavaHBaseContext;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;

import scala.Tuple2;

public class DistributedHBaseScanToRddDemo {

    public static void main(String[] args) {
        JavaSparkContext jsc = getJavaSparkContext("hbasetable1");
        Configuration hbaseConf = getHbaseConf(0, "", "");
        JavaHBaseContext javaHbaseContext = new JavaHBaseContext(jsc, hbaseConf);

        // Distributed scan over the whole table, one RDD partition per region.
        Scan scan = new Scan();
        scan.setCaching(100);

        JavaRDD<Tuple2<ImmutableBytesWritable, Result>> javaRdd =
                  javaHbaseContext.hbaseRDD(TableName.valueOf("hbasetable1"), scan);

        List<String> results = javaRdd.map(new ScanConvertFunction()).collect();
        System.out.println("Result Size: " + results.size());
    }

    public static Configuration getHbaseConf(int pTimeout, String pQuorumIP, String pClientPort)
    {
        // Note: the parameters are currently ignored; the values below are hard-coded.
        Configuration hbaseConf = HBaseConfiguration.create();
        hbaseConf.setInt("timeout", 120000);
        hbaseConf.set("hbase.zookeeper.quorum", "10.56.36.14");
        hbaseConf.set("hbase.zookeeper.property.clientPort", "2181");
        return hbaseConf;
    }

    public static JavaSparkContext getJavaSparkContext(String pTableName)
    {
        SparkConf sparkConf = new SparkConf().setAppName("JavaHBaseBulkPut" + pTableName);
        sparkConf.setMaster("local");
        sparkConf.set("spark.testing.memory", "471859200");
        JavaSparkContext jsc = new JavaSparkContext(sparkConf);

        return jsc;
    }

    // Converts each (row key, Result) tuple to the row key as a String.
    private static class ScanConvertFunction implements Function<Tuple2<ImmutableBytesWritable, Result>, String> {
        public String call(Tuple2<ImmutableBytesWritable, Result> v1) throws Exception {
            return Bytes.toString(v1._1().copyBytes());
        }
    }
}
I get the following exception:

Exception in thread "main" org.apache.hadoop.hbase.DoNotRetryIOException: /10.56.48.219:16020 is unable to read call parameter from client 10.56.49.148; java.lang.UnsupportedOperationException: GetRegionLoad
    at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
    at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
    at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
    at java.lang.reflect.Constructor.newInstance(Constructor.java:422)
    at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:93)
    at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:83)
    at org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:368)
    at org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.getRemoteException(ProtobufUtil.java:345)
    at org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.getRegionLoad(ProtobufUtil.java:1746)
    at org.apache.hadoop.hbase.client.HBaseAdmin.getRegionLoad(HBaseAdmin.java:2089)
    at org.apache.hadoop.hbase.mapreduce.RegionSizeCalculator.init(RegionSizeCalculator.java:82)
    at org.apache.hadoop.hbase.mapreduce.RegionSizeCalculator.<init>(RegionSizeCalculator.java:60)
    at org.apache.hadoop.hbase.mapreduce.TableInputFormatBase.oneInputSplitPerRegion(TableInputFormatBase.java:293)
    at org.apache.hadoop.hbase.mapreduce.TableInputFormatBase.getSplits(TableInputFormatBase.java:257)
    at org.apache.hadoop.hbase.mapreduce.TableInputFormat.getSplits(TableInputFormat.java:254)
    at org.apache.spark.rdd.NewHadoopRDD.getPartitions(NewHadoopRDD.scala:121)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:248)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:246)
    at scala.Option.getOrElse(Option.scala:121)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:246)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:248)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:246)
    at scala.Option.getOrElse(Option.scala:121)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:246)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:248)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:246)
    at scala.Option.getOrElse(Option.scala:121)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:246)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1911)
    at org.apache.spark.rdd.RDD$$anonfun$collect$1.apply(RDD.scala:893)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
    at org.apache.spark.rdd.RDD.withScope(RDD.scala:358)
    at org.apache.spark.rdd.RDD.collect(RDD.scala:892)
    at org.apache.spark.api.java.JavaRDDLike$class.collect(JavaRDDLike.scala:360)
    at org.apache.spark.api.java.AbstractJavaRDDLike.collect(JavaRDDLike.scala:45)
    at com.myproj.poc.sparkhbaseneo4j.DistributedHBaseScanToRddDemo.main(DistributedHBaseScanToRddDemo.java:32)
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.DoNotRetryIOException): /10.56.48.219:16020 is unable to read call parameter from client 10.56.49.148; java.lang.UnsupportedOperationException: GetRegionLoad
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:387)
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:95)
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:410)
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:406)
    at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:103)
    at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:118)
    at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:161)
    at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:191)
    at org.apache.hadoop.hbase.shaded.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
    at org.apache.hadoop.hbase.shaded.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
    at org.apache.hadoop.hbase.shaded.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)
    at org.apache.hadoop.hbase.shaded.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:310)
    at org.apache.hadoop.hbase.shaded.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:284)
    at org.apache.hadoop.hbase.shaded.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
    at org.apache.hadoop.hbase.shaded.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
    at org.apache.hadoop.hbase.shaded.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)
    at org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:287)
    at org.apache.hadoop.hbase.shaded.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
    at org.apache.hadoop.hbase.shaded.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
    at org.apache.hadoop.hbase.shaded.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)
    at org.apache.hadoop.hbase.shaded.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1334)
    at org.apache.hadoop.hbase.shaded.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
    at org.apache.hadoop.hbase.shaded.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
    at org.apache.hadoop.hbase.shaded.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:926)
    at org.apache.hadoop.hbase.shaded.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:134)
    at org.apache.hadoop.hbase.shaded.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:644)
    at org.apache.hadoop.hbase.shaded.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:579)
    at org.apache.hadoop.hbase.shaded.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:496)
    at org.apache.hadoop.hbase.shaded.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:458)
    at org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:858)
    at org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.DefaultThreadFactory$DefaultRunnableDecorator.run(DefaultThreadFactory.java:138)
    at java.lang.Thread.run(Thread.java:745)
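From the trace, the scan itself never starts: the job fails while TableInputFormatBase computes input splits, because RegionSizeCalculator calls HBaseAdmin.getRegionLoad and the region server at 10.56.48.219:16020 rejects the GetRegionLoad call. That usually points to the HBase client libraries on the Spark side being newer than the HBase server. A minimal sketch of a possible workaround, assuming the only failing call is this admin RPC and region sizes are not needed for split calculation, is to disable the region size calculator in the configuration that is passed to JavaHBaseContext:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class HBaseConfWithoutRegionSizes {

    // Hedged sketch, not a confirmed fix: it assumes the GetRegionLoad failure
    // comes only from RegionSizeCalculator during split computation.
    public static Configuration getHbaseConf() {
        Configuration hbaseConf = HBaseConfiguration.create();
        hbaseConf.set("hbase.zookeeper.quorum", "10.56.36.14");
        hbaseConf.set("hbase.zookeeper.property.clientPort", "2181");
        // Read by org.apache.hadoop.hbase.mapreduce.RegionSizeCalculator (default true);
        // false skips the HBaseAdmin.getRegionLoad call and leaves every split size at 0.
        hbaseConf.setBoolean("hbase.regionsizecalculator.enable", false);
        return hbaseConf;
    }
}

This only avoids the one admin RPC; if the client and server versions really are mismatched, other calls may still fail, so aligning the hbase-spark/hbase-client versions with the server remains the proper fix.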
线程“main”org.apache.hadoop.hbase.donotretryoException:/10.56.48.219:16020中的异常无法从客户端10.56.49.148读取调用参数;java.lang.UnsupportedOperationException:GetRegionLoad 位于sun.reflect.NativeConstructorAccessorImpl.newInstance0(本机方法) 位于sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62) 在sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) 位于java.lang.reflect.Constructor.newInstance(Constructor.java:422) 位于org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.InstanceException(RemoteWithExtrasException.java:93) 位于org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:83) 位于org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.makeIOExceptionOffException(ProtobufUtil.java:368) 位于org.apache.hadoop.hbase.shade.protobuf.ProtobufUtil.getRemoteException(ProtobufUtil.java:345) 位于org.apache.hadoop.hbase.shade.protobuf.ProtobufUtil.getRegionLoad(ProtobufUtil.java:1746) 位于org.apache.hadoop.hbase.client.HBaseAdmin.getRegionLoad(HBaseAdmin.java:2089) 位于org.apache.hadoop.hbase.mapreduce.RegionSizeCalculator.init(RegionSizeCalculator.java:82) 位于org.apache.hadoop.hbase.mapreduce.RegionSizeCalculator.(RegionSizeCalculator.java:60) 位于org.apache.hadoop.hbase.mapreduce.TableInputFormatBase.oneInputSplitPerRegion(TableInputFormatBase.java:293) 位于org.apache.hadoop.hbase.mapreduce.TableInputFormatBase.getSplits(TableInputFormatBase.java:257) 位于org.apache.hadoop.hbase.mapreduce.TableInputFormat.getSplits(TableInputFormat.java:254) 位于org.apache.spark.rdd.NewHadoopRDD.getPartitions(NewHadoopRDD.scala:121) 位于org.apache.spark.rdd.rdd$$anonfun$partitions$2.apply(rdd.scala:248) 位于org.apache.spark.rdd.rdd$$anonfun$partitions$2.apply(rdd.scala:246) 位于scala.Option.getOrElse(Option.scala:121) 位于org.apache.spark.rdd.rdd.partitions(rdd.scala:246) 位于org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35) 位于org.apache.spark.rdd.rdd$$anonfun$partitions$2.apply(rdd.scala:248) 位于org.apache.spark.rdd.rdd$$anonfun$partitions$2.apply(rdd.scala:246) 位于scala.Option.getOrElse(Option.scala:121) 位于org.apache.spark.rdd.rdd.partitions(rdd.scala:246) 位于org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35) 位于org.apache.spark.rdd.rdd$$anonfun$partitions$2.apply(rdd.scala:248) 位于org.apache.spark.rdd.rdd$$anonfun$partitions$2.apply(rdd.scala:246) 位于scala.Option.getOrElse(Option.scala:121) 位于org.apache.spark.rdd.rdd.partitions(rdd.scala:246) 位于org.apache.spark.SparkContext.runJob(SparkContext.scala:1911) 位于org.apache.spark.rdd.rdd$$anonfun$collect$1.apply(rdd.scala:893) 位于org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151) 位于org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112) 位于org.apache.spark.rdd.rdd.withScope(rdd.scala:358) 位于org.apache.spark.rdd.rdd.collect(rdd.scala:892) 位于org.apache.spark.api.java.JavaRDDLike$class.collect(JavaRDDLike.scala:360) 位于org.apache.spark.api.java.AbstractJavaRDDLike.collect(JavaRDDLike.scala:45) 位于com.myproj.poc.sparkhbaseneo4j.distributedHBaseContordDemo.main(distributedHBaseContordDeDemo.java:32) 原因:org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.donotretryoException):/10.56.48.219:16020无法从客户端10.56.49.148读取调用参数;java.lang.UnsupportedOperationException:GetRegionLoad 位于org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:387) 在org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:95) 
在org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:410) 位于org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:406) 位于org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:103) 位于org.apache.hadoop.hbase.ipc.Call.setException(Call.java:118) 位于org.apache.hadoop.hbase.ipc.nettyrpcdublexhandler.readResponse(nettyrpcdublexhandler.java:161) 在org.apache.hadoop.hb
<repositories>
  <repository>
    <id>cloudera</id>
    <name>cloudera</name>
    <url>https://repository.cloudera.com/content/repositories/releases/</url>
  </repository>
</repositories>

<dependency>
  <groupId>org.apache.hbase</groupId>
  <artifactId>hbase-spark</artifactId>
  <version>${hbase-spark.version}</version>
</dependency>
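The ${hbase-spark.version} placeholder has to be defined in the POM's <properties> section; the value actually used is not shown above, so the snippet below is only a hypothetical illustration. Whatever release is chosen from the Cloudera repository, the hbase-spark artifact (and the hbase-client it pulls in) should match the HBase version running on the cluster, since the GetRegionLoad failure above is typical of a client/server version mismatch.

<!-- Hypothetical illustration only: the real value is not shown in the question.
     Pick a release that matches the HBase server version on the cluster. -->
<properties>
  <hbase-spark.version>VERSION_MATCHING_YOUR_HBASE_SERVER</hbase-spark.version>
</properties>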