Java: getting rows on Spark in a map call

I am trying to aggregate data from a file in HDFS. I need to enrich this data with details stored in a specific table in HBase.

But I get this exception:

org.apache.spark.SparkException: Task not serializable
    at org.apache.spark.util.ClosureCleaner$.ensureSerializable(ClosureCleaner.scala:166)
    at org.apache.spark.util.ClosureCleaner$.clean(ClosureCleaner.scala:158)
    at org.apache.spark.SparkContext.clean(SparkContext.scala:1623)
    at org.apache.spark.rdd.RDD.map(RDD.scala:286)
    at org.apache.spark.api.java.JavaRDDLike$class.mapToPair(JavaRDDLike.scala:113)
    at org.apache.spark.api.java.AbstractJavaRDDLike.mapToPair(JavaRDDLike.scala:46)
    at ......
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:497)
    at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:577)
    at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:174)
    at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:197)
    at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:112)
    at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
Caused by: java.io.NotSerializableException: org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation
Serialization stack:

    at org.apache.spark.serializer.SerializationDebugger$.improveException(SerializationDebugger.scala:38)
    at org.apache.spark.serializer.JavaSerializationStream.writeObject(JavaSerializer.scala:47)
    at org.apache.spark.serializer.JavaSerializerInstance.serialize(JavaSerializer.scala:80)
    at org.apache.spark.util.ClosureCleaner$.ensureSerializable(ClosureCleaner.scala:164)
I know the problem occurs because we try to access HBase inside the map function.
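
For context, here is a minimal sketch of the pattern that triggers this exception (the lines variable, a JavaRDD<String>, and the table name are illustrative): the Connection is created on the driver and captured by the closure passed to mapToPair, so Spark has to serialize it along with the task and fails:

Configuration conf = HBaseConfiguration.create();
// Anti-pattern: this connection lives on the driver...
Connection driverConnection = HConnectionManager.createConnection(conf);

JavaPairRDD<String, Long> pairs = lines.mapToPair(row -> {
    // ...but is referenced here, so the non-serializable connection
    // is pulled into the serialized task closure.
    Table table = driverConnection.getTable(TableName.valueOf("Table"));
    // ... per-row lookup against the table ...
    return new Tuple2<>(row, 1L);
});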

My question is: how can I enrich my RDD with values contained in the HBase table?

For example, the file in HDFS is a csv:

Name;Number1;Number2
toto;1;2
In HBase, we have data associated with the name toto.

I need to compute the sum of Number1 and Number2 (that is the easy part) and aggregate it with the data from the table. For example:

the key for the reducer would be tata, retrieved by fetching the rowkey toto in the HBase table.
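
For the first, easy part, a sketch of the per-name sum with the Spark Java API (the file path and the sc variable, a JavaSparkContext, are assumptions):

// Parse "Name;Number1;Number2" lines and sum the two numbers per name.
JavaRDD<String> lines = sc.textFile("hdfs:///path/to/file.csv");   // illustrative path
JavaPairRDD<String, Long> sums = lines
        .filter(line -> !line.startsWith("Name;"))                 // drop the header
        .mapToPair(line -> {
            String[] fields = line.split(";");
            long sum = Long.parseLong(fields[1]) + Long.parseLong(fields[2]);
            return new Tuple2<>(fields[0], sum);                   // ("toto", 3)
        })
        .reduceByKey(Long::sum);                                   // one sum per name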


Any suggestions?

Thanks to your suggestions, a colleague finally got it working:

Here is the code of the map function that aggregates the file with the data from the HBase table:

private final Logger LOGGER = LoggerFactory.getLogger(AbtractGetSDMapFunction.class);




/**
 * Namespace name
 */
public static final String NAMESPACE = "NameSpace";
private static final String ID = "id";
private Connection connection = null;
private static final String LINEID = "l";
private static final String CHANGE_LINE_ID = "clid";
private static final String CHANGE_LINE_DATE = "cld";
private String constClientPortHBase;
private String constQuorumHBase;
private int constTimeOutHBase;
private String constZnodeHBase;
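
// Builds the HBase connection from the serializable configuration fields.
// It must run on the executor, from call(), never on the driver.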
public void initConnection() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("timeout", constTimeOutHBase);
    conf.set("hbase.zookeeper.quorum", constQuorumHBase);
    conf.set("hbase.zookeeper.property.clientPort", constClientPortHBase);
    conf.set("zookeeper.znode.parent", constZnodeHBase);
    try {
        connection = HConnectionManager.createConnection(conf);
    } catch (Exception e) {
        LOGGER.error("Error in the configuration of the connection with HBase.", e);
    }
}

public Tuple2<String, myInput> call(String row) throws Exception {
    // This is where the connection to HBase must be initialized,
    // on the executor, to avoid the serialization problem.
    initConnection();

    // ... do your work ...
    State state = getCurrentState(myInput.getKey());
    // ... do your work ...
}

public AbtractGetSDMapFunction( String constClientPortHBase, String constQuorumHBase, String constZnodeHBase, int constTimeOutHBase) {
    this.constClientPortHBase = constClientPortHBase;
    this.constQuorumHBase = constQuorumHBase;
    this.constZnodeHBase = constZnodeHBase;
    this.constTimeOutHBase = constTimeOutHBase;
}

/***************************************************************************/
/**
 * Table Name
 */
public static final String TABLE_NAME = "Table";

public State getCurrentState(String key) throws TechnicalException {
    LOGGER.debug("start key {}", key);
    String buildRowKey = buildRowKey(key);
    State currentState = new State();
    String columnFamily = State.getColumnFamily();
    if (!StringUtils.isEmpty(buildRowKey) && null != columnFamily) {
        try {
            Get scan = new Get(Bytes.toBytes(buildRowKey));
            scan.addFamily(Bytes.toBytes(columnFamily));
            addColumnsToScan(scan, columnFamily, ID);                
            Result result = getTable().get(scan);
            currentState.setCurrentId(getLong(result, columnFamily, ID));              
        } catch (IOException ex) {
            throw new TechnicalException(ex);
        }
        LOGGER.debug("end ");
    }
    return currentState;
}

/***********************************************************/

private Table getTable() throws IOException, TechnicalException {
    Connection connection = getConnection();
    // retrieve the table from the shared connection
    if (connection != null) {
        return connection.getTable(TableName.valueOf(NAMESPACE, TABLE_NAME));
    } else {
        throw new TechnicalException("Connection to HBase not available");
    }
}

/****************************************************************/

private Long getLong(Result result, String columnFamily, String qualifier) {
    Long toLong = null;
    if (null != columnFamily && null != qualifier) {
        byte[] value = result.getValue(Bytes.toBytes(columnFamily), Bytes.toBytes(qualifier));
        toLong = (value != null ? Bytes.toLong(value) : null);
    }
    return toLong;
}

private String getString(Result result, String columnFamily, String qualifier) {
    String toString = null;
    if (null != columnFamily && null != qualifier) {
        byte[] value = result.getValue(Bytes.toBytes(columnFamily), Bytes.toBytes(qualifier));
        toString = (value != null ? Bytes.toString(value) : null);
    }
    return toString;
}


public Connection getConnection() {
    return connection;
}

public void setConnection(Connection connection) {
    this.connection = connection;
}



private void addColumnsToScan(Get scan, String family, String qualifier) {
    if (org.apache.commons.lang.StringUtils.isNotEmpty(family) && org.apache.commons.lang.StringUtils.isNotEmpty(qualifier)) {
        scan.addColumn(Bytes.toBytes(family), Bytes.toBytes(qualifier));
    }
}

private String buildRowKey(String key) throws TechnicalException {
    StringBuilder rowKeyBuilder = new StringBuilder();
    rowKeyBuilder.append(HashFunction.makeSHA1Hash(key));
    return rowKeyBuilder.toString();
}
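
For completeness, this is roughly how the function plugs into the driver program (GetSDMapFunction stands for a concrete subclass of AbtractGetSDMapFunction implementing PairFunction<String, String, myInput>; the configuration values are assumptions). Only the four String/int fields travel with the closure, and all of them are serializable; the Connection itself is only built inside call(), on the executor:

// Driver-side sketch: only serializable settings are captured by the closure.
String clientPort = "2181";            // hbase.zookeeper.property.clientPort
String quorum = "zk1,zk2,zk3";         // hbase.zookeeper.quorum
String znodeParent = "/hbase";         // zookeeper.znode.parent
int timeout = 60000;                   // connection timeout

JavaPairRDD<String, myInput> enriched = sc
        .textFile("hdfs:///path/to/file.csv")
        .mapToPair(new GetSDMapFunction(clientPort, quorum, znodeParent, timeout));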

Comments on the question:

Please check whether my answer is correct; moving things like the HTable inside the closure will fix it.

How big is the number of mappings, basically the number of keys used for the data in HBase?

Thanks for your answer, I will try this, Ram Ghadiyaram. The table can have millions of keys, and each key can have thousands of columns.