Warning: file_get_contents(/data/phpspider/zhask/data//catemap/3/apache-spark/5.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
spark:的配置单元JDBC连接无法将列1转换为输入字符串的long:java.lang.NumberFormatException:_Java_Apache Spark_Jdbc_Hive_Apache Spark Sql - Fatal编程技术网

spark:的配置单元JDBC连接无法将列1转换为输入字符串的long:java.lang.NumberFormatException:

spark:的配置单元JDBC连接无法将列1转换为输入字符串的long:java.lang.NumberFormatException:,java,apache-spark,jdbc,hive,apache-spark-sql,Java,Apache Spark,Jdbc,Hive,Apache Spark Sql,我使用下面的代码从hive导入数据,使用jdbc来spark final SparkConf sparkConf = new SparkConf().setAppName("HiveToSparkConnection"); final JavaSparkContext sc = new JavaSparkContext(sparkConf); final SQLContext sqlContext = new SQLContext(sc); System.out.println("Before

我使用下面的代码从hive导入数据,使用jdbc来spark

// Connect Spark to a Hive table over JDBC and preview the first rows.
final SparkConf sparkConf = new SparkConf().setAppName("HiveToSparkConnection");
final JavaSparkContext sc = new JavaSparkContext(sparkConf);
final SQLContext sqlContext = new SQLContext(sc);

System.out.println("Before loading jar");

sc.addJar("lib/hive-jdbc-2.3.2.jar");
System.out.println("After loading jar");

// FIX for the NumberFormatException ("For input string: \"total_pkg.total\""):
// Spark's default JDBC dialect quotes column names with double quotes, so it
// generates e.g. SELECT "total_pkg.total" FROM <table>. HiveQL treats a
// double-quoted token as a STRING LITERAL, so every row comes back as the
// literal text "total_pkg.total", which then cannot be converted to long.
// Register a dialect that quotes Hive identifiers with backticks instead.
// Requires: import org.apache.spark.sql.jdbc.JdbcDialect;
//           import org.apache.spark.sql.jdbc.JdbcDialects;
JdbcDialects.registerDialect(new JdbcDialect() {
    @Override
    public boolean canHandle(String url) {
        // Only apply this dialect to HiveServer2 JDBC URLs.
        return url != null && url.startsWith("jdbc:hive2");
    }

    @Override
    public String quoteIdentifier(String colName) {
        // HiveQL identifier quoting uses backticks, not double quotes.
        return "`" + colName + "`";
    }
});

Map<String, String> dbConfig = new HashMap<>();
dbConfig.put("url", "<connection-string>");
dbConfig.put("dbtable", "<table-name>");
dbConfig.put("user", "<user>");
dbConfig.put("password", "<pass>");
dbConfig.put("driver", "org.apache.hive.jdbc.HiveDriver");
dbConfig.put("fetchsize", "20");
System.out.println("After connection");

DataFrameReader dFrameReader = sqlContext.read().format("jdbc").options(dbConfig);
Dataset<Row> dataSet = dFrameReader.load();

System.out.println("Print columns");
dataSet.show(5);
我经常犯以下错误。即使尝试使用仅显示空行的字符串表

java.sql.SQLException: Cannot convert column 1 to long: java.lang.NumberFormatException: For input string: "total_pkg.total"
    at org.apache.hive.jdbc.HiveBaseResultSet.getLong(HiveBaseResultSet.java:372)
    at org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$$anonfun$org$apache$spark$sql$execution$datasources$jdbc$JdbcUtils$$makeGetter$8.apply(JdbcUtils.scala:426)
    at org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$$anonfun$org$apache$spark$sql$execution$datasources$jdbc$JdbcUtils$$makeGetter$8.apply(JdbcUtils.scala:425)
    at org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$$anon$1.getNext(JdbcUtils.scala:347)
    at org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$$anon$1.getNext(JdbcUtils.scala:329)
    at org.apache.spark.util.NextIterator.hasNext(NextIterator.scala:73)
    at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
    at org.apache.spark.util.CompletionIterator.hasNext(CompletionIterator.scala:32)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
    at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
    at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$10$$anon$1.hasNext(WholeStageCodegenExec.scala:614)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:253)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:247)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$25.apply(RDD.scala:830)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$25.apply(RDD.scala:830)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
    at org.apache.spark.scheduler.Task.run(Task.scala:109)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.NumberFormatException: For input string: "total_pkg.total"
    at java.lang.NumberFormatException.forInputString(NumberFormatException.java:65)
    at java.lang.Long.parseLong(Long.java:589)
    at java.lang.Long.valueOf(Long.java:803)
    at org.apache.hive.jdbc.HiveBaseResultSet.getLong(HiveBaseResultSet.java:368)

我也尝试过thrift,但它没有连接到服务器。

什么模式是
printSchema()
output?当你查看你的蜂巢表(spark外)时,你知道有一个
长的
/
数字
列,它的值为
total_pkg.total
？是的，total_pkg.total是Hive表中的一个long类型列。printSchema打印的是dbConfig.put("dbtable", "<table-name>")中指定的那张表的模式。好的，让我们再试一次……Spark抱怨它在期待一个数字的位置上遇到了值
"total_pkg.total"
。这是否指向某个地方?在某个SQL查询中是否存在意外引用的字符串?
java.sql.SQLException: Cannot convert column 1 to long: java.lang.NumberFormatException: For input string: "total_pkg.total"
    at org.apache.hive.jdbc.HiveBaseResultSet.getLong(HiveBaseResultSet.java:372)
    at org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$$anonfun$org$apache$spark$sql$execution$datasources$jdbc$JdbcUtils$$makeGetter$8.apply(JdbcUtils.scala:426)
    at org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$$anonfun$org$apache$spark$sql$execution$datasources$jdbc$JdbcUtils$$makeGetter$8.apply(JdbcUtils.scala:425)
    at org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$$anon$1.getNext(JdbcUtils.scala:347)
    at org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$$anon$1.getNext(JdbcUtils.scala:329)
    at org.apache.spark.util.NextIterator.hasNext(NextIterator.scala:73)
    at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
    at org.apache.spark.util.CompletionIterator.hasNext(CompletionIterator.scala:32)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
    at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
    at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$10$$anon$1.hasNext(WholeStageCodegenExec.scala:614)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:253)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:247)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$25.apply(RDD.scala:830)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$25.apply(RDD.scala:830)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
    at org.apache.spark.scheduler.Task.run(Task.scala:109)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.NumberFormatException: For input string: "total_pkg.total"
    at java.lang.NumberFormatException.forInputString(NumberFormatException.java:65)
    at java.lang.Long.parseLong(Long.java:589)
    at java.lang.Long.valueOf(Long.java:803)
    at org.apache.hive.jdbc.HiveBaseResultSet.getLong(HiveBaseResultSet.java:368)