Scala Greenplum Spark Connector org.postgresql.util.PSQLException: ERROR: error when writing data to gpfdist

I have a Greenplum cluster on Azure, and I am trying to connect to it with Spark from my local machine (using Pivotal's Greenplum-Spark connector).

In my Scala code I do the following:

var options = Map[String, String]()
options += ("url" -> url)
options += ("user" -> credentials("user"))
options += ("password" -> credentials("password"))
options += ("partitionColumn" -> partitionColumn)
sqlContext.read.format("greenplum").options(options).load()
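
For reference, here is a minimal, self-contained sketch of the same read. The connection URL, database, and host are placeholders, and I have added a dbtable entry, which as far as I can tell the connector expects; everything else mirrors my code above.

import org.apache.spark.sql.SparkSession

object GreenplumReadSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("greenplum-read-test")
      .master("local[*]")
      .getOrCreate()

    // Placeholder connection details; in my real code these come from configuration.
    val options = Map(
      "url"             -> "jdbc:postgresql://<gp-master-host>:5432/<database>", // placeholder
      "user"            -> "user1",
      "password"        -> "p@ss0rd",
      "dbtable"         -> "public.sample", // assumed option name: the table created below
      "partitionColumn" -> "id"             // the table's distribution column
    )

    val df = spark.read.format("greenplum").options(options).load()
    df.printSchema() // the metadata read: this succeeds
    df.show()        // the row read: this is where the failure below occurs
  }
}
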
For testing purposes, I created a user:

DROP USER IF EXISTS user1;
CREATE USER user1 CREATEEXTTABLE(type='writable') PASSWORD 'p@ss0rd';
Then, using that user, I created a database/table as follows:

drop table if exists sample;
create table public.sample (id serial, big bigint, wee smallint, stuff text) distributed by (id) ;
insert into sample (big) values (generate_series(1,100));
update sample set wee = 0; 
update sample set wee = 1 where mod(id,7)=1;
update sample set stuff = substr('abcdefghijklmnopqrstuvwxyz',1,mod(wee,13));
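
As a sanity check, the same table can be queried over a plain JDBC connection with the same credentials. As I understand it, the connector fetches metadata over such a JDBC channel, while the row data itself travels via gpfdist (hence the gpfdist URL in the trace below), so a check like this isolates the two paths. A minimal sketch, with the URL as a placeholder:

import java.sql.DriverManager

object JdbcSanityCheck {
  def main(args: Array[String]): Unit = {
    // Same user/password as the Spark job; the URL is a placeholder.
    val url = "jdbc:postgresql://<gp-master-host>:5432/<database>"
    val conn = DriverManager.getConnection(url, "user1", "p@ss0rd")
    try {
      val rs = conn.createStatement().executeQuery("select count(*) from public.sample")
      while (rs.next()) {
        println(s"row count: ${rs.getLong(1)}")
      }
    } finally {
      conn.close()
    }
  }
}
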
Now, when I run the Spark code with the Greenplum credentials (in debug mode), it appears to read the table metadata successfully (it picks up the columns and types), but it fails while reading the rows with SQLSTATE(08006), ErrorCode(0). Here is the stack trace:

2020-03-24 19:04:31,168 WARN [Executor task launch worker for task 0] com.zaxxer.hikari.pool.ProxyConnection - HikariPool-1 - Connection org.postgresql.jdbc.PgConnection@14fab679 marked as broken because of SQLSTATE(08006), ErrorCode(0)
org.postgresql.util.PSQLException: ERROR: error when writing data to gpfdist http://127.0.0.1:60352/spark_e0aa1f0c8646f023_fffec8bf08e0054d_driver_261, quit after 8 tries  (seg1 172.21.0.4:6001 pid=25909)
    at org.postgresql.core.v3.QueryExecutorImpl.receiveErrorResponse(QueryExecutorImpl.java:2310)
    at org.postgresql.core.v3.QueryExecutorImpl.processResults(QueryExecutorImpl.java:2023)
    at org.postgresql.core.v3.QueryExecutorImpl.execute(QueryExecutorImpl.java:217)
    at org.postgresql.jdbc.PgStatement.execute(PgStatement.java:421)
    at org.postgresql.jdbc.PgStatement.executeWithFlags(PgStatement.java:318)
    at org.postgresql.jdbc.PgStatement.executeUpdate(PgStatement.java:294)
    at com.zaxxer.hikari.pool.ProxyStatement.executeUpdate(ProxyStatement.java:120)
    at com.zaxxer.hikari.pool.HikariProxyStatement.executeUpdate(HikariProxyStatement.java)
    at io.pivotal.greenplum.spark.jdbc.Jdbc$$anonfun$2.apply(Jdbc.scala:81)
    at io.pivotal.greenplum.spark.jdbc.Jdbc$$anonfun$2.apply(Jdbc.scala:79)
    at resource.AbstractManagedResource$$anonfun$5.apply(AbstractManagedResource.scala:88)
    at scala.util.control.Exception$Catch$$anonfun$either$1.apply(Exception.scala:125)
    at scala.util.control.Exception$Catch$$anonfun$either$1.apply(Exception.scala:125)
    at scala.util.control.Exception$Catch.apply(Exception.scala:103)
    at scala.util.control.Exception$Catch.either(Exception.scala:125)
    at resource.AbstractManagedResource.acquireFor(AbstractManagedResource.scala:88)
    at resource.ManagedResourceOperations$class.apply(ManagedResourceOperations.scala:26)
    at resource.AbstractManagedResource.apply(AbstractManagedResource.scala:50)
    at resource.DeferredExtractableManagedResource$$anonfun$tried$1.apply(AbstractManagedResource.scala:33)
    at scala.util.Try$.apply(Try.scala:192)
    at resource.DeferredExtractableManagedResource.tried(AbstractManagedResource.scala:33)
    at io.pivotal.greenplum.spark.jdbc.Jdbc$.copyTable(Jdbc.scala:83)
    at io.pivotal.greenplum.spark.externaltable.GreenplumRowIterator.liftedTree1$1(GreenplumRowIterator.scala:105)
    at io.pivotal.greenplum.spark.externaltable.GreenplumRowIterator.<init>(GreenplumRowIterator.scala:104)
    at io.pivotal.greenplum.spark.GreenplumRDD.compute(GreenplumRDD.scala:49)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD$$anonfun$7.apply(RDD.scala:337)
    at org.apache.spark.rdd.RDD$$anonfun$7.apply(RDD.scala:335)
    at org.apache.spark.storage.BlockManager$$anonfun$doPutIterator$1.apply(BlockManager.scala:1165)
    at org.apache.spark.storage.BlockManager$$anonfun$doPutIterator$1.apply(BlockManager.scala:1156)
    at org.apache.spark.storage.BlockManager.doPut(BlockManager.scala:1091)
    at org.apache.spark.storage.BlockManager.doPutIterator(BlockManager.scala:1156)
    at org.apache.spark.storage.BlockManager.getOrElseUpdate(BlockManager.scala:882)
    at org.apache.spark.rdd.RDD.getOrCompute(RDD.scala:335)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:286)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
    at org.apache.spark.scheduler.Task.run(Task.scala:121)
    at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:402)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:408)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)