
Incomplete conversion of an RDD to a DataFrame in PySpark (Python)


Using PySpark 1.6.3, I am trying to convert an RDD to a DataFrame. This is test code run in a Zeppelin notebook. The RDD of interest is rdd_ret.

>>> from pyspark.sql import Row
>>> rdd_ret.count()
9301
>>> rddofrows = rdd_ret.map(lambda x: Row(**x))
>>> things = rddofrows.take(10000)
>>> len(things)
9301
>>> [type(x) for x in things if type(x) != Row]
[]
>>> [len(x) for x in things if len(x) != 117]
[]
So we see here that we definitely have 9301 rows, all of them Row objects and all of the same length. Now I want to convert this to a DataFrame:

>>> outdf = rddofrows.toDF(sampleRatio=0.1)
>>> outdf.count()
This throws an error:

TypeError: 'NoneType' object is not iterable

(full stack trace at the bottom)

The output DataFrame object does get created, but any operation I try to run on it (.show(), .count(), .filter()) produces the same stack trace shown at the bottom. I don't understand what could be None in this situation. Sure, some of the values inside the Row objects may be wrong, but to count or show you should only need to iterate over the rows of the DataFrame, and they are all there.

What is going on here?
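
A quick way to see which fields actually contain None in the sampled rows (a minimal sketch using the things list from the snippet above, not something from the original post):

# Collect the names of fields that are None in any of the sampled rows.
none_fields = set()
for row in things:
    for name, value in row.asDict().items():
        if value is None:
            none_fields.add(name)
print(sorted(none_fields))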

Traceback (most recent call last):
  File "/tmp/zeppelin_pyspark-5665146503764823323.py", line 360, in <module>
    exec(code, _zcUserQueryNameSpace)
  File "<stdin>", line 1, in <module>
  File "/usr/hdp/current/spark-client/python/pyspark/sql/dataframe.py", line 269, in count
    return int(self._jdf.count())
  File "/usr/hdp/current/spark-client/python/lib/py4j-0.9-src.zip/py4j/java_gateway.py", line 813, in __call__
    answer, self.gateway_client, self.target_id, self.name)
  File "/usr/hdp/current/spark-client/python/pyspark/sql/utils.py", line 45, in deco
    return f(*a, **kw)
  File "/usr/hdp/current/spark-client/python/lib/py4j-0.9-src.zip/py4j/protocol.py", line 308, in get_return_value
    format(target_id, ".", name), value)
py4j.protocol.Py4JJavaError: An error occurred while calling o2282.count.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 21 in stage 1256.0 failed 4 times, most recent failure: Lost task 21.3 in stage 1256.0 (TID 62913, usg-kov-e1b-slv005.c.pg-us-n-app-053847.internal): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
  File "/usr/hdp/current/spark-client/python/pyspark/worker.py", line 111, in main
    process()
  File "/usr/hdp/current/spark-client/python/pyspark/worker.py", line 106, in process
    serializer.dump_stream(func(split_index, iterator), outfile)
  File "/usr/hdp/current/spark-client/python/pyspark/serializers.py", line 263, in dump_stream
    vs = list(itertools.islice(iterator, batch))
  File "/usr/hdp/current/spark-client/python/pyspark/sql/types.py", line 924, in convert_struct
    return tuple(conv(v) for v, conv in zip(obj, converters))
  File "/usr/hdp/current/spark-client/python/pyspark/sql/types.py", line 924, in <genexpr>
    return tuple(conv(v) for v, conv in zip(obj, converters))
  File "/usr/hdp/current/spark-client/python/pyspark/sql/types.py", line 900, in <lambda>
    return lambda row: [conv(v) for v in row]
TypeError: 'NoneType' object is not iterable
    at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRDD.scala:166)
    at org.apache.spark.api.python.PythonRunner$$anon$1.<init>(PythonRDD.scala:207)
    at org.apache.spark.api.python.PythonRunner.compute(PythonRDD.scala:125)
    at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:70)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:313)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:277)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:313)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:277)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:313)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:277)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:313)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:277)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:313)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:277)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:313)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:277)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:313)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:277)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:73)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
    at org.apache.spark.scheduler.Task.run(Task.scala:89)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:227)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
Driver stacktrace:
    at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1433)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1421)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1420)
    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
    at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1420)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:801)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:801)
    at scala.Option.foreach(Option.scala:236)
    at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:801)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1642)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1601)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1590)
    at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
    at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:622)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1831)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1844)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1857)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1928)
    at org.apache.spark.rdd.RDD$$anonfun$collect$1.apply(RDD.scala:934)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:111)
    at org.apache.spark.rdd.RDD.withScope(RDD.scala:323)
    at org.apache.spark.rdd.RDD.collect(RDD.scala:933)
    at org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:166)
    at org.apache.spark.sql.execution.SparkPlan.executeCollectPublic(SparkPlan.scala:174)
    at org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1500)
    at org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1500)
    at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:56)
    at org.apache.spark.sql.DataFrame.withNewExecutionId(DataFrame.scala:2087)
    at org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$$execute$1(DataFrame.scala:1499)
    at org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$$collect(DataFrame.scala:1506)
    at org.apache.spark.sql.DataFrame$$anonfun$count$1.apply(DataFrame.scala:1516)
    at org.apache.spark.sql.DataFrame$$anonfun$count$1.apply(DataFrame.scala:1515)
    at org.apache.spark.sql.DataFrame.withCallback(DataFrame.scala:2100)
    at org.apache.spark.sql.DataFrame.count(DataFrame.scala:1515)
    at sun.reflect.GeneratedMethodAccessor118.invoke(Unknown Source)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:231)
    at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:381)
    at py4j.Gateway.invoke(Gateway.java:259)
    at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:133)
    at py4j.commands.CallCommand.execute(CallCommand.java:79)
    at py4j.GatewayConnection.run(GatewayConnection.java:209)
    at java.lang.Thread.run(Thread.java:745)
An example Row from the RDD looks like this (truncated):

Row(accountType='individual', added='2018-06-05T01:52:34.257+0000', assignment='null', author='noahmagel', authorCity='null', authorCityCode='null',
authorContinent='North America', authorContinentCode='n-a', authorCountry='United States', authorCountryCode='us', authorCounty='null',
authorCountyCode='null', authorLocation='n-a,us,,,', authorState='null', authorStateCode='null', avatarUrl='https://pbs.twimg.com/profile_images/613069089263718401/P1BWMsFG_normal.jpg',
averageDurationOfVisit=20.0, averageVisits=6.0, backlinks=49850734.0, blogComments=0.0, checked=False, city='null', cityCode='null', continent='North America',
continentCode='n-a', country='United States', countryCode='us', county='null', countyCode='null', date='2017-12-11T10:58:36.000+0000',
displayUrls=[], domain='twitter.com', engagement=0.0, expandedUrls=[], facebookAuthorId='null', facebookComments=0.0, facebookLikes=0.0,
facebookRole='null', facebookShares=0.0, facebookSubtype='null', forumPosts=0.0, forumViews=0.0, fullText='@oli_braun @elonmusk @SpaceX Take my money ...

The "**" mapping is used for Python dict objects. I convert a Row to a dict with x.asDict(), which can then be unpacked as **x.asDict().
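
As a minimal sketch of that point (not from the original thread), the dict/Row round trip looks like this:

# ** unpacking works on dicts, and Row.asDict() turns a Row back into a plain dict.
from pyspark.sql import Row

d = {"a": 1, "b": 2}
r = Row(**d)           # Row(a=1, b=2)
back = r.asDict()      # {'a': 1, 'b': 2}
r2 = Row(**back)       # Row(a=1, b=2) again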

I reproduced this with the following:

sc = spark.sparkContext
# Two JSON records; only the first has "key1", which is read as an array of structs.
json_rows = ['{"key1": [{"foo": 1}, {"bar": 2}]}', 
             '{"key2": 1}']
rows = sc.parallelize(json_rows)
df = spark.read.json(rows)
rdd = df.rdd
# Rebuilding a DataFrame from the Row RDD fails on the record where key1 is null.
new_df = spark.createDataFrame(rdd, samplingRatio=1)
new_df.head(2)

This fails with the same error; the relevant part of the traceback:
File "/usr/hdp/current/spark-client/python/pyspark/sql/types.py", line 900, in <lambda>
    return lambda row: [conv(v) for v in row]
TypeError: 'NoneType' object is not iterable
json_rows = ['{"key1": [1, 2]}', 
             '{"key2": 1}']
The reason can be seen in pyspark/sql/types.py: a converter is only built for a column whose type needs one (a struct, or an array whose element type itself needs converting), and that converter iterates over the column's value. For the record where key1 is missing, the column value is None, and iterating over None raises the TypeError above:

def _need_converter(dataType):
    if isinstance(dataType, StructType):
        return True
    elif isinstance(dataType, ArrayType):
        return _need_converter(dataType.elementType)
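
To make the failure mode concrete, here is a plain-Python sketch of what that converter does when the column value is None (it mirrors the lambda at line 900 of types.py shown in the traceback, but it is only an illustration, not PySpark's actual code path):

# Illustration only: the converter built for an array column is essentially
# lambda row: [conv(v) for v in row], so a None value cannot be handled.
conv = lambda v: v                       # stand-in for the element converter
array_conv = lambda row: [conv(v) for v in row]

array_conv([{"foo": 1}, {"bar": 2}])     # works: [{'foo': 1}, {'bar': 2}]
array_conv(None)                         # TypeError: 'NoneType' object is not iterable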
My workaround is to flatten any nested Row values into JSON strings before creating the DataFrame:

import json
from pyspark.sql.types import Row, ArrayType

def flatten(x):
    # Turn the Row into a plain dict and serialize nested Row values as JSON strings,
    # so that no column needs the struct converter.
    x_dict = x.asDict()
    for k, v in x_dict.items():
        if isinstance(v, Row):
            x_dict[k] = json.dumps(v.asDict())
    return x_dict

sc = spark.sparkContext
rows = sc.parallelize(json_rows)
df = spark.read.json(rows)
flat_rdd = df.rdd.map(lambda x: flatten(x))
flat_df = spark.createDataFrame(flat_rdd, samplingRatio=1)
flat_df.head(2)
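
If the serialized fields are needed again later, the JSON strings can be parsed back into dicts; a hypothetical follow-up sketch for rows collected to the driver (the startswith check is a crude heuristic and was not part of the original answer):

import json

for row in flat_df.head(2):
    d = row.asDict()
    for name, value in d.items():
        # flatten() serialized nested Rows as JSON object strings
        if isinstance(value, str) and value.startswith("{"):
            d[name] = json.loads(value)
    print(d)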