PySpark string matching using an ML pipeline throws the error: Failed to execute user defined function ($anonfun$1: (vector) => array<vector>)

Tags: pyspark, string-matching, fuzzy-search

I am trying to do string matching between two dataframes. Say dataframe1 contains X sentences and dataframe2 contains Y sentences. I need to check whether any sentence in dataframe1 matches a sentence in dataframe2. I tried using an ML pipeline, as below:

from pyspark.ml import Pipeline
from pyspark.ml.feature import RegexTokenizer, NGram, HashingTF, MinHashLSH
from pyspark.sql.functions import col

def match_names(df_1, df_2):
    # Character-level tokens -> 3-gram shingles -> hashed term frequencies -> MinHash LSH
    pipeline = Pipeline(stages=[
        RegexTokenizer(
            pattern="", inputCol="name", outputCol="tokens", minTokenLength=1
        ),
        NGram(n=3, inputCol="tokens", outputCol="ngrams"),
        HashingTF(inputCol="ngrams", outputCol="vectors"),
        MinHashLSH(inputCol="vectors", outputCol="lsh")
    ])
    model = pipeline.fit(df_1)
    stored_hashed = model.transform(df_1)
    landed_hashed = model.transform(df_2)
    matched_df = model.stages[-1].approxSimilarityJoin(
        stored_hashed, landed_hashed, 1.0, "confidence"
    ).select(col("datasetA.name"), col("datasetB.name"), col("confidence"))
    matched_df.show(20, False)
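For reference, this is a minimal sketch of how match_names would be invoked; the sample dataframes below are my own illustration, not the original data:

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
# Hypothetical inputs with a single "name" column, as the pipeline expects.
df_1 = spark.createDataFrame([("john smith",), ("jane doe",)], ["name"])
df_2 = spark.createDataFrame([("jon smith",), ("janet doe",)], ["name"])
match_names(df_1, df_2)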
I get the following error:

Py4JJavaError: An error occurred while calling o4136.showString.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 181.0 failed 1 times, most recent failure: Lost task 0.0 in stage 181.0 (TID 899, localhost, executor driver): org.apache.spark.SparkException: Failed to execute user defined function($anonfun$1: (vector) => array<vector>)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage4.project_doConsume$(Unknown Source)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage4.processNext(Unknown Source)
    at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
    at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$10$$anon$1.hasNext(WholeStageCodegenExec.scala:614)
    at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:439)
    at scala.collection.Iterator$JoinIterator.hasNext(Iterator.scala:211)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:408)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage5.processNext(Unknown Source)
    at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
    at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$10$$anon$1.hasNext(WholeStageCodegenExec.scala:614)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:408)
    at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:148)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
    at org.apache.spark.scheduler.Task.run(Task.scala:109)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.IllegalArgumentException: requirement failed: Must have at least 1 non zero entry.
    at scala.Predef$.require(Predef.scala:224)
    at org.apache.spark.ml.feature.MinHashLSHModel$$anonfun$1.apply(MinHashLSH.scala:57)
    at org.apache.spark.ml.feature.MinHashLSHModel$$anonfun$1.apply(MinHashLSH.scala:56)
    ... 19 more

Driver stacktrace:
    at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1599)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1587)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1586)
    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
    at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1586)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:831)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:831)
    at scala.Option.foreach(Option.scala:257)
    at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:831)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1820)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1769)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1758)
    at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
    at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:642)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2034)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2055)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2074)
    at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:363)
    at org.apache.spark.sql.execution.CollectLimitExec.executeCollect(limit.scala:38)
    at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$collectFromPlan(Dataset.scala:3272)
    at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2484)
    at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2484)
    at org.apache.spark.sql.Dataset$$anonfun$52.apply(Dataset.scala:3253)
    at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:77)
    at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3252)
    at org.apache.spark.sql.Dataset.head(Dataset.scala:2484)
    at org.apache.spark.sql.Dataset.take(Dataset.scala:2698)
    at org.apache.spark.sql.Dataset.showString(Dataset.scala:254)
    at sun.reflect.GeneratedMethodAccessor92.invoke(Unknown Source)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
    at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
    at py4j.Gateway.invoke(Gateway.java:282)
    at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
    at py4j.commands.CallCommand.execute(CallCommand.java:79)
    at py4j.GatewayConnection.run(GatewayConnection.java:214)
    at java.lang.Thread.run(Thread.java:745)
Caused by: org.apache.spark.SparkException: Failed to execute user defined function($anonfun$1: (vector) => array<vector>)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage4.project_doConsume$(Unknown Source)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage4.processNext(Unknown Source)
    at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
    at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$10$$anon$1.hasNext(WholeStageCodegenExec.scala:614)
    at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:439)
    at scala.collection.Iterator$JoinIterator.hasNext(Iterator.scala:211)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:408)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage5.processNext(Unknown Source)
    at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
    at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$10$$anon$1.hasNext(WholeStageCodegenExec.scala:614)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:408)
    at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:148)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
    at org.apache.spark.scheduler.Task.run(Task.scala:109)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    ... 1 more
Caused by: java.lang.IllegalArgumentException: requirement failed: Must have at least 1 non zero entry.
    at scala.Predef$.require(Predef.scala:224)
    at org.apache.spark.ml.feature.MinHashLSHModel$$anonfun$1.apply(MinHashLSH.scala:57)
    at org.apache.spark.ml.feature.MinHashLSHModel$$anonfun$1.apply(MinHashLSH.scala:56)
    ... 19 more
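The innermost cause ("Must have at least 1 non zero entry") is a requirement enforced by MinHashLSHModel: every input vector must contain at least one non-zero element. With this pipeline, any name shorter than the n-gram size (n=3) tokenizes into fewer than three characters, NGram then emits an empty list, and HashingTF maps that to an all-zero vector, which MinHashLSH rejects. A minimal sketch of a pre-filter that drops such rows before calling match_names (the 3-character threshold is an assumption tied to n=3):

from pyspark.sql.functions import col, length

# Drop rows whose "name" cannot produce at least one 3-gram of
# character tokens; such rows hash to all-zero vectors.
df_1 = df_1.filter(col("name").isNotNull() & (length(col("name")) >= 3))
df_2 = df_2.filter(col("name").isNotNull() & (length(col("name")) >= 3))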