PySpark ALS recommendation error: "requirement failed: No ratings available"

Tags: apache-spark, pyspark, apache-spark-sql, recommendation-engine

I am running an ALS model (alternating least squares matrix factorization) for a recommendation engine and keep hitting the error below; I have not been able to resolve it even after several hours. As far as I can tell from the model function definition, the data types and format of the model parameters are correct.

Please help.

### Modeling code
from pyspark import StorageLevel
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.recommendation import ALS
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder

train_df = train_df.dropDuplicates()

# ALS for implicit feedback; "drop" removes NaN predictions at evaluation time
als_model = ALS(implicitPrefs=True, coldStartStrategy="drop",
                userCol="userID_index", itemCol="itemID_index", ratingCol="rating")
als_model.setSeed(345)

grid = (ParamGridBuilder()
        .addGrid(als_model.rank, [8, 10])
        .addGrid(als_model.regParam, [0.1, 0.5])
        .addGrid(als_model.alpha, [40])
        .build())

als_eval = RegressionEvaluator(metricName='rmse', labelCol='rating')

cv = CrossValidator(estimator=als_model, estimatorParamMaps=grid,
                    evaluator=als_eval, numFolds=1)
train_df.persist(StorageLevel.MEMORY_AND_DISK)
cv_model = cv.fit(train_df)
The error is shown below:

---------------------------------------------------------------------------
Py4JJavaError                             Traceback (most recent call last)
/opt/cloudera/parcels/SPARK2/lib/spark2/python/pyspark/sql/utils.py in deco(*a, **kw)
     62         try:
---> 63             return f(*a, **kw)
     64         except py4j.protocol.Py4JJavaError as e:

/opt/cloudera/parcels/SPARK2/lib/spark2/python/lib/py4j-0.10.4-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
    318                     "An error occurred while calling {0}{1}{2}.\n".
--> 319                     format(target_id, ".", name), value)
    320             else:

Py4JJavaError: An error occurred while calling o4662.fit.
: java.lang.IllegalArgumentException: requirement failed: No ratings available from MapPartitionsRDD[3167] at map at ALS.scala:613
    at scala.Predef$.require(Predef.scala:224)
    at org.apache.spark.ml.recommendation.ALS$.train(ALS.scala:843)
    at org.apache.spark.ml.recommendation.ALS.fit(ALS.scala:622)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
    at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
    at py4j.Gateway.invoke(Gateway.java:280)
    at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
    at py4j.commands.CallCommand.execute(CallCommand.java:79)
    at py4j.GatewayConnection.run(GatewayConnection.java:214)
    at java.lang.Thread.run(Thread.java:748)


During handling of the above exception, another exception occurred:

IllegalArgumentException                  Traceback (most recent call last)
<ipython-input-79-202a4259227e> in <module>
     16 cv = CrossValidator(estimator=als_model, estimatorParamMaps=grid, evaluator=als_eval,numFolds=1)
     17 train_df.persist(StorageLevel.MEMORY_AND_DISK)
---> 18 cv_model = cv.fit(train_df)

/opt/cloudera/parcels/SPARK2/lib/spark2/python/pyspark/ml/base.py in fit(self, dataset, params)
     62                 return self.copy(params)._fit(dataset)
     63             else:
---> 64                 return self._fit(dataset)
     65         else:
     66             raise ValueError("Params must be either a param map or a list/tuple of param maps, "

/opt/cloudera/parcels/SPARK2/lib/spark2/python/pyspark/ml/tuning.py in _fit(self, dataset)
    230             validation = df.filter(condition)
    231             train = df.filter(~condition)
--> 232             models = est.fit(train, epm)
    233             for j in range(numModels):
    234                 model = models[j]

/opt/cloudera/parcels/SPARK2/lib/spark2/python/pyspark/ml/base.py in fit(self, dataset, params)
     57             params = dict()
     58         if isinstance(params, (list, tuple)):
---> 59             return [self.fit(dataset, paramMap) for paramMap in params]
     60         elif isinstance(params, dict):
     61             if params:

/opt/cloudera/parcels/SPARK2/lib/spark2/python/pyspark/ml/base.py in <listcomp>(.0)
     57             params = dict()
     58         if isinstance(params, (list, tuple)):
---> 59             return [self.fit(dataset, paramMap) for paramMap in params]
     60         elif isinstance(params, dict):
     61             if params:

/opt/cloudera/parcels/SPARK2/lib/spark2/python/pyspark/ml/base.py in fit(self, dataset, params)
     60         elif isinstance(params, dict):
     61             if params:
---> 62                 return self.copy(params)._fit(dataset)
     63             else:
     64                 return self._fit(dataset)

/opt/cloudera/parcels/SPARK2/lib/spark2/python/pyspark/ml/wrapper.py in _fit(self, dataset)
    263 
    264     def _fit(self, dataset):
--> 265         java_model = self._fit_java(dataset)
    266         return self._create_model(java_model)
    267 

/opt/cloudera/parcels/SPARK2/lib/spark2/python/pyspark/ml/wrapper.py in _fit_java(self, dataset)
    260         """
    261         self._transfer_params_to_java()
--> 262         return self._java_obj.fit(dataset._jdf)
    263 
    264     def _fit(self, dataset):

/opt/cloudera/parcels/SPARK2/lib/spark2/python/lib/py4j-0.10.4-src.zip/py4j/java_gateway.py in __call__(self, *args)
   1131         answer = self.gateway_client.send_command(command)
   1132         return_value = get_return_value(
-> 1133             answer, self.gateway_client, self.target_id, self.name)
   1134 
   1135         for temp_arg in temp_args:

/opt/cloudera/parcels/SPARK2/lib/spark2/python/pyspark/sql/utils.py in deco(*a, **kw)
     77                 raise QueryExecutionException(s.split(': ', 1)[1], stackTrace)
     78             if s.startswith('java.lang.IllegalArgumentException: '):
---> 79                 raise IllegalArgumentException(s.split(': ', 1)[1], stackTrace)
     80             raise
     81     return deco

IllegalArgumentException: 'requirement failed: No ratings available from MapPartitionsRDD[3167] at map at ALS.scala:613'
However, I cannot find anything wrong with my dataset train_df or with the code syntax. Any help would be much appreciated.
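For completeness, a quick sanity check on the input (a sketch only; the DataFrame train_df and the column name rating are taken from the code above) that confirms non-null ratings exist before fit() is called:

from pyspark.sql import functions as F

# Sketch: count rows and non-null ratings in the persisted training DataFrame
print("total rows:", train_df.count())
print("non-null ratings:", train_df.filter(F.col("rating").isNotNull()).count())

# Inspect the value range of the rating column
train_df.select(F.min("rating"), F.max("rating")).show()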


The exception points to an internal error that occurs while converting the result from the Java gateway into a Python object, which usually happens with incompatible types. Could you round the rating column of the training dataset to 2-3 decimal places of precision and try again?
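A minimal sketch of that suggestion, assuming the column name rating from the question and using standard pyspark.sql.functions; the exact precision (2 decimal places here) is only an example:

from pyspark.sql import functions as F

# Sketch: cast the rating column to double and round it to 2 decimal places
# before fitting, as suggested above. The column name "rating" comes from the question.
train_df = train_df.withColumn("rating", F.round(F.col("rating").cast("double"), 2))
cv_model = cv.fit(train_df)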