Amazon web services: Unable to join and query two tables in an AWS Glue job script

Tags: amazon-web-services, apache-spark, pyspark, apache-spark-sql, aws-glue

So I created an AWS Glue job script in which I added two data sources and converted them from DynamicFrames to DataFrames. My goal is to query across the two tables with an inner join, but I am unable to do so: the job fails at the query step. I have also added the error. Please help, and please check the code below.

import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from pyspark.sql.functions import *
from pyspark.sql.types import *
from pyspark.sql import SQLContext
from pyspark import SparkContext,SparkConf
from pyspark.sql.window import Window
from awsglue.dynamicframe import DynamicFrame
from awsglue.job import Job
## @params: [JOB_NAME]
args = getResolvedOptions(sys.argv, ['JOB_NAME'])

sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
spark_session = glueContext.spark_session
sqlContext = SQLContext(spark_session.sparkContext, spark_session)
job = Job(glueContext)
job.init(args['JOB_NAME'], args)

## @type: DataSource
## @args: [database = "matomo", table_name = "matomo_matomo_log_visit", transformation_ctx = "datasource0"]
## @return: datasource0
## @inputs: []
datasource0 = glueContext.create_dynamic_frame.from_catalog(database = "matomo", table_name = "matomo_matomo_log_visit", transformation_ctx = "datasource0")
## @type: ApplyMapping
## @args: [mapping = [("visit_last_action_time", "timestamp", "visit_last_action_time", "timestamp"), ("custom_dimension_1", "string", "custom_dimension_1", "string")], transformation_ctx = "applymapping1"]
## @return: applymapping1
## @inputs: [frame = datasource0]
applymapping1 = ApplyMapping.apply(frame = datasource0, mappings = [("idsite" , "int" , "idsite" , "int"),("visit_last_action_time", "timestamp", "visit_last_action_time", "timestamp"), ("custom_dimension_1", "string", "custom_dimension_1", "string")], transformation_ctx = "applymapping1")

## @type: DataSource
## @args: [database = "matomo", table_name = "matomo_matomo_site", transformation_ctx = "datasource1"]
## @return: datasource1
## @inputs: []
datasource1 =  glueContext.create_dynamic_frame.from_catalog(database = "matomo", table_name = "matomo_matomo_site", transformation_ctx = "datasource1")
## @type: ApplyMapping
## @args: [mapping = [("idsite", "int", "idsite", "int"), ("main_url", "string", "main_url", "string")], transformation_ctx = "applymapping2"]
## @return: applymapping2
## @inputs: [frame = datasource1]
applymapping2 = ApplyMapping.apply(frame = datasource1, mappings = [("idsite", "int", "idsite", "int"), ("main_url", "string", "main_url", "string")], transformation_ctx = "applymapping2")

df = datasource0.toDF()
df1= datasource1.toDF()
print("hi4")
df.createOrReplaceTempView("matomoLogVisit")
df1.createOrReplaceTempView("matomoSite")
print("hi5")
consolidated_df = spark.sql("""Select  ms.main_url, ml.custom_dimension_1 , max(ml.visit_last_action_time) from
                            matomoLogVisit ml inner join matomoSite ms
                            on ms.idsite = ml.idsite
                            where ms.main_url = "abcde"
                            group by ml.custom_dimension_1""")

output_df = consolidated_df.orderBy('custom_dimension_1', ascending=True)
consolidated_dynamicframe = DynamicFrame.fromDF(output_df.repartition(1), glueContext, "consolidated_dynamicframe")
print("hi8")
## @type: DataSink
## @args: [connection_type = "s3", connection_options = {"path": "s3://gevme-datalake"}, format = "json", transformation_ctx = "datasink2"]
## @return: datasink2
## @inputs: [frame = applymapping1]
datasink2 = glueContext.write_dynamic_frame.from_options(frame = consolidated_dynamicframe, connection_type = "s3", connection_options = {"path": "s3://gevme-datalake/"}, format = "csv", transformation_ctx = "datasink2")
job.commit()

Your query selects ms.main_url but does not include it in the GROUP BY, which is why Spark rejects it. You can also group by ms.main_url, since it is always equal to the "abcde" from the where clause:

consolidated_df = spark.sql("""Select ms.main_url, ml.custom_dimension_1 , max(ml.visit_last_action_time) from
                            matomoLogVisit ml inner join matomoSite ms
                            on ms.idsite = ml.idsite
                            where ms.main_url = "abcde"
                            group by ms.main_url, ml.custom_dimension_1""")
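
If you prefer the DataFrame API over the SQL string, the same corrected aggregation can be expressed directly on the two DataFrames. This is only a minimal sketch assuming the df and df1 variables defined in the script above; the last_action_time alias is my own addition so that the aggregate gets a readable column name in the written files.

from pyspark.sql import functions as F

# Same inner join + filter + aggregation as the corrected SQL above,
# written with the DataFrame API (sketch, not tested against the Glue catalog).
consolidated_df = (
    df.alias("ml")
      .join(df1.alias("ms"), F.col("ms.idsite") == F.col("ml.idsite"), "inner")
      .where(F.col("ms.main_url") == "abcde")
      .groupBy("ms.main_url", "ml.custom_dimension_1")
      .agg(F.max("ml.visit_last_action_time").alias("last_action_time"))
)

The same aliasing trick also works in the SQL version (max(ml.visit_last_action_time) as last_action_time), which avoids the auto-generated max(...) column name in the CSV output.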