PySpark: problem producing a file's data to Kafka, which is then consumed with Structured Streaming

Tags: pyspark, apache-kafka, spark-structured-streaming

I am trying to produce messages to Kafka from a file and then consume them with Spark Structured Streaming:

historical_data_level_1.json -> producer.py -> consumer.py

The problem is that the messages arrive in Spark in a strange form, so I know the problem is in producer.py.

When I inspect the DataFrame with

kafka_df.selectExpr("CAST(value AS STRING)").show(30, False)
the result is:

+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
|value                                                                                                                                                                                                              |
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
|"{\"localSymbol\": \"EUR.USD\", \"time\": \"2021-05-07 10:10:50.873031+00:00\", \"precio_actual\": 1.2065, \"bid\": NaN, \"ask\": NaN, \"high\": 1.20905, \"low\": 1.2053, \"close\": 1.2065}\n"                   |
|"{\"localSymbol\": \"EUR.USD\", \"time\": \"2021-05-07 10:10:50.873720+00:00\", \"precio_actual\": 1.208235, \"bid\": 1.20823, \"ask\": 1.20824, \"high\": 1.20905, \"low\": 1.2053, \"close\": 1.2065}\n"         |
|"{\"localSymbol\": \"USD.JPY\", \"time\": \"2021-05-07 10:10:50.914310+00:00\", \"precio_actual\": 109.09, \"bid\": NaN, \"ask\": NaN, \"high\": 109.2, \"low\": 108.935, \"close\": 109.09}\n"                    |
|"{\"localSymbol\": \"USD.JPY\", \"time\": \"2021-05-07 10:10:50.914867+00:00\", \"precio_actual\": 109.10249999999999, \"bid\": 109.102, \"ask\": 109.103, \"high\": 109.2, \"low\": 108.935, \"close\": 109.09}\n"|
|"{\"localSymbol\": \"USD.JPY\", \"time\": \"2021-05-07 10:10:50.975038+00:00\", \"precio_actual\": 109.102, \"bid\": 109.101, \"ask\": 109.103, \"high\": 109.2, \"low\": 108.935, \"close\": 109.09}\n"           |
|"{\"localSymbol\": \"USD.JPY\", \"time\": \"2021-05-07 10:10:51.059851+00:00\", \"precio_actual\": 109.1015, \"bid\": 109.101, \"ask\": 109.102, \"high\": 109.2, \"low\": 108.935, \"close\": 109.09}\n"          |
|"{\"localSymbol\": \"EUR.USD\", \"time\": \"2021-05-07 10:10:51.101304+00:00\", \"precio_actual\": 1.208235, \"bid\": 1.20823, \"ask\": 1.20824, \"high\": 1.20905, \"low\": 1.2053, \"close\": 1.2065}\n"         |
If I copy and paste the data by hand into a console producer it works fine, so the problem is in how the messages are being sent to Spark (see the sketch below).
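
A minimal sketch (using one hypothetical line, not the full file) of what json.dumps does when it is handed text that is already JSON; this reproduces the quoted, escaped values shown above:

import json

# each line read from the file is already a JSON string (with a trailing newline)
line = '{"localSymbol": "EUR.USD", "precio_actual": 1.2065}\n'

# json.dumps on a str does not serialize an object; it wraps the string in quotes
# and escapes the inner quotes, which is exactly what ends up in the Kafka topic
print(json.dumps(line))
# "{\"localSymbol\": \"EUR.USD\", \"precio_actual\": 1.2065}\n"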

Can you help me?

I have the following file, historical_data_level_1.json, which contains this data:

{"localSymbol": "EUR.USD", "time": "2021-05-07 10:10:50.873031+00:00", "precio_actual": 1.2065, "bid": NaN, "ask": NaN, "high": 1.20905, "low": 1.2053, "close": 1.2065}
{"localSymbol": "EUR.USD", "time": "2021-05-07 10:10:50.873720+00:00", "precio_actual": 1.208235, "bid": 1.20823, "ask": 1.20824, "high": 1.20905, "low": 1.2053, "close": 1.2065}
{"localSymbol": "USD.JPY", "time": "2021-05-07 10:10:50.914310+00:00", "precio_actual": 109.09, "bid": NaN, "ask": NaN, "high": 109.2, "low": 108.935, "close": 109.09}
{"localSymbol": "USD.JPY", "time": "2021-05-07 10:10:50.914867+00:00", "precio_actual": 109.10249999999999, "bid": 109.102, "ask": 109.103, "high": 109.2, "low": 108.935, "close": 109.09}
I have the following producer, producer.py:

import json
from kafka import KafkaProducer
import settings

def producer():
    kafka_producer = KafkaProducer(bootstrap_servers=['localhost:9092'])
    with open(settings.HISTORICAL_DATA_FOLDER+"historical_data_level_1.json") as historical_data_level_1:
        for line in historical_data_level_1:
            kafka_producer.send("test", json.dumps(line).encode('utf-8'))

if __name__ == '__main__':
    producer()

I have the following consumer, consumer.py:

from pyspark.sql import SparkSession
from pyspark.sql.functions import from_json, col, expr
from pyspark.sql.types import StructType, StructField, StringType, LongType, DoubleType, IntegerType, ArrayType

if __name__ == "__main__":
    spark = SparkSession \
        .builder \
        .appName("File Streaming Demo3") \
        .master("local[3]") \
        .config("spark.streaming.stopGracefullyOnShutdown", "true") \
        .config('spark.jars.packages', 'org.apache.spark:spark-sql-kafka-0-10_2.12:3.1.1') \
        .getOrCreate()

    kafka_df = spark.readStream \
        .format("kafka") \
        .option("kafka.bootstrap.servers", "localhost:9092") \
        .option("subscribe", "test") \
        .option("startingOffsets", "earliest") \
        .load()

    kafka_df.printSchema()

    schema = StructType([
        StructField('localSymbol', StringType()),
        StructField('time', StringType()),
        StructField('precio_actual', StringType()),
        StructField('bid', StringType()),
        StructField('ask', StringType()),
        StructField('high', StringType()),
        StructField('low', StringType()),
        StructField('close', StringType()),
    ])

    value_df = kafka_df.select(from_json(col("value").cast("string"), schema).alias("value"))

    value_df.printSchema()

    level_1_df_process = value_df \
        .select(
            'value.*'
        )

    invoice_writer_query = level_1_df_process.writeStream \
        .format("json") \
        .queryName("Flattened Invoice Writer") \
        .outputMode("append") \
        .option("path", "output") \
        .option("checkpointLocation", "chk-point-dir") \
        .trigger(processingTime="1 minute") \
        .start()


    invoice_writer_query.awaitTermination()
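
For quick debugging of the consumer side, a console sink can temporarily replace the JSON file sink; this is only a sketch that reuses level_1_df_process from the query above, in place of the invoice_writer_query block:

# print the parsed rows to the console so the effect of any producer change is visible immediately
console_query = level_1_df_process.writeStream \
    .format("console") \
    .outputMode("append") \
    .option("truncate", "false") \
    .start()

console_query.awaitTermination()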

It was fixed by having producer.py send a dictionary instead of the raw string; the code looks like this:

import json
from kafka import KafkaProducer
import settings

def producer():
    kafka_producer = KafkaProducer(bootstrap_servers=['localhost:9092'])
    with open(settings.HISTORICAL_DATA_FOLDER+"historical_data_level_1.json") as historical_data_level_1:
        for line in historical_data_level_1:
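            # parse the JSON text into a dict first, so json.dumps serializes the dict
            # instead of re-quoting and escaping the string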
            line_python_dict = json.loads(line)
            kafka_producer.send("test", json.dumps(line_python_dict).encode('utf-8'))


if __name__ == '__main__':
    producer()
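
An equivalent variant (a sketch, assuming the same settings module and the same "test" topic) is to let a value_serializer handle the dict-to-bytes conversion; sending line.strip().encode('utf-8') directly would also work, since each line is already valid JSON:

import json
from kafka import KafkaProducer
import settings

def producer():
    # the serializer turns each dict into JSON bytes once, at send time
    kafka_producer = KafkaProducer(
        bootstrap_servers=['localhost:9092'],
        value_serializer=lambda d: json.dumps(d).encode('utf-8'),
    )
    with open(settings.HISTORICAL_DATA_FOLDER + "historical_data_level_1.json") as historical_data_level_1:
        for line in historical_data_level_1:
            kafka_producer.send("test", json.loads(line))
    # make sure buffered messages are delivered before the script exits
    kafka_producer.flush()

if __name__ == '__main__':
    producer()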

Structured Streaming? Yes, it is Structured Streaming.