How to read multiple json objects from one S3 file and write them to DynamoDB (Python 3.8)

Tags: json, python-3.x, aws-lambda, boto3, amazon-dynamodb-streams

I am able to read a single json record from an S3 bucket and write it to DynamoDB. However, when I try to read and write from a file that contains multiple json objects, it gives me an error. Please find the code and error below, and help me resolve the issue. Lambda code (reads the S3 file and writes to DynamoDB):
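(The Lambda handler itself is not reproduced in this post. As a rough sketch only, a handler of this shape, with the names jsonFileReader / jsonFilerec taken from the traceback below and the DynamoDB table name "tweets" purely assumed, would look roughly like this:)

import json
import urllib.parse
import boto3

s3 = boto3.client('s3')
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('tweets')  # assumed table name, not from the original post

def lambda_handler(event, context):
    # Bucket/key of the object that triggered the Lambda
    bucket = event['Records'][0]['s3']['bucket']['name']
    key = urllib.parse.unquote_plus(event['Records'][0]['s3']['object']['key'])

    # Read the whole S3 object as bytes
    obj = s3.get_object(Bucket=bucket, Key=key)
    jsonFileReader = obj['Body'].read()

    # This is the call that raises JSONDecodeError ("Extra data") when the
    # file contains more than one JSON document written back-to-back
    jsonFilerec = json.loads(jsonFileReader)

    # Assuming the parsed document is a list of items
    for item in jsonFilerec:
        table.put_item(Item=item)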

Error in CloudWatch:

[ERROR] JSONDecodeError: Extra data: line 1 column 230 (char 229)
Traceback (most recent call last):
  File "/var/task/lambda_function.py", line 20, in lambda_handler
    jsonFilerec = json.loads(jsonFileReader)
  File "/var/lang/lib/python3.8/json/__init__.py", line 357, in loads
    return _default_decoder.decode(s)
  File "/var/lang/lib/python3.8/json/decoder.py", line 340, in decode
    raise JSONDecodeError("Extra data", s, end)
Please find a sample record from the S3 file below:

b'[{"id": "1305857561179152385", "tweet": "If you like vintage coke machines and guys who look like Fred Flintstone you\'ll love the short we\'ve riffed: Coke R, "ts": "Tue Sep 15 13:14:38 +0000 2020"}][{"id": "1305858267067883521", "tweet": "Chinese unicorn Genki Forest plots own beverage hits  #China #Chinese #Brands #GoingGlobal\\u2026 ", "ts": "Tue Sep 15 13:17:27 +0000 2020"}][{"id": "1305858731293507585", "tweet": "RT @CinemaCheezy: If you like vintage coke machines and guys who look like Fred Flintstone you\'ll love the short we\'ve riffed: Coke Refresh\\u2026", "ts": "Tue Sep 15 13:19:17 +0000 2020"}]'
Added the producer/input code that generates the JSON file:

import boto3
import json
from datetime import datetime
import calendar
import random
import time
import sys
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import preprocessor as p


#Variables that contains the user credentials to access Twitter API
consumer_key = '************'
consumer_secret ='******************'
access_token = '********************'
access_token_secret = '***************'

# Create tracklist with the words that will be searched for
tracklist = ['#coke']

awsRegionName='us-east-1'
awsAccessKey='************'
awsSecretKey='**********'

class TweetStreamListener(StreamListener):
    # on success
    def on_data(self, data):
        # decode json
        tweet = json.loads(data)
        print(type(tweet))
        #print(tweet)
        if "text" in tweet.keys():
            payload = {'id': str(tweet['id']),
                       'tweet': str(tweet['text'].encode('utf8', 'replace')),
                       #'tweet': str(tweet['text']),
                       'ts': str(tweet['created_at']),
                       },
                       
            try:
                print(tweet)
                #print(payload)
                
                               
                put_response = kinesis_client.put_record(
                    StreamName=stream_name,
                    Data=json.dumps(payload),
                    PartitionKey=str(['screen_name']))
                    #PartitionKey=str(tweet['user']['screen_name']))
            except (AttributeError, Exception) as e:
                print(e)
                pass
        return True

    # on failure
    def on_error(self, status):
        print("On_error status:", status)


stream_name = 'twitter-data-stream'  # fill the name of Kinesis data stream you created
#stream_name = 'demo-datastream' 

if __name__ == '__main__':
    # create kinesis client connection
    kinesis_client = boto3.client('kinesis',
                                  region_name=awsRegionName,
                                  aws_access_key_id=awsAccessKey,
                                  aws_secret_access_key=awsSecretKey)
    
    # create instance of the tweepy tweet stream listener
    listener = TweetStreamListener()
    # set twitter keys/tokens
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    # create instance of the tweepy stream
    stream = Stream(auth, listener)
    # search twitter for tags or keywords from cli parameters
    #query = sys.argv[1:]  # list of CLI arguments
    #query_fname = ' '.join(query)  # string
    stream.filter(track=tracklist)
    #tweets = api.search(tracklist, count=10, lang='en', exclude='retweets',tweet_mode = 'extended')
                         
    
    

                         
    
Regards,
Priti

Probably your json is incorrect: [tweet_data][…][…] is not a valid json object.
You should process your input data so that it looks like this: [{tweet_data},{…},{…},{…},{…}]
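(A small illustration of the difference, not from the original answer:)

import json

# Several arrays written back-to-back -> the "Extra data" error from the question
try:
    json.loads('[{"id": "1"}][{"id": "2"}]')
except json.JSONDecodeError as e:
    print(e)  # Extra data: line 1 column 14 (char 13)

# One array containing all the objects -> parses fine
records = json.loads('[{"id": "1"}, {"id": "2"}]')
print(records)  # [{'id': '1'}, {'id': '2'}]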

For a single item in the same format it works fine. Also, if you check the S3 sample record set, it is in the format json expects.
No, it is not. A single array [...] is valid, so that case works. But multiple arrays are not valid json content ([...][...][...]).
Thanks Julien, you are right. I have updated the question with the producer/input code that generates this json file. Could you take a look and suggest what changes I need to make to create a single json array?
Sorry, I am not familiar with that kind of code. You should close this post, since it has been answered (the error comes from the invalid json), and ask a separate question about the producer's json output.
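(One likely cause in the producer code above: the trailing comma after the payload dict turns payload into a one-element tuple, so json.dumps(payload) writes [{...}] for every tweet, and the concatenated records end up in S3 as [...][...][...]. A possible rewrite of on_data, sketched here rather than taken from the thread, drops that comma and writes one newline-terminated JSON object per record so the delivered file is newline-delimited JSON. FixedTweetStreamListener is a hypothetical name:)

from tweepy.streaming import StreamListener
import json

class FixedTweetStreamListener(StreamListener):
    # Same structure as the listener above; kinesis_client and stream_name are
    # the module-level globals defined in the original script.
    def on_data(self, data):
        tweet = json.loads(data)
        if "text" in tweet:
            payload = {
                'id': str(tweet['id']),
                'tweet': tweet['text'],
                'ts': str(tweet['created_at']),
            }  # no trailing comma, so payload stays a dict (not a 1-tuple)
            try:
                kinesis_client.put_record(
                    StreamName=stream_name,
                    Data=json.dumps(payload) + '\n',   # one JSON object per line
                    PartitionKey=str(tweet['id']))
            except Exception as e:
                print(e)
        return True

# The consuming Lambda can then parse the delivered file line by line:
# items = [json.loads(line) for line in body.splitlines() if line.strip()]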