Amazon S3: AWS cannot access my s3n URL

I am trying to run spark-submit with a Python file, and in that Python file I want to access data.txt, which is stored in my S3 bucket. I am using the snippet below to reference the file, but I keep getting an error:

dataFile = ("s3n://<AWS Access Key ID>:"
        "<AWS Secret Access Key>"
        "@<mybucket>/data.txt")

Traceback (most recent call last):
  File "/home/adas/spark/SimpleApp.py", line 23, in <module>
    numAs = logData.filter(lambda s: 'a' in s).count()
  File "/home/adas/spark/spark/python/lib/pyspark.zip/pyspark/rdd.py", line 1041, in count
  File "/home/adas/spark/spark/python/lib/pyspark.zip/pyspark/rdd.py", line 1032, in sum
  File "/home/adas/spark/spark/python/lib/pyspark.zip/pyspark/rdd.py", line 906, in fold
  File "/home/adas/spark/spark/python/lib/pyspark.zip/pyspark/rdd.py", line 809, in collect
  File "/home/adas/spark/spark/python/lib/py4j-0.10.4-src.zip/py4j/java_gateway.py", line 1133, in __call__
  File "/home/adas/spark/spark/python/lib/py4j-0.10.4-src.zip/py4j/protocol.py", line 319, in get_return_value
py4j.protocol.Py4JJavaError: An error occurred while calling z:org.apache.spark.api.python.PythonRDD.collectAndServe.
: java.lang.IllegalArgumentException: AWS Secret Access Key must be specified as the password of a s3n URL, or by setting the fs.s3n.awsSecretAccessKey property.
    at org.apache.hadoop.fs.s3.S3Credentials.initialize(S3Credentials.java:86)
    at org.apache.hadoop.fs.s3native.Jets3tNativeFileSystemStore.initialize(Jets3tNativeFileSystemStore.java:80)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:191)
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102)
    at org.apache.hadoop.fs.s3native.$Proxy16.initialize(Unknown Source)
    at org.apache.hadoop.fs.s3native.NativeS3FileSystem.initialize(NativeS3FileSystem.java:334)
    at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:2669)
    at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:94)
    at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:2703)
    at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:2685)
    at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:373)
    at org.apache.hadoop.fs.Path.getFileSystem(Path.java:295)
    at org.apache.hadoop.mapred.FileInputFormat.singleThreadedListStatus(FileInputFormat.java:258)
    at org.apache.hadoop.mapred.FileInputFormat.listStatus(FileInputFormat.java:229)
    at org.apache.hadoop.mapred.FileInputFormat.getSplits(FileInputFormat.java:315)
    at org.apache.spark.rdd.HadoopRDD.getPartitions(HadoopRDD.scala:202)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:252)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:250)
    at scala.Option.getOrElse(Option.scala:121)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:250)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:252)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:250)
    at scala.Option.getOrElse(Option.scala:121)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:250)
    at org.apache.spark.api.python.PythonRDD.getPartitions(PythonRDD.scala:53)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:252)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:250)
    at scala.Option.getOrElse(Option.scala:121)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:250)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1958)
    at org.apache.spark.rdd.RDD$$anonfun$collect$1.apply(RDD.scala:935)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
    at org.apache.spark.rdd.RDD.withScope(RDD.scala:362)
    at org.apache.spark.rdd.RDD.collect(RDD.scala:934)
    at org.apache.spark.api.python.PythonRDD$.collectAndServe(PythonRDD.scala:453)
    at org.apache.spark.api.python.PythonRDD.collectAndServe(PythonRDD.scala)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
    at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
    at py4j.Gateway.invoke(Gateway.java:280)
    at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
    at py4j.commands.CallCommand.execute(CallCommand.java:79)
    at py4j.GatewayConnection.run(GatewayConnection.java:214)
    at java.lang.Thread.run(Thread.java:745)
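
The exception above names the two accepted ways of supplying s3n credentials: as the password part of the s3n URL, or through the fs.s3n.awsSecretAccessKey property. A minimal sketch of the property-based route, assuming an already-created SparkContext sc and placeholder key values (not the poster's actual code):

# Sketch only: set the s3n credential properties named in the exception
# on an existing SparkContext `sc`. The placeholder values are assumptions.
hadoop_conf = sc._jsc.hadoopConfiguration()
hadoop_conf.set("fs.s3n.awsAccessKeyId", "<AWS Access Key ID>")
hadoop_conf.set("fs.s3n.awsSecretAccessKey", "<AWS Secret Access Key>")

# With the keys configured, the URL no longer needs to embed credentials.
logData = sc.textFile("s3n://<mybucket>/data.txt")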

I tried the steps from the link below @challenge-thanking, but those solutions did not work. I also do not have the file {HADOOP_HOME}/conf/hadoop-site.xml.
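
Since there is no {HADOOP_HOME}/conf/hadoop-site.xml to edit, one possible alternative is to pass the same Hadoop properties through SparkConf when the context is built; Spark copies any spark.hadoop.* setting into the underlying Hadoop configuration. The key names come from the error message above, and the placeholder values are assumptions:

from pyspark import SparkConf, SparkContext

# Sketch only: spark.hadoop.* entries are forwarded to the Hadoop configuration,
# so no hadoop-site.xml file is required. Substitute real keys and bucket name.
conf = (SparkConf()
        .setAppName("SimpleApp")
        .set("spark.hadoop.fs.s3n.awsAccessKeyId", "<AWS Access Key ID>")
        .set("spark.hadoop.fs.s3n.awsSecretAccessKey", "<AWS Secret Access Key>"))
sc = SparkContext(conf=conf)

logData = sc.textFile("s3n://<mybucket>/data.txt")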