Amazon Web Services: unable to write to S3 with the S3 sink using StreamExecutionEnvironment - Apache Flink 1.1.4


I created a simple Apache Flink project that reads data from a Kafka topic and writes it to an S3 bucket. When I run the project I get no errors, and it successfully reads every message from the Kafka topic, but nothing is written to my S3 bucket. Since there are no errors, it is hard to debug what is going on. Below are my project and configuration. This only happens when I use the StreamExecutionEnvironment. If I try to produce to S3 using a regular batch ExecutionEnvironment, it works.

S3Test Java program

public class S3Test {

public static void main(String[] args) throws Exception {
    // parse input arguments
    final ParameterTool parameterTool = ParameterTool.fromPropertiesFile(args[0]);

    if(parameterTool.getNumberOfParameters() < 4) {
        System.out.println("Missing parameters!\nUsage: Kafka --topic <topic> " +
                "--bootstrap.servers <kafka brokers> --zookeeper.connect <zk quorum> --group.id <some id>");
        return;
    }

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.getConfig().disableSysoutLogging();
    env.getConfig().setRestartStrategy(RestartStrategies.fixedDelayRestart(4, 10000));
    env.enableCheckpointing(5000); // create a checkpoint every 5 seconds
    env.getConfig().setGlobalJobParameters(parameterTool); //make parameters available in the web interface

    DataStream<String> messageStream = env
            .addSource(new FlinkKafkaConsumer09<String>(
                    parameterTool.getRequired("kafka.topic"),
                    new SimpleStringSchema(),
                    parameterTool.getProperties()));


    // write kafka stream to standard out.
    //messageStream.print();
    String id = UUID.randomUUID().toString();
    messageStream.writeAsText("s3://flink-data/" + id + ".txt").setParallelism(1);

    env.execute("Write to S3 Example");
}
}
pom.xml

<dependencies>
    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-java</artifactId>
        <version>1.1.4</version>
    </dependency>

    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-streaming-java_2.10</artifactId>
        <version>1.1.4</version>
    </dependency>

    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-clients_2.10</artifactId>
        <version>1.1.4</version>
    </dependency>

    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-connector-kafka-0.9_2.10</artifactId>
        <version>1.1.4</version>
    </dependency>

    <dependency>
        <groupId>com.amazonaws</groupId>
        <artifactId>aws-java-sdk</artifactId>
        <version>1.7.4</version>
    </dependency>

    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-aws</artifactId>
        <version>2.7.2</version>
    </dependency>

    <dependency>
        <groupId>org.apache.httpcomponents</groupId>
        <artifactId>httpclient</artifactId>
        <version>4.2.5</version>
    </dependency>
    <dependency>
        <groupId>org.apache.httpcomponents</groupId>
        <artifactId>httpcore</artifactId>
        <version>4.2.5</version>
    </dependency>

    <!-- Apache Kafka Dependencies -->
    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka_2.10</artifactId>
        <version>0.9.0.1</version>
        <exclusions>
            <exclusion>
                <groupId>org.slf4j</groupId>
                <artifactId>slf4j-log4j12</artifactId>
            </exclusion>
        </exclusions>
    </dependency>

</dependencies>

core-site.xml (Hadoop configuration)


<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://localhost:9000</value>
    </property>
    <property>
        <name>fs.s3.impl</name>
        <value>org.apache.hadoop.fs.s3a.S3AFileSystem</value>
    </property>
    <property>
        <name>fs.s3a.buffer.dir</name>
        <value>/tmp</value>
    </property>
    <property>
        <name>fs.s3a.access.key</name>
        <value>***************</value>
    </property>
    <property>
        <name>fs.s3a.secret.key</name>
        <value>****************</value>
    </property>
</configuration>

IAM permissions - made sure the role being used has permission to write to the S3 bucket.
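
For reference, bucket write access can be checked outside of Flink with the AWS SDK that is already on the classpath. This is only a minimal sketch: the hard-coded credentials and object key are placeholders, and the bucket name is the one used by the job above.

import java.io.ByteArrayInputStream;

import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.ObjectMetadata;

public class S3WriteCheck {

    public static void main(String[] args) {
        // Placeholder credentials - use the same access/secret key pair that core-site.xml points s3a at.
        AmazonS3Client s3 = new AmazonS3Client(
                new BasicAWSCredentials("ACCESS_KEY", "SECRET_KEY"));

        byte[] payload = "permission check".getBytes();
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(payload.length);

        // If this throws an AmazonS3Exception (e.g. 403), the keys/role cannot write to the bucket;
        // if it succeeds, the IAM side is fine and the problem is in the Flink/Hadoop S3 setup.
        s3.putObject("flink-data", "permission-check.txt",
                new ByteArrayInputStream(payload), metadata);
        System.out.println("Write to s3://flink-data/permission-check.txt succeeded");
    }
}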

A simple way to get some debugging information is to enable server access logging on the S3 bucket that should be receiving the Kafka data. That will give you more information, from S3's point of view, about where the problem originates.
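
If you prefer to do this from code, logging can also be enabled with the AWS SDK that is already a dependency of the project. A minimal sketch, assuming a separate log bucket (the bucket names and prefix below are placeholders):

import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.BucketLoggingConfiguration;
import com.amazonaws.services.s3.model.SetBucketLoggingConfigurationRequest;

public class EnableBucketLogging {

    public static void main(String[] args) {
        AmazonS3Client s3 = new AmazonS3Client(
                new BasicAWSCredentials("ACCESS_KEY", "SECRET_KEY"));

        // Deliver access logs for the data bucket into a separate log bucket/prefix.
        // The log bucket must grant the S3 Log Delivery group permission to write to it.
        BucketLoggingConfiguration logging =
                new BucketLoggingConfiguration("my-log-bucket", "flink-data-logs/");
        s3.setBucketLoggingConfiguration(
                new SetBucketLoggingConfigurationRequest("flink-data", logging));
    }
}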


Persisting a Kafka topic to S3 through Flink requires using the RollingSink. The RollingSink uses a Bucketer to determine the name of the directory that the part files are saved into. DateTimeBucketer is the default, but you can also create a custom Bucketer. A part file is saved and closed whenever the maximum batch size is reached, and a new part file is then created. The following code works:

public class TestRollingSink {

    public static void main(String[] args) {
        Map<String, String> configs = ConfigUtils.loadConfigs("/Users/path/to/config.yaml");

        final ParameterTool parameterTool = ParameterTool.fromMap(configs);

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        env.getConfig().disableSysoutLogging();
        env.getConfig().setGlobalJobParameters(parameterTool);
        env.socketTextStream("localhost", 9092);

        // Consume the Kafka topic as a stream of strings.
        DataStream<String> parsed = env
                .addSource(new FlinkKafkaConsumer09<String>(
                        parameterTool.getRequired("kafka.topic"),
                        new SimpleStringSchema(),
                        parameterTool.getProperties()));

        // Checkpoint every 2 seconds in at-least-once mode.
        env.enableCheckpointing(2000, CheckpointingMode.AT_LEAST_ONCE);

        // Rolling sink that buckets part files by date/time under s3://flink-test/TEST.
        RollingSink<String> sink = new RollingSink<String>("s3://flink-test/" + "TEST");
        sink.setBucketer(new DateTimeBucketer("yyyy-MM-dd--HHmm"));
        sink.setWriter(new StringWriter<String>());
        sink.setBatchSize(200);          // roll to a new part file after 200 bytes
        sink.setPendingPrefix("file-");
        sink.setPendingSuffix(".txt");
        parsed.print();
        parsed.addSink(sink).setParallelism(1);

        try {
            env.execute();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}

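If the default date/time directory layout does not fit, you can plug in your own Bucketer via sink.setBucketer(...). Below is a minimal sketch of a custom Bucketer, assuming the Bucketer interface that RollingSink uses in Flink 1.1.x (shouldStartNewBucket / getNextBucketPath); the directory name is just an example:

import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.connectors.fs.Bucketer;

// Custom Bucketer that writes every part file into a single fixed subdirectory.
public class SingleDirectoryBucketer implements Bucketer {

    @Override
    public boolean shouldStartNewBucket(Path basePath, Path currentBucketPath) {
        // Never switch directories; RollingSink still rolls part files by batch size.
        return false;
    }

    @Override
    public Path getNextBucketPath(Path basePath) {
        return new Path(basePath, "all-data");
    }
}

It would be registered on the sink the same way as the DateTimeBucketer above: sink.setBucketer(new SingleDirectoryBucketer());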