Apache Flink: how to write Parquet files to HDFS partitioned by date and time?

Tags: apache-flink, flink-streaming

While ingesting JSON data from Kafka and saving it as Parquet files to be loaded into Hive, I ran into the same problem mentioned in the linked question. Does anyone know how to solve it? Many thanks. I am using Apache Flink 1.4.0 + HDFS 2.7.3.

You can directly implement the Writer interface. It could look like this:

import org.apache.flink.streaming.connectors.fs.Writer;
import org.apache.flink.util.Preconditions;

import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.avro.AvroParquetWriter;
import org.apache.parquet.hadoop.ParquetWriter;
import org.apache.parquet.hadoop.metadata.CompressionCodecName;

import java.io.IOException;

/**
 * Parquet writer that writes Avro records via AvroParquetWriter, for use with a BucketingSink.
 *
 * @param <T> type of the Avro records to write
 */
public class ParquetSinkWriter<T extends GenericRecord> implements Writer<T> {

    private static final long serialVersionUID = -975302556515811398L;

    private final CompressionCodecName compressionCodecName = CompressionCodecName.SNAPPY;
    private final int pageSize = 64 * 1024;

    private final String schemaRepresentation;

    private transient Schema schema;
    private transient ParquetWriter<GenericRecord> writer;
    private transient Path path;

    private int position;

    public ParquetSinkWriter(String schemaRepresentation) {
        this.schemaRepresentation = Preconditions.checkNotNull(schemaRepresentation);
    }

    @Override
    public void open(FileSystem fs, Path path) throws IOException {
        this.position = 0;
        this.path = path;

        if (writer != null) {
            writer.close();
        }

        writer = createWriter();
    }

    @Override
    public long flush() throws IOException {
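        // ParquetWriter has no flush API, so roll the file instead: remember the size written
        // so far, close the current file, and open a fresh writer at the same path.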
        Preconditions.checkNotNull(writer);
        position += writer.getDataSize();
        writer.close();
        writer = createWriter();

        return position;
    }

    @Override
    public long getPos() throws IOException {
        Preconditions.checkNotNull(writer);
        return position + writer.getDataSize();
    }

    @Override
    public void close() throws IOException {
        if (writer != null) {
            writer.close();
            writer = null;
        }
    }

    @Override
    public void write(T element) throws IOException {
        Preconditions.checkNotNull(writer);
        writer.write(element);
    }

    @Override
    public Writer<T> duplicate() {
        return new ParquetSinkWriter<>(schemaRepresentation);
    }

    private ParquetWriter<GenericRecord> createWriter() throws IOException {
        if (schema == null) {
            schema = new Schema.Parser().parse(schemaRepresentation);
        }

        return AvroParquetWriter.<GenericRecord>builder(path)
            .withSchema(schema)
            .withDataModel(new GenericData())
            .withCompressionCodec(compressionCodecName)
            .withPageSize(pageSize)
            .build();
    }
}
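For context, here is a minimal wiring sketch (not part of the original answer; the class name ParquetSinkWiring, the HDFS path, the bucket format and the batch size are placeholder values) showing how such a Writer is typically attached to a BucketingSink with a DateTimeBucketer, so that the files end up in directories partitioned by date and time:

import org.apache.avro.generic.GenericRecord;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.connectors.fs.bucketing.BucketingSink;
import org.apache.flink.streaming.connectors.fs.bucketing.DateTimeBucketer;

public class ParquetSinkWiring {

    public static void attachSink(DataStream<GenericRecord> records, String schemaString) {
        // Base output directory on HDFS (placeholder path).
        BucketingSink<GenericRecord> sink = new BucketingSink<>("hdfs:///data/parquet-out");

        // One bucket (sub-directory) per hour, e.g. .../2018-01-10--12/part-...
        sink.setBucketer(new DateTimeBucketer<GenericRecord>("yyyy-MM-dd--HH"));

        // Serialize each part file as Parquet using the writer above.
        sink.setWriter(new ParquetSinkWriter<GenericRecord>(schemaString));

        // Roll to a new part file after roughly 128 MB (placeholder size).
        sink.setBatchSize(128 * 1024 * 1024L);

        records.addSink(sink);
    }
}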

Thanks for the suggestion, but I still run into the following exception:

Caused by: org.apache.hadoop.fs.FileAlreadyExistsException: /wikipedia-edits-flink/partitionKey=2018-01-10--12-10/_part-5-0.in-progress for client 127.0.0.1 already exists ... at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFileInternal(FSNamesystem.java:2563) ... at flink.ParquetSinkWriter.createWriter(ParquetSinkWriter.java:98) at flink.ParquetSinkWriter.flush(ParquetSinkWriter.java:58) at org.apache.flink.streaming.connectors.fs.bucketing.BucketingSink.snapshotState(BucketingSink.java:688)

Also, is it possible to write the Parquet files without an explicit Avro schema, inferring it instead from the type of Flink's DataStream[T]?

I think you can get the schema from the AvroSerializer, which you can instantiate via an AvroTypeInfo.

After reading this implementation and the BucketingSink implementation, is it correct to say that a hard failure leads to a complete loss of all data currently buffered in the ParquetWriter? I guess that is because the data is neither flushed nor stored in any state, so the job has no way to recover it...
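On the schema question above, one hedged alternative to digging the schema out of Flink's AvroSerializer is to let Avro itself derive it from the Java class via reflection. The sketch below is only an illustration; SchemaFromType and MyEvent are hypothetical names, not classes from this thread:

import org.apache.avro.Schema;
import org.apache.avro.reflect.ReflectData;

public class SchemaFromType {

    // Builds an Avro schema from a plain Java class using Avro's reflection support
    // and returns its JSON string form, which is what ParquetSinkWriter expects.
    public static String schemaStringFor(Class<?> clazz) {
        Schema schema = ReflectData.get().getSchema(clazz);
        return schema.toString();
    }

    // Usage (MyEvent is a placeholder POJO):
    //   String schemaString = SchemaFromType.schemaStringFor(MyEvent.class);
    //   ParquetSinkWriter<GenericRecord> writer = new ParquetSinkWriter<>(schemaString);
}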