Spring Boot WebFlux: how to go from `OutputStream` to `Flux<DataBuffer>`?

Tags: spring-boot, spring-webflux

I'm dynamically building a tarball and want to stream it back directly, which should be 100% possible with the .tar.gz format.

The code below is the closest I could get after a lot of googling. Basically, I need something that implements an `OutputStream` and supplies or publishes to a `Flux<DataBuffer>`, so that I can return that from my method and get streaming output, rather than buffering the entire tarball in RAM (which I'm pretty sure is what's happening here). I'm using Apache Commons Compress, which has a great API, but it's all `OutputStream`-based.

I suppose another way would be to write directly to the response, but I don't think that's the reactive way? I'm also not sure how to get an `OutputStream` from some kind of response object. (A piped-stream take on this bridge is sketched after the code below.)

This is Kotlin, by the way, on Spring Boot 2.0.

@GetMapping("/cookbook.tar.gz", "/cookbook")
fun getCookbook(): Mono<DefaultDataBuffer> {
    log.info("Creating tarball of cookbooks: ${soloConfig.cookbookPaths}")

    val transformation = Mono.just(soloConfig.cookbookPaths.stream()
            .toList()
            .flatMap {
                Files.walk(Paths.get(it)).map(Path::toFile).toList()
            })
            .map { files ->

                //Will make one giant databuffer... but oh well? TODO: maybe use some kind of chunking.
                val buffer = DefaultDataBufferFactory().allocateBuffer()
                val outputBufferStream = buffer.asOutputStream()


                //Transform my list of stuff into an archiveOutputStream
                TarArchiveOutputStream(GzipCompressorOutputStream(outputBufferStream)).use { taos ->
                    taos.setLongFileMode(TarArchiveOutputStream.LONGFILE_GNU)

                    log.info("files to compress: ${files}")

                    for (file in files) {
                        if (file.isFile) {
                            val entry = "cookbooks/" + file.name
                            log.info("Adding ${entry} to tarball")
                            taos.putArchiveEntry(TarArchiveEntry(file, entry))
                            FileInputStream(file).use { fis ->
                                fis.copyTo(taos) //Copy that stuff!
                            }
                            taos.closeArchiveEntry()
                        }
                    }
                }
                buffer
            }

    return transformation
}
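
For reference, this OutputStream-to-Flux bridge can also be assembled from JDK piped streams plus `DataBufferUtils.readInputStream`, which takes care of chunking the bytes into `DataBuffer`s. This is a minimal sketch, not the code from this question: `streamToFlux` and `getCookbookPiped` are hypothetical names, and it uses the Boot 2.0-era `Schedulers.elastic()` (newer Reactor versions would use `boundedElastic()`):

import org.springframework.core.io.buffer.DataBuffer
import org.springframework.core.io.buffer.DataBufferUtils
import org.springframework.core.io.buffer.DefaultDataBufferFactory
import reactor.core.publisher.Flux
import reactor.core.scheduler.Schedulers
import java.io.OutputStream
import java.io.PipedInputStream
import java.io.PipedOutputStream

// Hypothetical helper: runs blocking OutputStream-writing code on its own thread
// and exposes the bytes as a Flux<DataBuffer>, chunked for us by DataBufferUtils.
fun streamToFlux(writer: (OutputStream) -> Unit): Flux<DataBuffer> = Flux.defer {
    val input = PipedInputStream()
    val output = PipedOutputStream(input)
    // The writer must run on a separate thread; piped streams deadlock otherwise.
    Schedulers.elastic().schedule { output.use(writer) }
    DataBufferUtils.readInputStream({ input }, DefaultDataBufferFactory(), 4096)
}

@GetMapping("/cookbook.tar.gz")
fun getCookbookPiped(): Flux<DataBuffer> = streamToFlux { out ->
    TarArchiveOutputStream(GzipCompressorOutputStream(out)).use { taos ->
        // ...same archive-writing loop as above...
    }
}

The pipe also gives rough backpressure for free: the writer thread blocks whenever the pipe's internal buffer fills up, so the tarball is produced at about the rate the client consumes it.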

I puzzled through this and came up with a working solution: implement an `OutputStream`, take those bytes and publish them into a stream. Be sure to override `close` and send an `onComplete`. Works great!

@RestController
class SoloController(
        val soloConfig: SoloConfig
) {
    val log = KotlinLogging.logger { }

    @GetMapping("/cookbooks.tar.gz", "/cookbooks")
    fun streamCookbook(serverHttpResponse: ServerHttpResponse): Flux<DataBuffer> {
        log.info("Creating tarball of cookbooks: ${soloConfig.cookbookPaths}")

        val publishingOutputStream = PublishingOutputStream(serverHttpResponse.bufferFactory())

        //Needs to set up cookbook path as a parent directory, and then do `cookbooks/$cookbook_path/<all files>` for each cookbook path given
        Flux.just(soloConfig.cookbookPaths.stream().toList())
                .doOnNext { paths ->
                    //Transform my list of stuff into an archiveOutputStream
                    TarArchiveOutputStream(GzipCompressorOutputStream(publishingOutputStream)).use { taos ->
                        taos.setLongFileMode(TarArchiveOutputStream.LONGFILE_GNU)

                        paths.forEach { cookbookDir ->
                            if (Paths.get(cookbookDir).toFile().isDirectory) {

                                val cookbookDirFile = Paths.get(cookbookDir).toFile()
                                val directoryName = cookbookDirFile.name
                                val entryStart = "cookbooks/${directoryName}"

                                val files = Files.walk(cookbookDirFile.toPath()).map(Path::toFile).toList()

                                log.info("${files.size} files to compress")

                                for (file in files) {
                                    if (file.isFile) {
                                        val relativePath = file.toRelativeString(cookbookDirFile)
                                        val entry = "$entryStart/$relativePath"
                                        taos.putArchiveEntry(TarArchiveEntry(file, entry))
                                        FileInputStream(file).use { fis ->
                                            fis.copyTo(taos) //Copy that stuff!
                                        }
                                        taos.closeArchiveEntry()
                                    }
                                }
                            }
                        }
                    }
                }
                .subscribeOn(Schedulers.parallel())
                .doOnComplete {
                    publishingOutputStream.close()
                }
                .subscribe()

        return publishingOutputStream.publisher
    }

    class PublishingOutputStream(bufferFactory: DataBufferFactory) : OutputStream() {

        val publisher: UnicastProcessor<DataBuffer> = UnicastProcessor.create(Queues.unbounded<DataBuffer>().get())
        private val bufferPublisher: UnicastProcessor<Byte> = UnicastProcessor.create(Queues.unbounded<Byte>().get())

        init {
            bufferPublisher
                    .bufferTimeout(4096, Duration.ofMillis(100))
                    .doOnNext { intList ->
                        val buffer = bufferFactory.allocateBuffer(intList.size)
                        buffer.write(intList.toByteArray())
                        publisher.onNext(buffer)
                    }
                    .doOnComplete {
                        publisher.onComplete()
                    }
                    .subscribeOn(Schedulers.newSingle("publisherThread"))
                    .subscribe()
        }

        override fun write(b: Int) {
            bufferPublisher.onNext(b.toByte())
        }

        override fun close() {
            bufferPublisher.onComplete() //which should trigger the clean up of the whole thing
        }
    }
}
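
One caveat on the solution above: `write(b: Int)` pushes every single byte through the processor as a boxed `Byte`, and the `bufferTimeout(4096, ...)` stage exists only to glue them back together. A minimal sketch of a chunk-based variant (hypothetical, not part of the answer above) that overrides the bulk `write` instead and drops the second processor entirely:

import org.springframework.core.io.buffer.DataBuffer
import org.springframework.core.io.buffer.DataBufferFactory
import reactor.core.publisher.UnicastProcessor
import reactor.util.concurrent.Queues
import java.io.OutputStream

// Hypothetical chunking variant: each bulk write from the gzip stream
// becomes exactly one DataBuffer, with no per-byte boxing or bufferTimeout.
class ChunkingOutputStream(private val bufferFactory: DataBufferFactory) : OutputStream() {

    val publisher: UnicastProcessor<DataBuffer> =
            UnicastProcessor.create(Queues.unbounded<DataBuffer>().get())

    // Route single-byte writes through the bulk path to preserve ordering.
    override fun write(b: Int) = write(byteArrayOf(b.toByte()), 0, 1)

    override fun write(b: ByteArray, off: Int, len: Int) {
        val buffer = bufferFactory.allocateBuffer(len)
        buffer.write(b, off, len)
        publisher.onNext(buffer)
    }

    override fun close() {
        publisher.onComplete()
    }
}

It drops in where `PublishingOutputStream` is constructed in `streamCookbook`. On newer Reactor versions, `Sinks.many().unicast().onBackpressureBuffer()` would replace the since-deprecated `UnicastProcessor`.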