C++中的FFMPEG和DirectX捕获

C++中的FFMPEG和DirectX捕获,c++,ffmpeg,directx,screen-capture,我有一个系统，允许我捕获一个窗口并使用ffmpeg将其保存为mp4。我使用gdigrab捕获帧，但速度相当慢（每个av_read_frame调用60毫秒）。我知道我可以使用DirectX API捕获游戏，但我不知道如何将生成的BMP转换为AVFrame。下面的代码是我用来捕获帧的DirectX代码。

我有一个系统，允许我捕获一个窗口并使用ffmpeg将其保存为mp4。我使用gdigrab捕获帧，但速度相当慢（每个av_read_frame调用60毫秒）

我知道我可以使用DirectX API捕获游戏,但我不知道如何将生成的BMP转换为AVFrame

下面的代码是我用来捕获帧的DirectX代码

// Capture the Direct3D 9 front buffer (the desktop image) into the
// caller-provided pixel buffer `pBits`, one row at a time.
extern void* pBits;
extern IDirect3DDevice9* g_pd3dDevice;
IDirect3DSurface9* pSurface;
// GetFrontBufferData requires the destination surface to live in
// D3DPOOL_SYSTEMMEM; a D3DPOOL_SCRATCH surface is not accessible to the
// device and the call fails (was D3DPOOL_SCRATCH — fixed).
g_pd3dDevice->CreateOffscreenPlainSurface(ScreenWidth, ScreenHeight,
                                      D3DFMT_A8R8G8B8, D3DPOOL_SYSTEMMEM,
                                      &pSurface, NULL);
// TODO(review): both calls above and below return HRESULTs that are
// ignored — check with FAILED(hr) before using the surface.
g_pd3dDevice->GetFrontBufferData(0, pSurface);
D3DLOCKED_RECT lockedRect;
// Read-only lock; fixed the stray ")))" that made this line unparsable.
pSurface->LockRect(&lockedRect, NULL,
               D3DLOCK_NO_DIRTY_UPDATE |
               D3DLOCK_NOSYSLOCK | D3DLOCK_READONLY);
for( int i=0 ; i < ScreenHeight ; i++)
{
    // Copy row by row: the locked surface pitch may be wider than the
    // tightly-packed destination row (ScreenWidth * BITSPERPIXEL / 8).
    memcpy( (BYTE*) pBits + i * ScreenWidth * BITSPERPIXEL / 8 , 
         (BYTE*) lockedRect.pBits + i* lockedRect.Pitch , 
         ScreenWidth * BITSPERPIXEL / 8);
}
// Fixed: was "g_pSurface->UnlockRect()" — g_pSurface is never declared;
// the surface that was locked is pSurface.
pSurface->UnlockRect();
pSurface->Release();
extern void* pBits;
extern IDirect3DDevice9* g_pd3dDevice;
IDirect3DSurface9* pSurface;
g_pd3dDevice->CreateOffscreenPlainSurface(ScreenWidth, ScreenHeight,
                                      D3DFMT_A8R8G8B8, D3DPOOL_SCRATCH,
                                      &pSurface, NULL);
g_pd3dDevice->GetFrontBufferData(0, pSurface);
D3DLOCKED_RECT lockedRect;
pSurface->LockRect(&lockedRect, NULL,
               D3DLOCK_NO_DIRTY_UPDATE |
               D3DLOCK_NOSYSLOCK | D3DLOCK_READONLY);
for (int i = 0; i < ScreenHeight; i++)
{
    memcpy((BYTE*)pBits + i * ScreenWidth * BITSPERPIXEL / 8,
           (BYTE*)lockedRect.pBits + i * lockedRect.Pitch,
           ScreenWidth * BITSPERPIXEL / 8);
}
pSurface->UnlockRect();
pSurface->Release();
这是我的阅读循环:

// Demux/decode loop: pull packets from the gdigrab input, decode video
// packets, rescale to the encoder's pixel format with sws_scale, and
// hand the converted frame to the encoder.
{
    while (1) {
        // Stop on EOF / read error, or when the external stop flag is set.
        // NOTE(review): `exit` shadows std::exit — rename the flag (e.g.
        // `stopRequested`) to avoid confusion.
        if (av_read_frame(pFormatCtx, &packet) < 0 || exit)
            break;
        if (packet.stream_index == videoindex) {
            // Rescale packet timestamps from the capture rate
            // {1, fps} to the codec time base.
            av_packet_rescale_ts(&packet, { 1, std::stoi(pParser->GetVal("video-fps")) }, pCodecCtx->time_base);
            // Decode video frame. NOTE(review): avcodec_decode_video2 is
            // deprecated; prefer avcodec_send_packet/avcodec_receive_frame
            // on FFmpeg >= 3.1.
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

            if (frameFinished) {
                // Assign monotonically increasing pts from counter `i`.
                pFrame->pts = i;
                i++;
                sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);
                pFrameRGB->pts = pFrame->pts;
                enc.encodeFrame(pFrameRGB);
            }
        } // fixed: this closing brace for the stream_index check was missing
        // Free the packet that was allocated by av_read_frame.
        // Must run for every packet (not just video) to avoid a leak.
        av_free_packet(&packet);
    } // fixed: close the while loop (original braces were unbalanced)
}
{
    while (1) {
        if (av_read_frame(pFormatCtx, &packet) < 0 || exit)
            break;
        if (packet.stream_index == videoindex) {
            // Decode video frame
            av_packet_rescale_ts(&packet, { 1, std::stoi(pParser->GetVal("video-fps")) }, pCodecCtx->time_base);
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
            if (frameFinished) {
                pFrame->pts = i;
                i++;
                sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);
                pFrameRGB->pts = pFrame->pts;
                enc.encodeFrame(pFrameRGB);
            }
        }
        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
    }
}
如何使用现有的bmp创建AVFrame,而不使用av_read_frame