C++ ffmpeg sws_scale from YUV420P to RGB24 produces a distorted image
I get a distorted image when trying to convert YUV420p to RGB24 using sws_scale. Code:
ret = avcodec_decode_video2(video_dec_ctx, frame, &got_frame, &pkt);
if (ret < 0) {
    fprintf(stderr, "Error decoding video frame\n");
    return ret;
}
if (*got_frame)
{
    printf("video_frame%s n:%d coded_n:%d pts:%s\n",
           cached ? "(cached)" : "",
           video_frame_count++, frame->coded_picture_number,
           "#"/*av_ts2timestr(frame->pts, &video_dec_ctx->time_base)*/);
    /* copy decoded frame to destination buffer:
     * this is required since rawvideo expects non aligned data */
    av_image_copy(video_dst_data, video_dst_linesize,
                  (const uint8_t **)(frame->data), frame->linesize,
                  video_dec_ctx->pix_fmt, video_dec_ctx->width, video_dec_ctx->height);
    /* write to rawvideo file */
    fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
    AVPicture pic;
    avpicture_alloc(&pic, AV_PIX_FMT_RGB24, frame->width, frame->height);
    SwsContext *ctxt = sws_getContext(frame->width, frame->height, static_cast<AVPixelFormat>(frame->format),
                                      frame->width, frame->height, AV_PIX_FMT_RGB24, SWS_BILINEAR, NULL, NULL, NULL);
    if (NULL == ctxt)
    {
        // log("failed to get sws context");
    }
    if (0 < sws_scale(ctxt, frame->data, frame->linesize, 0, frame->height, pic.data, pic.linesize))
    {
        char szPic[256] = { 0 };
        sprintf(szPic, "decoded/%d.bmp", video_frame_count);
        FILE *pf = fopen(szPic, "w");
        if (NULL != pf)
        {
            BITMAPFILEHEADER bmpFileHeader = { 0 };
            bmpFileHeader.bfReserved1 = 0;
            bmpFileHeader.bfReserved2 = 0;
            bmpFileHeader.bfType = 0x4D42;
            bmpFileHeader.bfSize = sizeof(bmpFileHeader) + sizeof(BITMAPINFOHEADER) + pic.linesize[0] * frame->height;
            bmpFileHeader.bfOffBits = sizeof(bmpFileHeader) + sizeof(BITMAPINFOHEADER);
            BITMAPINFOHEADER bmiHeader = { 0 };
            bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
            bmiHeader.biWidth = frame->width;
            bmiHeader.biHeight = 0 - frame->height;
            bmiHeader.biPlanes = 1;
            bmiHeader.biBitCount = 24;
            bmiHeader.biCompression = BI_RGB;
            bmiHeader.biSizeImage = pic.linesize[0] * frame->height;
            bmiHeader.biXPelsPerMeter = 0;
            bmiHeader.biYPelsPerMeter = 0;
            bmiHeader.biClrUsed = 0;
            bmiHeader.biClrImportant = 0;
            fwrite(&bmpFileHeader, 1, sizeof(bmpFileHeader), pf);
            fwrite(&bmiHeader, 1, sizeof(bmiHeader), pf);
            fwrite(pic.data[0], 1, pic.linesize[0] * frame->height, pf);
            fclose(pf);
        }
    }
    // pic.data[0] now contains the image data in RGB format (3 bytes per pixel)
    // and pic.linesize[0] is the pitch of the data (i.e. size of a row in memory, which can be larger than width*sizeof(pixel))
    avpicture_free(&pic);
    sws_freeContext(ctxt);
}
The code above only decodes the frame, converts it to RGB24, and then writes a bitmap.
The raw video frame looks fine, but the converted image is distorted.
Is some code missing, or is some of it wrong?
Thanks in advance.

Answer: maybe this code can help you. It works fine:
// ---- Decode one packet and render the frame ----------------------------
// (Fragment: the enclosing function's header is outside this excerpt.)
// Decodes `avpkt` with the deprecated avcodec_decode_video2 API, converts
// the decoded frame to BGR24 via libswscale, wraps the pixels in a
// top-down 24bpp DIB and stretch-blits it into the parent window.
int got_frame = 0;
auto len = avcodec_decode_video2(m_avCodecContext
, m_avFrame
, &got_frame
, &avpkt);
if (len < 0)
{
// Decode error: drop this packet.
return;
}
if (got_frame /*&& !silentMode*/)
{
//if (videoRenderer != nullptr)
{
if (frameSize == NULL)
{
return;
}
// Scratch destination variables; only `ret` is actually used below.
uint8_t *dst_data[4];
int dst_linesize[4];
int dst_w, dst_h;
int ret = 0;
if (1)// avcodec_alloc_frame()
{
// Pixel planes and per-plane strides of the decoded source frame.
auto stride = m_avFrame->linesize;
auto scan0 = m_avFrame->data;
// BGR24 is requested because Windows DIBs store pixels in BGR order.
// NOTE(review): a fresh SwsContext is created and destroyed on every
// frame; sws_getCachedContext would reuse it across calls.
SwsContext *scaleContext = sws_getContext(m_avCodecContext->width
, m_avCodecContext->height
, m_avCodecContext->pix_fmt
, m_avCodecContext->width
, m_avCodecContext->height
, PixelFormat::PIX_FMT_BGR24
, SWS_FAST_BILINEAR, NULL, NULL, NULL);
if (scaleContext == NULL)
{
//TODO: log error
return;
}
try
{
//*vb->signal = 1;
// Allocate the BGR24 destination picture.  avpicture_alloc may pad
// each row, so m_dst_picture.linesize[0] can exceed width*3.
ret = avpicture_alloc(&m_dst_picture
, PixelFormat::PIX_FMT_BGR24
, m_avCodecContext->width
, m_avCodecContext->height);
// AVFrame *picture_RGB;
// uint8_t *bufferRGB;
// picture_RGB = avcodec_alloc_frame();
// bufferRGB = (uint8_t*)malloc(720*576*(24/8)/*avpicture_get_size(PIX_FMT_RGB24, 720, 576)*/);
// avpicture_fill((AVPicture *)picture_RGB, bufferRGB, PIX_FMT_RGB24, 720, 576);
if (ret < 0)
{
// NOTE(review): scaleContext leaks on this early return.
return;
}
int retScale = sws_scale(scaleContext
, scan0
, stride
, 0
, m_avCodecContext->height
, m_dst_picture.data //picture_RGB->data
, m_dst_picture.linesize //picture_RGB->linesize
);
if (1)
{
HWND hwnd = m_pParent->GetSafeHwnd();
SetFocus(hwnd);
CRect rc;
m_pParent->GetClientRect(rc);
CDC *cdc = m_pParent->GetDC();
char* bitmap = (char*)m_dst_picture.data[0];
// static unsigned int i = 0;
// bmp_save(bitmap, m_avCodecContext->width, m_avCodecContext->height, i++);
// Build a top-down 24bpp DIB header describing the converted pixels.
BITMAPINFO bmpinfo;
bmpinfo.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
bmpinfo.bmiHeader.biWidth = m_avCodecContext->width;
// Negative height = top-down DIB, matching sws_scale's row order.
bmpinfo.bmiHeader.biHeight = -m_avCodecContext->height;
bmpinfo.bmiHeader.biPlanes = 1;
bmpinfo.bmiHeader.biBitCount = 24;
bmpinfo.bmiHeader.biCompression = BI_RGB;
// NOTE(review): this assumes a packed stride of width*3.  GDI expects
// DWORD-aligned rows, and avpicture_alloc may pad linesize[0] further;
// if m_dst_picture.linesize[0] differs from the stride GDI computes,
// the displayed image shears -- confirm, and copy row-by-row into a
// GDI-stride buffer if they differ.
bmpinfo.bmiHeader.biSizeImage =
m_avCodecContext->width * m_avCodecContext->height * (24 / 8);
bmpinfo.bmiHeader.biXPelsPerMeter = 100;
bmpinfo.bmiHeader.biYPelsPerMeter = 100;
bmpinfo.bmiHeader.biClrUsed = 0;
bmpinfo.bmiHeader.biClrImportant = 0;
HBITMAP hBitmap = CreateDIBitmap(cdc->GetSafeHdc(), &bmpinfo.bmiHeader, CBM_INIT, bitmap, &bmpinfo/*bi*/, DIB_RGB_COLORS);
DrawBitmap(cdc, hBitmap, m_pParent);
::DeleteObject(hBitmap);
// NOTE(review): a DC obtained via CWnd::GetDC must be returned with
// ReleaseDC, not DeleteObject -- this call is wrong and leaks the DC.
::DeleteObject(cdc->GetSafeHdc());
}
avpicture_free(&m_dst_picture);
sws_freeContext(scaleContext);
}
catch (int e)
{
// NOTE(review): m_dst_picture leaks here if the exception was thrown
// after avpicture_alloc succeeded.
sws_freeContext(scaleContext);
}
}
}
}
// Stretch-draws `hbitmap` so it fills the client area of `wnd`.
//
// pDC     - destination device context (borrowed; the caller owns/releases it)
// hbitmap - bitmap to draw; ownership stays with the caller
// wnd     - window whose client rect defines the destination size
//
// Fixes vs. the original: the CBitmap returned by CBitmap::FromHandle is a
// temporary managed by MFC and must not be deleted; a DC created with
// CreateCompatibleDC must be destroyed with DeleteDC (not DeleteObject or
// ReleaseDC); and a plain BITMAP struct is not a GDI handle at all, so
// DeleteObject(&bm) was invalid.
void DrawBitmap(CDC *pDC, HBITMAP hbitmap, CWnd *wnd)
{
    // Temporary MFC wrapper around the caller's HBITMAP -- do not delete.
    CBitmap *pBitmap = CBitmap::FromHandle(hbitmap);
    BITMAP bm;
    pBitmap->GetBitmap(&bm);

    CDC MemDC;
    MemDC.CreateCompatibleDC(pDC);
    CBitmap *pOldBitmap = MemDC.SelectObject(pBitmap);

    CRect rc;
    wnd->GetClientRect(rc);

    // COLORONCOLOR avoids the color averaging of the default stretch mode.
    pDC->SetStretchBltMode(COLORONCOLOR);
    pDC->StretchBlt(0, 0, rc.Width(), rc.Height(),
                    &MemDC, 0, 0, bm.bmWidth, bm.bmHeight, SRCCOPY);

    // Deselect our bitmap before destroying the DC, then destroy the
    // memory DC with DeleteDC (the CDC destructor would also do this).
    MemDC.SelectObject(pOldBitmap);
    MemDC.DeleteDC();
}
// Initializes the H.264 decoder: allocates the codec context, attaches
// SPS/PPS extradata parsed from the SDP sprop-parameter-sets (each NAL
// prefixed with an Annex-B start code), opens the decoder, and allocates
// the reusable decode frame (m_avFrame).
// Uses the pre-libavcodec-54 API (avcodec_alloc_context / avcodec_open /
// avcodec_alloc_frame), which is deprecated/removed in modern FFmpeg.
void initDecoder()
{
m_avCodecContext = avcodec_alloc_context();
if (!m_avCodecContext)
{
//failed to allocate codec context
Cleanup();
return;
}
m_avCodecContext->flags = 0;
// Annex-B start-code prefix placed before every parameter-set NAL.
uint8_t startCode[] = { 0x00, 0x00, 0x01 };
//////////////////////////////////////////////////////////////////////////
// For live streams the sprop/extradata block below can reportedly be
// commented out.
if (m_sProps != NULL)
{
// USES_CONVERSION;
// ::MessageBox(NULL, A2T(sprops), TEXT("sprops"), MB_OK);
unsigned spropCount;
// parseSPropParameterSets (live555) decodes the base64 SPS/PPS list.
SPropRecord* spropRecords = parseSPropParameterSets(m_sProps, spropCount);
try
{
for (unsigned i = 0; i < spropCount; ++i)
{
AddExtraData(startCode, sizeof(startCode));
AddExtraData(spropRecords[i].sPropBytes, spropRecords[i].sPropLength);
}
}
catch (void*)
{
//extradata exceeds size limit
delete[] spropRecords;
Cleanup();
return;
}
delete[] spropRecords;
// Hand the accumulated buffer to the codec context.
// NOTE(review): extradata_size is captured HERE, but AddExtraData is
// called once more below -- if that call grows the buffer, those bytes
// are not reflected in extradata_size; and when m_sProps == NULL the
// extradata is never attached at all.  Confirm AddExtraData's semantics.
m_avCodecContext->extradata = extraDataBuffer;
m_avCodecContext->extradata_size = extraDataSize;
}
AddExtraData(startCode, sizeof(startCode));
bInitEx = true;
// NOTE(review): registration normally precedes any other libav call,
// yet avcodec_alloc_context was already invoked above -- verify order.
av_register_all();
avcodec_register_all();
m_codecId = CODEC_ID_H264;
m_avCodec = avcodec_find_decoder(m_codecId);
if (m_avCodec == NULL)
{
return;
}
// Must be called after extradata is attached so SPS/PPS are parsed.
if (avcodec_open(m_avCodecContext, m_avCodec) < 0)
{
//failed to open codec
Cleanup();
return;
}
if (m_avCodecContext->codec_id == CODEC_ID_H264)
{
// Allow packets containing partial frames (e.g. RTP fragments).
m_avCodecContext->flags2 |= CODEC_FLAG2_CHUNKS;
//avCodecContext->flags2 |= CODEC_FLAG2_SHOW_ALL;
}
m_avFrame = avcodec_alloc_frame();
if (!m_avFrame)
{
//failed to allocate frame
Cleanup();
return;
}
}
int got_frame = 0;
auto len = avcodec_decode_video2(m_avCodecContext
, m_avFrame
, &got_frame
, &avpkt);
if (len < 0)
{
return;
}
if (got_frame /*&& !silentMode*/)
{
//if (videoRenderer != nullptr)
{
if (frameSize == NULL)
{
return;
}
uint8_t *dst_data[4];
int dst_linesize[4];
int dst_w, dst_h;
int ret = 0;
if (1)// avcodec_alloc_frame()
{
auto stride = m_avFrame->linesize;
auto scan0 = m_avFrame->data;
SwsContext *scaleContext = sws_getContext(m_avCodecContext->width
, m_avCodecContext->height
, m_avCodecContext->pix_fmt
, m_avCodecContext->width
, m_avCodecContext->height
, PixelFormat::PIX_FMT_BGR24
, SWS_FAST_BILINEAR, NULL, NULL, NULL);
if (scaleContext == NULL)
{
//TODO: log error
return;
}
try
{
//*vb->signal = 1;
ret = avpicture_alloc(&m_dst_picture
, PixelFormat::PIX_FMT_BGR24
, m_avCodecContext->width
, m_avCodecContext->height);
// AVFrame *picture_RGB;
// uint8_t *bufferRGB;
// picture_RGB = avcodec_alloc_frame();
// bufferRGB = (uint8_t*)malloc(720*576*(24/8)/*avpicture_get_size(PIX_FMT_RGB24, 720, 576)*/);
// avpicture_fill((AVPicture *)picture_RGB, bufferRGB, PIX_FMT_RGB24, 720, 576);
if (ret < 0)
{
return;
}
// ---- Decode one packet and render the frame ----------------------------
// (Fragment: the enclosing function's header is outside this excerpt.)
// Decodes `avpkt` with the deprecated avcodec_decode_video2 API, converts
// the decoded frame to BGR24 via libswscale, wraps the pixels in a
// top-down 24bpp DIB and stretch-blits it into the parent window.
int got_frame = 0;
auto len = avcodec_decode_video2(m_avCodecContext
, m_avFrame
, &got_frame
, &avpkt);
if (len < 0)
{
// Decode error: drop this packet.
return;
}
if (got_frame /*&& !silentMode*/)
{
//if (videoRenderer != nullptr)
{
if (frameSize == NULL)
{
return;
}
// Scratch destination variables; only `ret` is actually used below.
uint8_t *dst_data[4];
int dst_linesize[4];
int dst_w, dst_h;
int ret = 0;
if (1)// avcodec_alloc_frame()
{
// Pixel planes and per-plane strides of the decoded source frame.
auto stride = m_avFrame->linesize;
auto scan0 = m_avFrame->data;
// BGR24 is requested because Windows DIBs store pixels in BGR order.
// NOTE(review): a fresh SwsContext is created and destroyed on every
// frame; sws_getCachedContext would reuse it across calls.
SwsContext *scaleContext = sws_getContext(m_avCodecContext->width
, m_avCodecContext->height
, m_avCodecContext->pix_fmt
, m_avCodecContext->width
, m_avCodecContext->height
, PixelFormat::PIX_FMT_BGR24
, SWS_FAST_BILINEAR, NULL, NULL, NULL);
if (scaleContext == NULL)
{
//TODO: log error
return;
}
try
{
//*vb->signal = 1;
// Allocate the BGR24 destination picture.  avpicture_alloc may pad
// each row, so m_dst_picture.linesize[0] can exceed width*3.
ret = avpicture_alloc(&m_dst_picture
, PixelFormat::PIX_FMT_BGR24
, m_avCodecContext->width
, m_avCodecContext->height);
// AVFrame *picture_RGB;
// uint8_t *bufferRGB;
// picture_RGB = avcodec_alloc_frame();
// bufferRGB = (uint8_t*)malloc(720*576*(24/8)/*avpicture_get_size(PIX_FMT_RGB24, 720, 576)*/);
// avpicture_fill((AVPicture *)picture_RGB, bufferRGB, PIX_FMT_RGB24, 720, 576);
if (ret < 0)
{
// NOTE(review): scaleContext leaks on this early return.
return;
}
int retScale = sws_scale(scaleContext
, scan0
, stride
, 0
, m_avCodecContext->height
, m_dst_picture.data //picture_RGB->data
, m_dst_picture.linesize //picture_RGB->linesize
);
if (1)
{
HWND hwnd = m_pParent->GetSafeHwnd();
SetFocus(hwnd);
CRect rc;
m_pParent->GetClientRect(rc);
CDC *cdc = m_pParent->GetDC();
char* bitmap = (char*)m_dst_picture.data[0];
// static unsigned int i = 0;
// bmp_save(bitmap, m_avCodecContext->width, m_avCodecContext->height, i++);
// Build a top-down 24bpp DIB header describing the converted pixels.
BITMAPINFO bmpinfo;
bmpinfo.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
bmpinfo.bmiHeader.biWidth = m_avCodecContext->width;
// Negative height = top-down DIB, matching sws_scale's row order.
bmpinfo.bmiHeader.biHeight = -m_avCodecContext->height;
bmpinfo.bmiHeader.biPlanes = 1;
bmpinfo.bmiHeader.biBitCount = 24;
bmpinfo.bmiHeader.biCompression = BI_RGB;
// NOTE(review): this assumes a packed stride of width*3.  GDI expects
// DWORD-aligned rows, and avpicture_alloc may pad linesize[0] further;
// if m_dst_picture.linesize[0] differs from the stride GDI computes,
// the displayed image shears -- confirm, and copy row-by-row into a
// GDI-stride buffer if they differ.
bmpinfo.bmiHeader.biSizeImage =
m_avCodecContext->width * m_avCodecContext->height * (24 / 8);
bmpinfo.bmiHeader.biXPelsPerMeter = 100;
bmpinfo.bmiHeader.biYPelsPerMeter = 100;
bmpinfo.bmiHeader.biClrUsed = 0;
bmpinfo.bmiHeader.biClrImportant = 0;
HBITMAP hBitmap = CreateDIBitmap(cdc->GetSafeHdc(), &bmpinfo.bmiHeader, CBM_INIT, bitmap, &bmpinfo/*bi*/, DIB_RGB_COLORS);
DrawBitmap(cdc, hBitmap, m_pParent);
::DeleteObject(hBitmap);
// NOTE(review): a DC obtained via CWnd::GetDC must be returned with
// ReleaseDC, not DeleteObject -- this call is wrong and leaks the DC.
::DeleteObject(cdc->GetSafeHdc());
}
avpicture_free(&m_dst_picture);
sws_freeContext(scaleContext);
}
catch (int e)
{
// NOTE(review): m_dst_picture leaks here if the exception was thrown
// after avpicture_alloc succeeded.
sws_freeContext(scaleContext);
}
}
}
}
// Stretch-draws `hbitmap` so it fills the client area of `wnd`.
//
// pDC     - destination device context (borrowed; the caller owns/releases it)
// hbitmap - bitmap to draw; ownership stays with the caller
// wnd     - window whose client rect defines the destination size
//
// Fixes vs. the original: the CBitmap returned by CBitmap::FromHandle is a
// temporary managed by MFC and must not be deleted; a DC created with
// CreateCompatibleDC must be destroyed with DeleteDC (not DeleteObject or
// ReleaseDC); and a plain BITMAP struct is not a GDI handle at all, so
// DeleteObject(&bm) was invalid.
void DrawBitmap(CDC *pDC, HBITMAP hbitmap, CWnd *wnd)
{
    // Temporary MFC wrapper around the caller's HBITMAP -- do not delete.
    CBitmap *pBitmap = CBitmap::FromHandle(hbitmap);
    BITMAP bm;
    pBitmap->GetBitmap(&bm);

    CDC MemDC;
    MemDC.CreateCompatibleDC(pDC);
    CBitmap *pOldBitmap = MemDC.SelectObject(pBitmap);

    CRect rc;
    wnd->GetClientRect(rc);

    // COLORONCOLOR avoids the color averaging of the default stretch mode.
    pDC->SetStretchBltMode(COLORONCOLOR);
    pDC->StretchBlt(0, 0, rc.Width(), rc.Height(),
                    &MemDC, 0, 0, bm.bmWidth, bm.bmHeight, SRCCOPY);

    // Deselect our bitmap before destroying the DC, then destroy the
    // memory DC with DeleteDC (the CDC destructor would also do this).
    MemDC.SelectObject(pOldBitmap);
    MemDC.DeleteDC();
}
// Initializes the H.264 decoder: allocates the codec context, attaches
// SPS/PPS extradata parsed from the SDP sprop-parameter-sets (each NAL
// prefixed with an Annex-B start code), opens the decoder, and allocates
// the reusable decode frame (m_avFrame).
// Uses the pre-libavcodec-54 API (avcodec_alloc_context / avcodec_open /
// avcodec_alloc_frame), which is deprecated/removed in modern FFmpeg.
void initDecoder()
{
m_avCodecContext = avcodec_alloc_context();
if (!m_avCodecContext)
{
//failed to allocate codec context
Cleanup();
return;
}
m_avCodecContext->flags = 0;
// Annex-B start-code prefix placed before every parameter-set NAL.
uint8_t startCode[] = { 0x00, 0x00, 0x01 };
//////////////////////////////////////////////////////////////////////////
// For live streams the sprop/extradata block below can reportedly be
// commented out.
if (m_sProps != NULL)
{
// USES_CONVERSION;
// ::MessageBox(NULL, A2T(sprops), TEXT("sprops"), MB_OK);
unsigned spropCount;
// parseSPropParameterSets (live555) decodes the base64 SPS/PPS list.
SPropRecord* spropRecords = parseSPropParameterSets(m_sProps, spropCount);
try
{
for (unsigned i = 0; i < spropCount; ++i)
{
AddExtraData(startCode, sizeof(startCode));
AddExtraData(spropRecords[i].sPropBytes, spropRecords[i].sPropLength);
}
}
catch (void*)
{
//extradata exceeds size limit
delete[] spropRecords;
Cleanup();
return;
}
delete[] spropRecords;
// Hand the accumulated buffer to the codec context.
// NOTE(review): extradata_size is captured HERE, but AddExtraData is
// called once more below -- if that call grows the buffer, those bytes
// are not reflected in extradata_size; and when m_sProps == NULL the
// extradata is never attached at all.  Confirm AddExtraData's semantics.
m_avCodecContext->extradata = extraDataBuffer;
m_avCodecContext->extradata_size = extraDataSize;
}
AddExtraData(startCode, sizeof(startCode));
bInitEx = true;
// NOTE(review): registration normally precedes any other libav call,
// yet avcodec_alloc_context was already invoked above -- verify order.
av_register_all();
avcodec_register_all();
m_codecId = CODEC_ID_H264;
m_avCodec = avcodec_find_decoder(m_codecId);
if (m_avCodec == NULL)
{
return;
}
// Must be called after extradata is attached so SPS/PPS are parsed.
if (avcodec_open(m_avCodecContext, m_avCodec) < 0)
{
//failed to open codec
Cleanup();
return;
}
if (m_avCodecContext->codec_id == CODEC_ID_H264)
{
// Allow packets containing partial frames (e.g. RTP fragments).
m_avCodecContext->flags2 |= CODEC_FLAG2_CHUNKS;
//avCodecContext->flags2 |= CODEC_FLAG2_SHOW_ALL;
}
m_avFrame = avcodec_alloc_frame();
if (!m_avFrame)
{
//failed to allocate frame
Cleanup();
return;
}
}
// BROKEN: dumps the whole buffer including the per-row alignment padding
// that avpicture_alloc added (linesize[0] >= width*3); a reader expecting
// packed rows interprets the padding as pixels and the image shears.
fwrite( pic.data[0], 1, pic.linesize[0] * frame->height, pf );
// FIXED: write only each row's pixel bytes, then advance by the pitch
// (linesize[0]) to skip the padding.  For RGB24 a row holds width*3
// bytes, not width -- the original loop dropped two thirds of every row.
uint8_t *ptr = pic.data[0];
for (int y = 0; y < frame->height; y++) {
    fwrite(ptr, 1, frame->width * 3, pf);  // 3 bytes per RGB24 pixel
    ptr += pic.linesize[0];
}