FFmpeg 位图图像到 WebM 视频编码
我试图将位图图像编码为 WebM 视频，但生成的视频无法播放。下面是我正在使用的代码，同样的代码在 H264 上运行良好。编码时控制台窗口显示：[libvpx @ 009b2f60] v1.2.0。生成的视频文件中没有 "webm" 字符串。有什么线索吗？（我是 FFmpeg API 的新手。）
void create_from_bmp()
{
    CFile file[5];
    BYTE *szTxt[5];
    int nWidth = 0;
    int nHeight = 0;
    int nDataLen = 0;
    int nLen;
    CString csFileName;
    for (int fileI = 1; fileI <= 5; fileI++)
    {
        csFileName.Format("e:\\pics\\%d.bmp", fileI);
        file[fileI - 1].Open(csFileName, CFile::modeRead | CFile::typeBinary);
        nLen = file[fileI - 1].GetLength();
        szTxt[fileI - 1] = new BYTE[nLen];
        file[fileI - 1].Read(szTxt[fileI - 1], nLen);
        file[fileI - 1].Close();
        BITMAPFILEHEADER bmpFHeader;
        BITMAPINFOHEADER bmpIHeader;
        memcpy(&bmpFHeader, szTxt[fileI - 1], sizeof(BITMAPFILEHEADER));
        int nHeadLen = bmpFHeader.bfOffBits - sizeof(BITMAPFILEHEADER);
        memcpy(&bmpIHeader, szTxt[fileI - 1] + sizeof(BITMAPFILEHEADER), nHeadLen);
        nWidth = bmpIHeader.biWidth;
        nHeight = bmpIHeader.biHeight;
        szTxt[fileI - 1] += bmpFHeader.bfOffBits;
        nDataLen = nLen - bmpFHeader.bfOffBits;
    }
    av_register_all();
    avcodec_register_all();
    AVFrame *m_pRGBFrame = new AVFrame[1]; // RGB 帧
    AVFrame *m_pYUVFrame = new AVFrame[1]; // YUV 帧
    AVCodecContext *c = NULL;
    AVCodec *pCodecH264;
    uint8_t *yuv_buff;
    pCodecH264 = avcodec_find_encoder(CODEC_ID_VP8);
    if (!pCodecH264)
    {
        fprintf(stderr, "h264 codec not found\n");
        exit(1);
    }
    c = avcodec_alloc_context3(pCodecH264);
    c->bit_rate = 3000000; // 设置码率
    c->width = nWidth;
    c->height = nHeight;
    // 每秒帧数
    AVRational rate;
    rate.num = 1;
    rate.den = 5; // 每秒 5 帧
    c->time_base = rate;
    c->gop_size = 10; // 每十帧发射一个内帧
    c->max_b_frames = 0;
    c->thread_count = 1;
    c->pix_fmt = PIX_FMT_YUV420P;
    c->codec_id = CODEC_ID_VP8;
    if (avcodec_open2(c, pCodecH264, NULL) < 0)
        printf("Cant open codec");
    int size = c->width * c->height;
    yuv_buff = (uint8_t *)malloc((size * 3) / 2); // YUV 420 的大小
    uint8_t *rgb_buff = new uint8_t[nDataLen];
    int outbuf_size = 400000;
    uint8_t *outbuf = (uint8_t *)malloc(outbuf_size);
    FILE *f = fopen("e:\\pics\\myData.h264", "wb");
    SwsContext *scxt = sws_getContext(c->width, c->height, PIX_FMT_BGR24, c->width, c->height, PIX_FMT_YUV420P, SWS_POINT, NULL, NULL, NULL);
    /* ……（与下方英文代码相同，此处原为机器翻译损坏的重复代码）…… */
}
我想使用此代码通过编码屏幕截图来记录屏幕。我可以使用什么样的fps、gop大小或其他参数来减小视频大小。这对您有用吗,我正在构建类似的东西?
void create_from_bmp()
{
CFile file[5];
BYTE *szTxt[5];
int nWidth = 0;
int nHeight= 0;
int nDataLen=0;
int nLen;
CString csFileName;
for (int fileI = 1; fileI <= 5; fileI ++)
{
csFileName.Format("e:\\pics\\%d.bmp",fileI);
file[fileI - 1].Open(csFileName,CFile::modeRead | CFile::typeBinary);
nLen = file[fileI - 1].GetLength();
szTxt[fileI -1] = new BYTE[nLen];
file[fileI - 1].Read(szTxt[fileI - 1], nLen);
file[fileI - 1].Close();
//BMP bmi;//BITMAPINFO bmi;
//int nHeadLen = sizeof(BMP);
BITMAPFILEHEADER bmpFHeader;
BITMAPINFOHEADER bmpIHeader;
memcpy(&bmpFHeader,szTxt[fileI -1],sizeof(BITMAPFILEHEADER));
int nHeadLen = bmpFHeader.bfOffBits - sizeof(BITMAPFILEHEADER);
memcpy(&bmpIHeader,szTxt[fileI - 1]+sizeof(BITMAPFILEHEADER),nHeadLen);
nWidth = bmpIHeader.biWidth;// 464;// bmi.bmpInfo.bmiHeader.biWidth;// ;
nHeight = bmpIHeader.biHeight;//362;// bmi.bmpInfo.bmiHeader.biHeight;// ;
szTxt[fileI - 1] += bmpFHeader.bfOffBits;
nDataLen = nLen-bmpFHeader.bfOffBits;
}
av_register_all();
avcodec_register_all();
AVFrame *m_pRGBFrame = new AVFrame[1]; //RGB
AVFrame *m_pYUVFrame = new AVFrame[1];; //YUV
AVCodecContext *c= NULL;
AVCodecContext *in_c= NULL;
AVCodec *pCodecH264;
uint8_t * yuv_buff;//
pCodecH264 = avcodec_find_encoder(CODEC_ID_VP8);
if(!pCodecH264)
{
fprintf(stderr, "h264 codec not found\n");
exit(1);
}
c= avcodec_alloc_context3(pCodecH264);
c->bit_rate = 3000000;// put sample parameters
c->width =nWidth;//
c->height = nHeight;//
// frames per second
AVRational rate;
rate.num = 1;
rate.den = 5; //5 frames per sec
c->time_base= rate;//(AVRational){1,25};
c->gop_size = 10; // emit one intra frame every ten frames //emit one iframe per sec
c->max_b_frames=0; //
c->thread_count = 1;
c->pix_fmt = PIX_FMT_YUV420P;//PIX_FMT_RGB24;
c->codec_id=CODEC_ID_VP8;
//av_opt_set(c->priv_data, /*"preset"*/"libvpx-1080p.ffpreset", /*"slow"*/NULL, 0);
if(avcodec_open2(c,pCodecH264,NULL)<0)
printf("Cant open codec");
int size = c->width * c->height;
yuv_buff = (uint8_t *) malloc((size * 3) / 2); // size for YUV 420
uint8_t * rgb_buff = new uint8_t[nDataLen];
int outbuf_size=400000;
uint8_t * outbuf= (uint8_t*)malloc(outbuf_size);
int u_size = 0;
FILE *f=NULL;
char * filename = "e:\\pics\\myData.h264";
f = fopen(filename, "wb");
if (!f)
{
printf( "could not open %s\n", filename);
exit(1);
}
//SwsContext
SwsContext * scxt = sws_getContext(c->width,c->height,PIX_FMT_BGR24,c->width,c->height,PIX_FMT_YUV420P,SWS_POINT,NULL,NULL,NULL);
AVPacket avpkt;
//AVFrame *pTFrame=new AVFrame
for (int i=0;i<60;++i)
{
//AVFrame *m_pYUVFrame = new AVFrame[1];
int index = (i / 5) % 5;
memcpy(rgb_buff,szTxt[index],nDataLen);
avpicture_fill((AVPicture*)m_pRGBFrame, (uint8_t*)rgb_buff, PIX_FMT_RGB24, nWidth, nHeight);
avpicture_fill((AVPicture*)m_pYUVFrame, (uint8_t*)yuv_buff, PIX_FMT_YUV420P, nWidth, nHeight);
m_pRGBFrame->data[0] += m_pRGBFrame->linesize[0] * (nHeight - 1);
m_pRGBFrame->linesize[0] *= -1;
m_pRGBFrame->data[1] += m_pRGBFrame->linesize[1] * (nHeight / 2 - 1);
m_pRGBFrame->linesize[1] *= -1;
m_pRGBFrame->data[2] += m_pRGBFrame->linesize[2] * (nHeight / 2 - 1);
m_pRGBFrame->linesize[2] *= -1;
sws_scale(scxt,m_pRGBFrame->data,m_pRGBFrame->linesize,0,c->height,m_pYUVFrame->data,m_pYUVFrame->linesize);
int got_packet_ptr = 0;
av_init_packet(&avpkt);
avpkt.data = outbuf;
avpkt.size = outbuf_size;
u_size = avcodec_encode_video2(c, &avpkt, m_pYUVFrame, &got_packet_ptr);
if (u_size == 0)
{
fwrite(avpkt.data, 1, avpkt.size, f);
}
}
fclose(f);
delete []m_pRGBFrame;
delete []m_pYUVFrame;
delete []rgb_buff;
free(outbuf);
avcodec_close(c);
av_free(c);
}