Changing the resolution of the ffmpeg output to display a video stream in Android


I am trying to change the camera output (resolution: 640 x 480) to 1024 x 720 and render the video frames on the Android screen. Is it possible to do this video conversion using the ffmpeg and SDL libraries? If so, is there an API available in the ffmpeg codec libraries to do this?

Here is the code with which I currently get 640 x 480 output:

//Registering all the formats:
av_register_all();
AVFormatContext *pFormatCtx=NULL;
int             i, videoStream;
AVCodecContext  *pCodecCtx=NULL;
AVCodec         *pCodec=NULL;
AVFrame         *pFrame;
AVPacket        packet;
int             frameFinished;
SDL_Texture         *bmp;
SDL_Renderer *renderer;
SDL_Window  *screen;

if (SDL_Init(SDL_INIT_VIDEO)) {
    LOGD( "Could not initialize SDL - %s\n", SDL_GetError());
    SDL_Quit();
    exit(1);
}
LOGD(" SDL Initialized..");

screen = SDL_CreateWindow("Window", SDL_WINDOWPOS_UNDEFINED,
        SDL_WINDOWPOS_UNDEFINED, 0, 0,
        SDL_WINDOW_SHOWN | SDL_WINDOW_FULLSCREEN);
LOGD("SDL Screen Created ..");

renderer = SDL_CreateRenderer(screen,-1,SDL_RENDERER_ACCELERATED | SDL_RENDERER_TARGETTEXTURE);
LOGD("Rendering Created...");

bmp = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING, 640, 480);
LOGD("Texture created;");

SDL_RenderSetLogicalSize(renderer,640,480);
// Open video file
if(avformat_open_input(&pFormatCtx,"Filename", NULL,NULL)!=0)
    LOGD("Cannot open the File");

pFormatCtx->interrupt_callback.callback = decode_interrupt_cb;

// Retrieve stream information
if(avformat_find_stream_info(pFormatCtx,NULL)<0)
    LOGD("Cannot retrive Stream info");
  // Couldn't find stream information

videoStream=-1;
for(i=0; i<pFormatCtx->nb_streams; i++) {
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
        videoStream=i;
        break;
    }
}
if(videoStream==-1)
    LOGD("Cannot find Video Stream:");
pCodecCtx=pFormatCtx->streams[videoStream]->codec;
// Find the decoder for the video stream
pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec==NULL) {
  fprintf(stderr, "Unsupported codec!\n");
  LOGD("Unable to find the decoder");
    // Codec not found
 }
 // Open codec
 if(avcodec_open2(pCodecCtx, pCodec,NULL)<0)
  LOGD("Unable to OPEN Codec");
  // Could not open codec

 // Allocate video frame
  pFrame=avcodec_alloc_frame();

  i=0;
  while(av_read_frame(pFormatCtx, &packet)>=0) {
    // Is this a packet from the video stream?
    if(packet.stream_index==videoStream) {
      // Decode video frame
      avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

      // Did we get a video frame?
      if(frameFinished) {
        //----------------Code for Displaying
        SDL_UpdateYUVTexture(bmp, NULL,
                             pFrame->data[0], pFrame->linesize[0],
                             pFrame->data[1], pFrame->linesize[1],
                             pFrame->data[2], pFrame->linesize[2]);

        int retcl   = SDL_RenderClear(renderer);
        int retcopy = SDL_RenderCopy(renderer, bmp, NULL, NULL);
        SDL_RenderPresent(renderer);
        //-----------------
      }
    }

    // Free the packet that was allocated by av_read_frame
    av_free_packet(&packet);
  }

  // Free the YUV frame
  av_free(pFrame);

  // Close the codec
  avcodec_close(pCodecCtx);

  // Close the video file
  avformat_close_input(&pFormatCtx);

  return 0;
}

ffmpeg uses a "filtergraph" to apply filters such as scaling and cropping. If you look at the example, you will see how to apply filters programmatically. If you can integrate that example into your code, then all you have to do is change the string filter_descr to scale the video to the resolution you need.
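A rough sketch of what that graph setup could look like when added to the code above. It follows the general pattern of ffmpeg's programmatic filtering example and sticks to the same old-style API (avfilter_register_all, etc.) that the question already uses; the function name init_scale_filter and the minimal error handling are placeholders, not part of the original answer:

#include <libavfilter/avfilter.h>
#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>
#include <libavutil/opt.h>

static AVFilterGraph   *filter_graph;
static AVFilterContext *buffersrc_ctx;
static AVFilterContext *buffersink_ctx;

// Build a graph that runs decoded frames through filter_descr, e.g. "scale=1024:720".
static int init_scale_filter(AVCodecContext *pCodecCtx, const char *filter_descr)
{
    char args[512];
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs  = avfilter_inout_alloc();
    int ret;

    avfilter_register_all();
    filter_graph = avfilter_graph_alloc();

    // Buffer source: decoded frames enter the graph here, described by the
    // decoder's parameters (the stream's time_base could be used instead).
    snprintf(args, sizeof(args),
             "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
             pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
             pCodecCtx->time_base.num, pCodecCtx->time_base.den,
             pCodecCtx->sample_aspect_ratio.num, pCodecCtx->sample_aspect_ratio.den);
    ret = avfilter_graph_create_filter(&buffersrc_ctx, avfilter_get_by_name("buffer"),
                                       "in", args, NULL, filter_graph);
    if (ret < 0) return ret;

    // Buffer sink: scaled frames are read back from here.
    ret = avfilter_graph_create_filter(&buffersink_ctx, avfilter_get_by_name("buffersink"),
                                       "out", NULL, NULL, filter_graph);
    if (ret < 0) return ret;

    // Wire "in" -> filter_descr -> "out".
    outputs->name       = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx    = 0;
    outputs->next       = NULL;
    inputs->name        = av_strdup("out");
    inputs->filter_ctx  = buffersink_ctx;
    inputs->pad_idx     = 0;
    inputs->next        = NULL;

    ret = avfilter_graph_parse_ptr(filter_graph, filter_descr, &inputs, &outputs, NULL);
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    if (ret < 0) return ret;
    return avfilter_graph_config(filter_graph, NULL);
}

The graph would be created once, after avcodec_open2 succeeds, and the build would additionally need to link against libavfilter.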

Filters are defined the same way as they are for the standalone ffmpeg program. For more information, see the documentation.
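With such a graph in place, the decode loop from the question could push each decoded frame into the source and pull the scaled frame from the sink before updating the texture. Again only a sketch under the same assumptions; note that the SDL texture and logical size in the question are created at 640 x 480 and would need to be 1024 x 720 to match the scaled frames:

// Inside the loop, once frameFinished is set:
AVFrame *filt_frame = av_frame_alloc();
if (av_buffersrc_add_frame_flags(buffersrc_ctx, pFrame, AV_BUFFERSRC_FLAG_KEEP_REF) >= 0 &&
    av_buffersink_get_frame(buffersink_ctx, filt_frame) >= 0) {
    // filt_frame now has the dimensions requested in filter_descr.
    SDL_UpdateYUVTexture(bmp, NULL,
                         filt_frame->data[0], filt_frame->linesize[0],
                         filt_frame->data[1], filt_frame->linesize[1],
                         filt_frame->data[2], filt_frame->linesize[2]);
    SDL_RenderClear(renderer);
    SDL_RenderCopy(renderer, bmp, NULL, NULL);
    SDL_RenderPresent(renderer);
}
av_frame_free(&filt_frame);

The filter string itself uses the same syntax as the command-line tool, so for example "scale=1024:720", or "scale=1024:-1" to preserve the aspect ratio, can be swapped in without touching the graph code.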