閱讀466 返回首頁    go 汽車大全


FFmpeg 入門之 Tutorial01 (FFmpeg Getting Started: Tutorial 01)


#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <stdio.h>

/*
 * Write one RGB24 frame to disk as a binary PPM ("P6") image named
 * "frame<iFrame>.ppm".
 *
 * pFrame  - frame whose data[0] holds packed RGB24 pixels; rows are
 *           linesize[0] bytes apart in memory, of which width*3 bytes
 *           are actual pixel data.
 * width   - image width in pixels
 * height  - image height in pixels
 * iFrame  - frame index used to build the output file name
 *
 * Returns early (silently) if the output file cannot be opened.
 */
void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame)
{
  FILE *pFile;
  char szFilename[32];
  int  y;

  // Build the file name; snprintf bounds the write (the original used
  // an unbounded sprintf into a 32-byte buffer).
  snprintf(szFilename, sizeof szFilename, "frame%d.ppm", iFrame);
  pFile=fopen(szFilename, "wb");
  if(pFile==NULL)
    return;

  // PPM header: magic number, dimensions, maximum channel value.
  fprintf(pFile, "P6\n%d %d\n255\n", width, height);

  // Rows may be padded in memory (linesize[0] >= width*3), so write
  // exactly width*3 bytes per row, skipping the padding.
  for(y=0; y<height; y++)
    fwrite(pFrame->data[0]+y*pFrame->linesize[0], 1, width*3, pFile);

  // fclose flushes buffered data; check it so a write error (e.g. full
  // disk) is not silently ignored.
  if(fclose(pFile)!=0)
    fprintf(stderr, "SaveFrame: error closing %s\n", szFilename);
}

int main(int argc, char *argv[])
{
  AVFormatContext *pFormatCtx;
  int             i, videoStream;
  AVCodecContext  *pCodecCtx;
  AVCodec         *pCodec;
  AVFrame         *pFrame;
  AVFrame         *pFrameRGB;
  AVPacket        packet;
  int             frameFinished;
  int             numBytes;
  uint8_t         *buffer;
  static int sws_flags = SWS_BICUBIC;
  struct SwsContext *img_convert_ctx;
   AVPicture pict; 
  argc = 2;
  argv[1] = "d:\\temp\\test.264";
  if(argc < 2) {
    printf("Please provide a movie file\n");
    return -1;
  }
  // /*注冊所有可用的格式和編解碼器*/
  av_register_all();
 
  // Open video file /*以輸入方式打開一個媒體文件,也即源文件,
codecs並沒有打開,隻讀取了文件的頭信息*/
  if(av_open_input_file(&pFormatCtx, argv[1], NULL, 0, NULL)!=0)
    return -1; // Couldn't open file
 
  // Retrieve stream information
/*通過讀取媒體文件的中的包來獲取媒體文件中的流信息,對於沒有頭信息的文件如(mpeg)是非常有用的,

// 該函數通常重算類似mpeg-2幀模式的真實幀率,該函數並未改變邏輯文件的position.
*/
  if(av_find_stream_info(pFormatCtx)<0)
    return -1; // Couldn't find stream information
 
  // Dump information about file onto standard error
//該函數的作用就是檢查下初始化過程中設置的參數是否符合規範

  dump_format(pFormatCtx, 0, argv[1], 0);
 
  // Find the first video stream
  videoStream=-1;
  for(i=0; i<pFormatCtx->nb_streams; i++)
    if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO&&videoStream < 0)
 {
      videoStream=i;
      break;
    }
  if(videoStream==-1)
    return -1; // Didn't find a video stream
 
  // Get a pointer to the codec context for the video stream
  pCodecCtx=pFormatCtx->streams[videoStream]->codec;
 
  // Find the decoder for the video stream
  pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
/*通過code ID查找一個已經注冊的音視頻編碼器,查找編碼器之前,必須先調用av_register_all注冊所有支持的編碼器
音視頻編碼器保存在一個鏈表中,查找過程中,函數從頭到尾遍曆鏈表,通過比較編碼器的ID來查找

*/
  if(pCodec==NULL) {
    fprintf(stderr, "Unsupported codec!\n");
    return -1; // Codec not found
  }
  // Open codec
//使用給定的AVCodec初始化AVCodecContext


  if(avcodec_open(pCodecCtx, pCodec)<0)
    return -1; // Could not open codec
 
  // Allocate video frame
  pFrame=avcodec_alloc_frame();
 
  // Allocate an AVFrame structure
  pFrameRGB=avcodec_alloc_frame();
  if(pFrameRGB==NULL)
    return -1;
 
  // Determine required buffer size and allocate buffer
  numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
         pCodecCtx->height);
  buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
 
  // Assign appropriate parts of buffer to image planes in pFrameRGB
  // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
  // of AVPicture
  avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
   pCodecCtx->width, pCodecCtx->height);
 
  // Read frames and save first five frames to disk
  i=0;
  while(av_read_frame(pFormatCtx, &packet)>=0)
  {
    // Is this a packet from the video stream?
    if(packet.stream_index==videoStream)
 {
    // Decode video frame
    avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
       packet.data, packet.size);
     
    // Did we get a video frame?
    if(frameFinished)
    {
  // Convert the image from its native format to RGB

    #if 0
     // Convert the image into YUV format that SDL uses
     img_convert(&pict, PIX_FMT_YUV420P,
         (AVPicture *)pFrame, pCodecCtx->pix_fmt,
       pCodecCtx->width, pCodecCtx->height);
    #else
      /*   img_convert(&pict, dst_pix_fmt,
      (AVPicture *)pFrame, is->video_st->codec->pix_fmt,
      is->video_st->codec->width, is->video_st->codec->height);
      */


        img_convert_ctx = sws_getContext( pCodecCtx->width,
               pCodecCtx->height,
               pCodecCtx->pix_fmt,
               pCodecCtx->width,
               pCodecCtx->height,
               PIX_FMT_YUV420P,
               sws_flags, NULL, NULL, NULL);

        sws_scale(img_convert_ctx,(const uint8_t* const*)pFrame->data,pFrame->linesize,0,pCodecCtx->height,pFrameRGB->data,pFrameRGB->linesize);  
        sws_freeContext(img_convert_ctx);
    #endif
     
     // Save the frame to disk
     if((++i<=100)&&(i>95))
       SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i);
    }
    }
   
    // Free the packet that was allocated by av_read_frame
    av_free_packet(&packet);
  }
 
  // Free the RGB image
  av_free(buffer);
  av_free(pFrameRGB);
 
  // Free the YUV frame
  av_free(pFrame);
 
  // Close the codec
  avcodec_close(pCodecCtx);
 
  // Close the video file
  av_close_input_file(pFormatCtx);
 
  return 0;
}

最後更新:2017-04-03 16:49:00

  上一篇:go ffmpeg結構體以及函數介紹(一)
  下一篇:go android圖片異步加載到本地