Muxing H264 and AAC into FLV: A Worked Example
1. The FFmpeg muxing workflow
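The full example in section 9 follows the standard libavformat muxing sequence. Below is a rough sketch of that call order (error handling and encoder setup omitted; the names mirror the example code):

AVFormatContext *oc = NULL;
avformat_alloc_output_context2(&oc, NULL, NULL, "out.flv"); // bind the flv muxer based on the filename
AVStream *st = avformat_new_stream(oc, NULL);               // one call per audio/video stream
// ... configure and open the encoders, copy parameters into st->codecpar ...
avio_open(&oc->pb, "out.flv", AVIO_FLAG_WRITE);             // open the output IO (flv does not set AVFMT_NOFILE)
avformat_write_header(oc, NULL);                            // write the FLV header; may rewrite the stream time_base
// ... encode frames and hand the packets to av_interleaved_write_frame() ...
av_write_trailer(oc);                                       // flush interleaving buffers and write the trailer
avio_closep(&oc->pb);
avformat_free_context(oc);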
2. FFmpeg function: avformat_write_header
int avformat_write_header(AVFormatContext *s, AVDictionary **options)
{
    int ret = 0;
    int already_initialized = s->internal->initialized;
    int streams_already_initialized = s->internal->streams_initialized;

    if (!already_initialized)
        if ((ret = avformat_init_output(s, options)) < 0)
            return ret;

    if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
        avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_HEADER);
    if (s->oformat->write_header) {
        ret = s->oformat->write_header(s);
        if (ret >= 0 && s->pb && s->pb->error < 0)
            ret = s->pb->error;
        if (ret < 0)
            goto fail;
        flush_if_needed(s);
    }
    if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
        avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_UNKNOWN);

    if (!s->internal->streams_initialized) {
        if ((ret = init_pts(s)) < 0)
            goto fail;
    }

    return streams_already_initialized;

fail:
    deinit_muxer(s);
    return ret;
}

3. FFmpeg function: avformat_alloc_output_context2
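Note that avformat_alloc_output_context2 is a function, not a struct: it allocates an AVFormatContext for output and binds a matching AVOutputFormat to it. A minimal usage sketch, following the two calls the example in section 9 makes (the filename "out.flv" is only an illustration):

AVFormatContext *oc = NULL;
// deduce the muxer from the file extension ...
int ret = avformat_alloc_output_context2(&oc, NULL, NULL, "out.flv");
// ... or name the muxer explicitly, as the example does as a fallback
if (!oc)
    ret = avformat_alloc_output_context2(&oc, NULL, "flv", "out.flv");
// ret >= 0 on success; on failure *oc stays NULL
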
4. FFmpeg struct: AVOutputFormat
1. Description

AVOutputFormat describes an output container format, i.e. a muxer. libavformat registers one instance per muxer, for example ff_flv_muxer for FLV, and avformat_alloc_output_context2() binds the matching instance to the AVFormatContext.
2. Struct definition
typedef struct AVOutputFormat {
    const char *name;
    /**
     * Descriptive name for the format, meant to be more human-readable
     * than name. You should use the NULL_IF_CONFIG_SMALL() macro
     * to define it.
     */
    const char *long_name;
    const char *mime_type;
    const char *extensions; /**< comma-separated filename extensions */
    /* output support */
    enum AVCodecID audio_codec;    /**< default audio codec */
    enum AVCodecID video_codec;    /**< default video codec */
    enum AVCodecID subtitle_codec; /**< default subtitle codec */
    /**
     * can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER,
     * AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS,
     * AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH,
     * AVFMT_TS_NONSTRICT, AVFMT_TS_NEGATIVE
     */
    int flags;
    /**
     * List of supported codec_id-codec_tag pairs, ordered by "better
     * choice first". The arrays are all terminated by AV_CODEC_ID_NONE.
     */
    const struct AVCodecTag * const *codec_tag;

    const AVClass *priv_class; ///< AVClass for the private context

    /*****************************************************************
     * No fields below this line are part of the public API. They
     * may not be used outside of libavformat and can be changed and
     * removed at will.
     * New public fields should be added right above.
     *****************************************************************
     */
    /**
     * The ff_const59 define is not part of the public API and will
     * be removed without further warning.
     */
#if FF_API_AVIOFORMAT
#define ff_const59
#else
#define ff_const59 const
#endif
    ff_const59 struct AVOutputFormat *next;
    /**
     * size of private data so that it can be allocated in the wrapper
     */
    int priv_data_size;

    int (*write_header)(struct AVFormatContext *);
    /**
     * Write a packet. If AVFMT_ALLOW_FLUSH is set in flags,
     * pkt can be NULL in order to flush data buffered in the muxer.
     * When flushing, return 0 if there still is more data to flush,
     * or 1 if everything was flushed and there is no more buffered
     * data.
     */
    int (*write_packet)(struct AVFormatContext *, AVPacket *pkt);
    int (*write_trailer)(struct AVFormatContext *);
    /**
     * A format-specific function for interleavement.
     * If unset, packets will be interleaved by dts.
     */
    int (*interleave_packet)(struct AVFormatContext *, AVPacket *out,
                             AVPacket *in, int flush);
    /**
     * Test if the given codec can be stored in this container.
     *
     * @return 1 if the codec is supported, 0 if it is not.
     *         A negative number if unknown.
     *         MKTAG('A', 'P', 'I', 'C') if the codec is only supported as AV_DISPOSITION_ATTACHED_PIC
     */
    int (*query_codec)(enum AVCodecID id, int std_compliance);

    void (*get_output_timestamp)(struct AVFormatContext *s, int stream,
                                 int64_t *dts, int64_t *wall);
    /**
     * Allows sending messages from application to device.
     */
    int (*control_message)(struct AVFormatContext *s, int type,
                           void *data, size_t data_size);

    /**
     * Write an uncoded AVFrame.
     *
     * See av_write_uncoded_frame() for details.
     *
     * The library will free *frame afterwards, but the muxer can prevent it
     * by setting the pointer to NULL.
     */
    int (*write_uncoded_frame)(struct AVFormatContext *, int stream_index,
                               AVFrame **frame, unsigned flags);
    /**
     * Returns device list with it properties.
     * @see avdevice_list_devices() for more details.
     */
    int (*get_device_list)(struct AVFormatContext *s, struct AVDeviceInfoList *device_list);
    /**
     * Initialize device capabilities submodule.
     * @see avdevice_capabilities_create() for more details.
     */
    int (*create_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps);
    /**
     * Free device capabilities submodule.
     * @see avdevice_capabilities_free() for more details.
     */
    int (*free_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps);
    enum AVCodecID data_codec; /**< default data codec */
    /**
     * Initialize format. May allocate data here, and set any AVFormatContext or
     * AVStream parameters that need to be set before packets are sent.
     * This method must not write output.
     *
     * Return 0 if streams were fully configured, 1 if not, negative AVERROR on failure
     *
     * Any allocations made here must be freed in deinit().
     */
    int (*init)(struct AVFormatContext *);
    /**
     * Deinitialize format. If present, this is called whenever the muxer is being
     * destroyed, regardless of whether or not the header has been written.
     *
     * If a trailer is being written, this is called after write_trailer().
     *
     * This is called if init() fails as well.
     */
    void (*deinit)(struct AVFormatContext *);
    /**
     * Set up any necessary bitstream filtering and extract any extra data needed
     * for the global header.
     * Return 0 if more packets from this stream must be checked; 1 if not.
     */
    int (*check_bitstream)(struct AVFormatContext *, const AVPacket *pkt);
} AVOutputFormat;

3. Common fields and their roles
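For the example in section 9, the fields that matter most are: name/long_name, which identify the muxer; audio_codec and video_codec, the default codec IDs, which main() overrides with AV_CODEC_ID_H264 and AV_CODEC_ID_AAC; and flags, which main() tests for AVFMT_GLOBALHEADER (to set AV_CODEC_FLAG_GLOBAL_HEADER on the encoders) and AVFMT_NOFILE (to decide whether avio_open() is needed). The write_header, write_packet and write_trailer callbacks are the muxer hooks invoked behind avformat_write_header(), av_interleaved_write_frame() and av_write_trailer() respectively.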
5. FFmpeg function: avformat_new_stream
1. Related structs
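The function ties together the two structs this section refers to, AVFormatContext and AVStream. Its declaration in libavformat (FFmpeg 4.x) is:

AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c);

It appends a new AVStream to s->streams, increments s->nb_streams (which is why add_stream() in section 9 computes the stream id as nb_streams - 1), and returns NULL on failure. The example passes NULL for c and copies the encoder parameters into st->codecpar later via avcodec_parameters_from_context().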
6. FFmpeg function: av_interleaved_write_frame
1. Parameters
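The signature is:

int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt);

s is the muxing context, whose header must already have been written. pkt holds the compressed data with stream_index set and pts/dts already rescaled to the stream's time_base, which is exactly what write_frame() in section 9 does via av_packet_rescale_ts() before calling it. If the packet is reference-counted, the function takes ownership of the reference; passing pkt = NULL flushes the internal interleaving queues. It returns 0 on success and a negative AVERROR on error.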
7. FFmpeg function: av_compare_ts
/**
 * Compare two timestamps each in its own time base.
 *
 * @return One of the following values:
 *         - -1 if `ts_a` is before `ts_b`
 *         - 1 if `ts_a` is after `ts_b`
 *         - 0 if they represent the same position
 *
 * @warning
 * The result of the function is undefined if one of the timestamps is outside
 * the `int64_t` range when represented in the other's timebase.
 */
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b);

8. FFmpeg timestamps explained
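A timestamp only has meaning together with its timebase: the time in seconds is pts * num/den. A quick worked sketch using the timebases from the code below (the pts values are illustrative):

AVRational audio_tb = {1, 44100};  // audio: one tick per sample
AVRational video_tb = {1, 25};     // video: one tick per frame
// 1 second of audio and 1 second of video compare as equal:
av_compare_ts(44100, audio_tb, 25, video_tb);         // -> 0  (both are 1.0 s)
av_compare_ts(22050, audio_tb, 25, video_tb);         // -> -1 (0.5 s is before 1.0 s)
// rescaling moves a timestamp between timebases, as write_frame() does:
av_rescale_q(-1024, audio_tb, (AVRational){1, 1000}); // -> -23 (flv streams use 1/1000)

This is exactly why the main loop in section 9 can interleave the two streams by comparing video_st.next_pts against audio_st.next_pts even though one counts frames and the other counts samples.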
9. H264 + AAC to FLV: code implementation
/**
 * @file
 * libavformat API example.
 *
 * Output a media file in any supported libavformat format. The default
 * codecs are used.
 * @example muxing.c
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include <libavutil/avassert.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>

#define STREAM_DURATION   5.0                // stream duration in seconds
#define STREAM_FRAME_RATE 25                 /* 25 images/s */
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */

#define SCALE_FLAGS SWS_BICUBIC              // scaling flags

// Wraps a single output AVStream
typedef struct OutputStream {
    AVStream *st;        // one stream; each audio or video track is an independent stream
    AVCodecContext *enc; // encoder context

    /* pts of the next frame that will be generated */
    int64_t next_pts;
    int samples_count;   // running total of audio samples

    AVFrame *frame;      // frame after resampling (after scaling, for video)
    AVFrame *tmp_frame;  // frame before resampling

    float t, tincr, tincr2; // parameters used to synthesize the PCM and YUV test data

    struct SwsContext *sws_ctx; // image scaler
    struct SwrContext *swr_ctx; // audio resampler
} OutputStream;

static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
{
    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;

    printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
           av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
           pkt->stream_index);
}

static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base,
                       AVStream *st, AVPacket *pkt)
{
    /* rescale output packet timestamp values from codec to stream timebase */
    // e.g. pts_before = -1024 in the codec timebase
    av_packet_rescale_ts(pkt, *time_base, st->time_base);
    pkt->stream_index = st->index;
    // pts_before * 1/44100 = pts_after * 1/1000
    // pts_after = pts_before * 1/44100 * 1000 = -1024 * 1/44100 * 1000 = -23

    /* Write the compressed frame to the media file. */
    log_packet(fmt_ctx, pkt);
    return av_interleaved_write_frame(fmt_ctx, pkt);
}

// Add an output stream: returns the AVStream and selects the encoder; the codec is not opened yet
static void add_stream(OutputStream *ost, AVFormatContext *oc,
                       AVCodec **codec,
                       enum AVCodecID codec_id)
{
    AVCodecContext *codec_ctx;
    int i;

    /* find the encoder by codec_id */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "Could not find encoder for '%s'\n",
                avcodec_get_name(codec_id));
        exit(1);
    }

    // create a new stream bound to the AVFormatContext; stream->index is set here
    ost->st = avformat_new_stream(oc, NULL);
    if (!ost->st) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }
    /* Why -1? Each call to avformat_new_stream increments nb_streams,
       but ids start at 0, e.g.:
       1st stream: id = nb_streams(1) - 1 = 0
       2nd stream: id = nb_streams(2) - 1 = 1 */
    ost->st->id = oc->nb_streams - 1;
    codec_ctx = avcodec_alloc_context3(*codec); // allocate the encoder context
    if (!codec_ctx) {
        fprintf(stderr, "Could not alloc an encoding context\n");
        exit(1);
    }
    ost->enc = codec_ctx;

    // initialize the encoder parameters
    switch ((*codec)->type) {
    case AVMEDIA_TYPE_AUDIO:
        codec_ctx->codec_id = codec_id;
        codec_ctx->sample_fmt = (*codec)->sample_fmts ? // sample format
                                (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
        codec_ctx->bit_rate = 64000;    // bit rate
        codec_ctx->sample_rate = 44100; // sample rate
        if ((*codec)->supported_samplerates) {
            codec_ctx->sample_rate = (*codec)->supported_samplerates[0];
            for (i = 0; (*codec)->supported_samplerates[i]; i++) {
                if ((*codec)->supported_samplerates[i] == 44100)
                    codec_ctx->sample_rate = 44100;
            }
        }
        codec_ctx->channel_layout = AV_CH_LAYOUT_STEREO;
        codec_ctx->channels = av_get_channel_layout_nb_channels(codec_ctx->channel_layout);
        if ((*codec)->channel_layouts) {
            codec_ctx->channel_layout = (*codec)->channel_layouts[0];
            for (i = 0; (*codec)->channel_layouts[i]; i++) {
                if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
                    codec_ctx->channel_layout = AV_CH_LAYOUT_STEREO;
            }
        }
        codec_ctx->channels = av_get_channel_layout_nb_channels(codec_ctx->channel_layout);
        // set the stream timebase from the sample rate
        ost->st->time_base = (AVRational) {1, codec_ctx->sample_rate};
        break;

    case AVMEDIA_TYPE_VIDEO:
        codec_ctx->codec_id = codec_id;
        codec_ctx->bit_rate = 400000;
        /* Resolution must be a multiple of two. */
        codec_ctx->width = 352; // resolution
        codec_ctx->height = 288;
        codec_ctx->max_b_frames = 1;
        /* timebase: This is the fundamental unit of time (in seconds) in terms
         * of which frame timestamps are represented. For fixed-fps content,
         * timebase should be 1/framerate and timestamp increments should be
         * identical to 1. */
        ost->st->time_base = (AVRational) {1, STREAM_FRAME_RATE}; // timebase
        codec_ctx->time_base = ost->st->time_base; // the encoder needs its own timebase as well
        codec_ctx->gop_size = STREAM_FRAME_RATE;   // one intra frame roughly every second
        codec_ctx->pix_fmt = STREAM_PIX_FMT;
        break;

    default:
        break;
    }

    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; // global headers go into extradata
}

/**************************************************************/
/* audio output */

static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
                                  uint64_t channel_layout,
                                  int sample_rate, int nb_samples)
{
    AVFrame *frame = av_frame_alloc();
    int ret;

    if (!frame) {
        fprintf(stderr, "Error allocating an audio frame\n");
        exit(1);
    }

    frame->format = sample_fmt;
    frame->channel_layout = channel_layout;
    frame->sample_rate = sample_rate;
    frame->nb_samples = nb_samples;

    if (nb_samples) {
        ret = av_frame_get_buffer(frame, 0);
        if (ret < 0) {
            fprintf(stderr, "Error allocating an audio buffer\n");
            exit(1);
        }
    }

    return frame;
}

static void open_audio(AVFormatContext *oc, AVCodec *codec,
                       OutputStream *ost, AVDictionary *opt_arg)
{
    AVCodecContext *codec_ctx;
    int nb_samples;
    int ret;
    AVDictionary *opt = NULL;

    codec_ctx = ost->enc;

    /* open it */
    av_dict_copy(&opt, opt_arg, 0);
    // 1. attach the encoder; this also sets codec_ctx->time_base
    ret = avcodec_open2(codec_ctx, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* init signal generator */
    // 2. initialize the parameters of the PCM signal generator
    ost->t = 0;
    ost->tincr = 2 * M_PI * 110.0 / codec_ctx->sample_rate;
    /* increment frequency by 110 Hz per second */
    ost->tincr2 = 2 * M_PI * 110.0 / codec_ctx->sample_rate / codec_ctx->sample_rate;

    // samples needed per frame
    // if (codec_ctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
    //     nb_samples = 10000; // very few encoders support a variable frame size, so this branch is commented out
    // else
    nb_samples = codec_ctx->frame_size;

    // signal generator -> PCM -> ost->tmp_frame -> swr_convert resampling -> ost->frame -> encoder
    // allocate the frame handed to the encoder, along with its buffer
    ost->frame = alloc_audio_frame(codec_ctx->sample_fmt, codec_ctx->channel_layout,
                                   codec_ctx->sample_rate, nb_samples);
    // allocate the frame that receives the generated PCM, along with its buffer
    ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, codec_ctx->channel_layout,
                                       codec_ctx->sample_rate, nb_samples);

    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, codec_ctx);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }

    /* create the resampler context */
    ost->swr_ctx = swr_alloc();
    if (!ost->swr_ctx) {
        fprintf(stderr, "Could not allocate resampler context\n");
        exit(1);
    }

    /* set options */
    av_opt_set_int(ost->swr_ctx, "in_channel_count", codec_ctx->channels, 0);
    av_opt_set_int(ost->swr_ctx, "in_sample_rate", codec_ctx->sample_rate, 0);
    av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
    av_opt_set_int(ost->swr_ctx, "out_channel_count", codec_ctx->channels, 0);
    av_opt_set_int(ost->swr_ctx, "out_sample_rate", codec_ctx->sample_rate, 0);
    av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", codec_ctx->sample_fmt, 0);

    /* initialize the resampling context */
    if ((ret = swr_init(ost->swr_ctx)) < 0) {
        fprintf(stderr, "Failed to initialize the resampling context\n");
        exit(1);
    }
}

/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
 * 'nb_channels' channels. */
static AVFrame *get_audio_frame(OutputStream *ost)
{
    AVFrame *frame = ost->tmp_frame;
    int j, i, v;
    int16_t *q = (int16_t *) frame->data[0];

    /* check if we want to generate more frames */
    // 44100 * {1, 44100} = 1 -> 44100*5 * {1, 44100} = 5
    // 5 * {1, 1} = 5
    if (av_compare_ts(ost->next_pts, ost->enc->time_base,
                      STREAM_DURATION, (AVRational) {1, 1}) >= 0)
        return NULL;

    for (j = 0; j < frame->nb_samples; j++) {
        v = (int) (sin(ost->t) * 10000);
        for (i = 0; i < ost->enc->channels; i++)
            *q++ = v;
        ost->t += ost->tincr;
        ost->tincr += ost->tincr2;
    }

    frame->pts = ost->next_pts;         // pts counted in samples: 0, nb_samples(1024), 2048, ...
    ost->next_pts += frame->nb_samples; // audio PTS advances by the sample count

    return frame;
}

/*
 * encode one audio frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *codec_ctx;
    AVPacket pkt = {0}; // data and size must be 0;
    AVFrame *frame;
    int ret;
    int got_packet;
    int dst_nb_samples;

    av_init_packet(&pkt);
    codec_ctx = ost->enc;

    frame = get_audio_frame(ost);
    if (frame) {
        /* convert samples from native format to destination codec format, using the resampler */
        /* compute destination number of samples */
        dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, codec_ctx->sample_rate) + frame->nb_samples,
                                        codec_ctx->sample_rate, codec_ctx->sample_rate, AV_ROUND_UP);
        av_assert0(dst_nb_samples == frame->nb_samples);

        /* when we pass a frame to the encoder, it may keep a reference to it
         * internally;
         * make sure we do not overwrite it here
         */
        ret = av_frame_make_writable(ost->frame);
        if (ret < 0)
            exit(1);

        /* convert to destination format */
        ret = swr_convert(ost->swr_ctx,
                          ost->frame->data, dst_nb_samples,
                          (const uint8_t **) frame->data, frame->nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error while converting\n");
            exit(1);
        }
        frame = ost->frame;

        // convert to the encoder timebase
        frame->pts = av_rescale_q(ost->samples_count, (AVRational) {1, codec_ctx->sample_rate},
                                  codec_ctx->time_base);
        ost->samples_count += dst_nb_samples;
    }

    ret = avcodec_encode_audio2(codec_ctx, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
        exit(1);
    }

    if (got_packet) {
        ret = write_frame(oc, &codec_ctx->time_base, ost->st, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error while writing audio frame: %s\n",
                    av_err2str(ret));
            exit(1);
        }
    }

    // frame == NULL: no more frames to generate (the 5 seconds are done); got_packet == 0: encoder fully drained
    return (frame || got_packet) ? 0 : 1;
}

/**************************************************************/
/* video output */

static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
    AVFrame *picture;
    int ret;

    picture = av_frame_alloc();
    if (!picture)
        return NULL;

    picture->format = pix_fmt;
    picture->width = width;
    picture->height = height;

    /* allocate the buffers for the frame data */
    ret = av_frame_get_buffer(picture, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate frame data.\n");
        exit(1);
    }

    return picture;
}

static void open_video(AVFormatContext *oc, AVCodec *codec,
                       OutputStream *ost, AVDictionary *opt_arg)
{
    int ret;
    AVCodecContext *codec_ctx = ost->enc;
    AVDictionary *opt = NULL;

    av_dict_copy(&opt, opt_arg, 0);

    /* open the codec */
    // 1. attach the encoder
    ret = avcodec_open2(codec_ctx, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
        exit(1);
    }

    // 2. allocate the frame buffers
    /* allocate and init a re-usable frame */
    ost->frame = alloc_picture(codec_ctx->pix_fmt, codec_ctx->width, codec_ctx->height);
    if (!ost->frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    ost->tmp_frame = NULL;
    if (codec_ctx->pix_fmt != AV_PIX_FMT_YUV420P) {
        // image scaling is only needed when the encoder wants a format other than AV_PIX_FMT_YUV420P
        ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, codec_ctx->width, codec_ctx->height);
        if (!ost->tmp_frame) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }

    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, codec_ctx);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }
}

/* Prepare a dummy image. */
static void fill_yuv_image(AVFrame *pict, int frame_index,
                           int width, int height)
{
    int x, y, i;

    i = frame_index;

    /* Y */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

    /* Cb and Cr */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}

static AVFrame *get_video_frame(OutputStream *ost)
{
    AVCodecContext *codec_ctx = ost->enc;

    /* check if we want to generate more frames */
    // for this test we only generate STREAM_DURATION (here 5.0 s) of video
    if (av_compare_ts(ost->next_pts, codec_ctx->time_base,
                      STREAM_DURATION, (AVRational) {1, 1}) >= 0)
        return NULL;

    /* when we pass a frame to the encoder, it may keep a reference to it
     * internally; make sure we do not overwrite it here */
    if (av_frame_make_writable(ost->frame) < 0)
        exit(1);

    if (codec_ctx->pix_fmt != AV_PIX_FMT_YUV420P) {
        /* as we only generate a YUV420P picture, we must convert it
         * to the codec pixel format if needed */
        if (!ost->sws_ctx) {
            ost->sws_ctx = sws_getContext(codec_ctx->width, codec_ctx->height,
                                          AV_PIX_FMT_YUV420P,
                                          codec_ctx->width, codec_ctx->height,
                                          codec_ctx->pix_fmt,
                                          SCALE_FLAGS, NULL, NULL, NULL);
            if (!ost->sws_ctx) {
                fprintf(stderr, "Could not initialize the conversion context\n");
                exit(1);
            }
        }
        fill_yuv_image(ost->tmp_frame, ost->next_pts, codec_ctx->width, codec_ctx->height);
        sws_scale(ost->sws_ctx, (const uint8_t *const *) ost->tmp_frame->data,
                  ost->tmp_frame->linesize, 0, codec_ctx->height, ost->frame->data,
                  ost->frame->linesize);
    } else {
        fill_yuv_image(ost->frame, ost->next_pts, codec_ctx->width, codec_ctx->height);
    }

    ost->frame->pts = ost->next_pts++; // increment by 1: one tick is 1/25 s = 40 ms
    // 0 1 2 -> 0 ms, 40 ms, 80 ms

    return ost->frame;
}

/*
 * encode one video frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
{
    int ret;
    AVCodecContext *codec_ctx;
    AVFrame *frame;
    int got_packet = 0;
    AVPacket pkt = {0};

    codec_ctx = ost->enc;

    frame = get_video_frame(ost);

    av_init_packet(&pkt);

    /* encode the image */
    ret = avcodec_encode_video2(codec_ctx, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
        exit(1);
    }

    if (got_packet) {
        ret = write_frame(oc, &codec_ctx->time_base, ost->st, &pkt);
    } else {
        ret = 0;
    }

    if (ret < 0) {
        fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
        exit(1);
    }

    // two conditions here:
    // frame != NULL: YUV frames are still being generated
    // got_packet == 1: the encoder still had buffered packets
    return (frame || got_packet) ? 0 : 1;
}

static void close_stream(AVFormatContext *oc, OutputStream *ost)
{
    avcodec_free_context(&ost->enc);
    av_frame_free(&ost->frame);
    av_frame_free(&ost->tmp_frame);
    sws_freeContext(ost->sws_ctx);
    swr_free(&ost->swr_ctx);
}

/**************************************************************/
/* media file output */

int main(int argc, char **argv)
{
    OutputStream video_st = {0}; // wraps everything related to video encoding
    OutputStream audio_st = {0}; // wraps everything related to audio encoding
    const char *filename;        // output file
    // AVOutputFormat ff_flv_muxer
    AVOutputFormat *fmt; // output container format: encapsulates the muxing rules (AVInputFormat encapsulates demuxing)
    AVFormatContext *oc;
    AVCodec *audio_codec, *video_codec;
    int ret;
    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;
    AVDictionary *opt = NULL;
    int i;

    if (argc < 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];
    for (i = 2; i + 1 < argc; i += 2) {
        if (!strcmp(argv[i], "-flags") || !strcmp(argv[i], "-fflags"))
            av_dict_set(&opt, argv[i] + 1, argv[i + 1], 0);
    }

    /* allocate the AVFormatContext and bind the AVOutputFormat matching filename */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        // if no suitable format can be deduced from the file extension, fall back to flv
        printf("Could not deduce output format from file extension: using flv.\n");
        avformat_alloc_output_context2(&oc, NULL, "flv", filename);
    }
    if (!oc)
        return 1;

    fmt = oc->oformat; // fetch the bound AVOutputFormat
    // this example only deals with H264 and AAC, so force H264 + AAC
    fmt->video_codec = AV_CODEC_ID_H264; // select the video encoder
    fmt->audio_codec = AV_CODEC_ID_AAC;  // select the audio encoder

    /* add the audio and video streams using the chosen codecs */
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_stream(&video_st, oc, &video_codec, fmt->video_codec);
        have_video = 1;
        encode_video = 1;
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
        have_audio = 1;
        encode_audio = 1;
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (have_video)
        open_video(oc, video_codec, &video_st, opt);
    if (have_audio)
        open_audio(oc, audio_codec, &audio_st, opt);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        // open the output file, creating it if necessary
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

    // before the header: audio AVStream->time_base = 1/44100, video AVStream->time_base = 1/25
    /* Write the header. What does this actually do? The streams' time_base may be
     * rewritten, depending on the container format. */
    ret = avformat_write_header(oc, &opt);
    // after the header (flv): audio time_base = 1/1000, video time_base = 1/1000
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }

    while (encode_video || encode_audio) {
        /* select the stream to encode */
        if (encode_video && // when video_st.next_pts <= audio_st.next_pts (in their own timebases)
            (!encode_audio || av_compare_ts(video_st.next_pts, video_st.enc->time_base,
                                            audio_st.next_pts, audio_st.enc->time_base) <= 0)) {
            printf("\nwrite_video_frame\n");
            encode_video = !write_video_frame(oc, &video_st);
        } else {
            printf("\nwrite_audio_frame\n");
            encode_audio = !write_audio_frame(oc, &audio_st);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (have_video)
        close_stream(oc, &video_st);
    if (have_audio)
        close_stream(oc, &audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_closep(&oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}
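To try the example yourself, assuming an FFmpeg 4.x installation with development headers and pkg-config available (adjust for your platform), a typical build-and-run looks like:

cc muxing.c -o muxing $(pkg-config --cflags --libs libavformat libavcodec libswresample libswscale libavutil) -lm
./muxing out.flv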
Summary

That concludes the complete H264 and AAC to FLV muxing example; hopefully it helps you solve the problems you have run into.