FFmpeg基础:音视频同步播放
生活随笔
收集整理的這篇文章主要介紹了
FFmpeg基础:音视频同步播放
小編覺得挺不錯的,現在分享給大家,幫大家做個參考.
文章目錄
- 定義全局數據類
- 定義數據隊列
- 定義SDL庫初始化操作
- 定義音視頻流解析函數
- 定義解封裝線程和視頻解碼線程
- 定義音視頻的解碼函數
- 主函數事件響應
視頻文件解復用之后視頻流和音頻流是獨立的,也是獨立播放。由于壓縮方式不同,數據格式不同,在播放的時候音頻流輸出是線性的而視頻流輸出不是線性的,這就會導致視頻流和音頻流的時間偏差越來越大,最終導致音視頻不同步。
為了解決這個問題,我們在播放視頻文件的時候需要調整音頻或者視頻的播放速度,來實現兩種數據的同步。考慮到人對聲音的敏感度要強于視頻,頻繁調節音頻會帶來較差的觀感體驗,且音頻的播放時鐘為線性增長,所以一般會以音頻時鐘為參考時鐘,將視頻同步到音頻上。
這里以一個將視頻流同步到音頻流上的例子來說明一下音視頻同步的實現方式。程序的架構圖如下圖所示:
定義全局數據類
首先定義全局數據類,用于不同線程之間的數據共享。主要數據結構VideoState記錄了音視頻文件的各種上下文參數。
//define.h #ifndef _DEFINE_H_ #define _DEFINE_H_#include <stdio.h> #include <assert.h> #include <math.h>#include <SDL.h> extern "C" { #include <libavcodec/avcodec.h> #include <libavdevice/avdevice.h> #include <libavfilter/avfilter.h> #include <libavformat/avformat.h> #include <libavformat/avio.h> #include <libavutil/avutil.h> #include <libswresample/swresample.h> #include <libswscale/swscale.h> #include <libavutil/frame.h> #include <libavutil/imgutils.h> #include <libavformat/avformat.h> #include <libavutil/time.h> } #include <iostream>#define SDL_AUDIO_BUFFER_SIZE 1024 #define MAX_AUDIO_FRAME_SIZE 192000#define MAX_AUDIOQ_SIZE (5 * 16 * 1024) #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)#define AV_SYNC_THRESHOLD 0.01 #define AV_NOSYNC_THRESHOLD 10.0#define FF_REFRESH_EVENT (SDL_USEREVENT) #define FF_QUIT_EVENT (SDL_USEREVENT + 1)#define VIDEO_PICTURE_QUEUE_SIZE 1typedef struct PacketQueue {AVPacketList *first_pkt, *last_pkt;int nb_packets;int size;SDL_mutex *mutex;SDL_cond *cond; } PacketQueue;typedef struct VideoState {AVFormatContext *pFormatCtx; //音視頻的上下文int videoStreamIndex; //視頻流索引int audioStreamIndex; //音頻流索引AVStream *audio_st; //音頻流指針AVCodecContext *audio_ctx; //音頻流上下文PacketQueue audioq; //音頻流隊列//音頻緩存uint8_t audio_buf[192000 * 3 / 2];unsigned int audio_buf_size; //緩存大小unsigned int audio_buf_index; //緩存索引AVFrame audio_frame; //音頻幀AVPacket audio_pkt; //音頻包uint8_t *audio_pkt_data; //音頻數據指針 int audio_pkt_size; //音頻數據包大小int audio_hw_buf_size;struct SwrContext *audio_swr_ctx; //音頻處理操作類//音視頻的數據幀double audio_clock;double video_clock;double frame_timer;int64_t frame_last_pts;int64_t frame_last_delay;AVStream *video_st; //視頻流AVCodecContext *video_ctx; //視頻流上下文PacketQueue videoq; //視頻數據隊列struct SwsContext *video_sws_ctx; //視頻操作上下文//視頻幀數據隊列AVFrame pictq[VIDEO_PICTURE_QUEUE_SIZE];int pictq_size;int pictq_rindex;int pictq_windex;//操作數據幀的鎖和信號量SDL_mutex *pictq_mutex;SDL_cond *pictq_cond;//解封裝的線程SDL_Thread *parse_tid;//視頻流線程SDL_Thread *video_tid;//輸入文件名稱char 
filename[1024];//退出標志位int quit;AVFrame wanted_frame;SDL_AudioSpec wantedSpec = { 0 };SDL_AudioSpec audioSpec = { 0 }; } VideoState;SDL_mutex *text_mutex; SDL_Window *win; SDL_Renderer *renderer; SDL_Texture *texture;//視頻全局狀態 VideoState* g_state = NULL;#endif定義數據隊列
定義音視頻數據隊列操作,用來緩存音視頻數據包。
//datequeue.h #ifndef _DATA_QUEUE_H_ #define _DATA_QUEUE_H_#include "define.h"int queue_picture(VideoState *is, AVFrame *pFrame, double pts) {SDL_LockMutex(is->pictq_mutex);while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->quit){SDL_CondWait(is->pictq_cond, is->pictq_mutex);}SDL_UnlockMutex(is->pictq_mutex);if (is->quit)return -1;AVFrame* current_frame = &is->pictq[is->pictq_windex];int ret = av_frame_make_writable(current_frame);if (!current_frame->data ||current_frame->width != is->video_ctx->width ||current_frame->height != is->video_ctx->height) {current_frame->format = pFrame->format;current_frame->width = pFrame->width;current_frame->height = pFrame->height;int ret = av_image_alloc(current_frame->data, current_frame->linesize, is->video_ctx->width, is->video_ctx->height,is->video_ctx->pix_fmt, 32);if (is->quit) {return -1;}}//縮放視頻if (current_frame){current_frame->pts = pFrame->pts;//將圖片數據添加到幀中uint8_t *src_planes[4];int src_linesize[4];av_image_fill_arrays(src_planes, src_linesize, (const uint8_t *)pFrame->data, is->video_ctx->pix_fmt,is->video_ctx->width, is->video_ctx->height, 1);//YUV數據轉變成SDL使用的紋理數據sws_scale(is->video_sws_ctx, (uint8_t const * const *)pFrame->data,pFrame->linesize, 0, is->video_ctx->height,current_frame->data, current_frame->linesize);//通知隊列的消費者取數據if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE){is->pictq_windex = 0;}SDL_LockMutex(is->pictq_mutex);is->pictq_size++;SDL_UnlockMutex(is->pictq_mutex);}return 0; }void packet_queue_init(PacketQueue *q) {memset(q, 0, sizeof(PacketQueue));q->mutex = SDL_CreateMutex();q->cond = SDL_CreateCond(); }//添加到數據隊列中 int packet_queue_put(PacketQueue *q, AVPacket *pkt) {AVPacketList *pkt1;AVPacket* newPkt;newPkt = (AVPacket*)av_mallocz_array(1, sizeof(AVPacket));if (av_packet_ref(newPkt, pkt) < 0)return -1;pkt1 = (AVPacketList*)av_malloc(sizeof(AVPacketList));pkt1->pkt = *newPkt;pkt1->next = NULL;SDL_LockMutex(q->mutex);if (!q->last_pkt)q->first_pkt = pkt1;elseq->last_pkt->next = pkt1;q->last_pkt = 
pkt1;q->nb_packets++;q->size += newPkt->size;SDL_CondSignal(q->cond);SDL_UnlockMutex(q->mutex);return 0; }//讀取數據包中的數據 int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) {AVPacketList *pkt1;int ret;SDL_LockMutex(q->mutex);while (1){pkt1 = q->first_pkt;if (pkt1) {q->first_pkt = pkt1->next;if (!q->first_pkt)q->last_pkt = NULL;q->nb_packets--;q->size -= pkt1->pkt.size;*pkt = pkt1->pkt;av_free(pkt1);ret = 1;break;}else if (!block) {ret = 0;break;}else {SDL_CondWait(q->cond, q->mutex);}}SDL_UnlockMutex(q->mutex);return ret; }#endif定義SDL庫初始化操作
//SDL_Wraper.h #ifndef _SDL_WRAPPER_H_ #define _SDL_WRAPPER_H_ #include "define.h"void InitSDL() {//初始化SDLif (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER))printf("There is something wrong with your SDL Libs. Couldn't run");//打開音頻驅動 #ifdef _WIN32SDL_AudioInit("directsound"); #endif }//SDL顯示視頻幀信息 void video_display(VideoState *is) {SDL_Rect rect;AVFrame *vp;float aspect_ratio;int w, h, x, y;int i;vp = &is->pictq[is->pictq_rindex];if (vp){SDL_UpdateYUVTexture(texture, NULL,vp->data[0], vp->linesize[0],vp->data[1], vp->linesize[1],vp->data[2], vp->linesize[2]);rect.x = 0;rect.y = 0;rect.w = is->video_ctx->width;rect.h = is->video_ctx->height;SDL_LockMutex(text_mutex);SDL_RenderClear(renderer);SDL_RenderCopy(renderer, texture, NULL, &rect);SDL_RenderPresent(renderer);SDL_UnlockMutex(text_mutex);} } #endif定義音視頻流解析函數
音視頻流的解析函數將音視頻流的參數解析到全局數據結構體中。
//parser_stream.h #ifndef _PARSER_STREAM_H_ #define _PARSER_STREAM_H_#include "define.h" #include "callback.h"int stream_component_open(VideoState *is, int stream_index) {if (stream_index < 0 || stream_index >= is->pFormatCtx->nb_streams){return -1;}//查找解碼器分配上下文const AVCodec* codec = avcodec_find_decoder(is->pFormatCtx->streams[stream_index]->codecpar->codec_id);if (!codec) {fprintf(stderr, "Unsupported codec!\n");return -1;}AVCodecContext* codecCtx = avcodec_alloc_context3(codec);if (!codecCtx){fprintf(stderr, "new codec context failed!\n");return -1;}int ret = avcodec_parameters_to_context(codecCtx, is->pFormatCtx->streams[stream_index]->codecpar);if (ret < 0){return -2;}if (avcodec_open2(codecCtx, codec, NULL) < 0){fprintf(stderr, "Unsupported codec!\n");return -1;}switch (codecCtx->codec_type){case AVMEDIA_TYPE_AUDIO:is->audio_ctx = codecCtx;//設置音頻參數轉換的上下文is->audio_swr_ctx = swr_alloc();if (is->audio_swr_ctx == NULL){return -4;}//設置通道數,采樣率,采樣格式的輸入輸出格式av_opt_set_channel_layout(is->audio_swr_ctx, "in_channel_layout", codecCtx->channel_layout, 0);av_opt_set_channel_layout(is->audio_swr_ctx, "out_channel_layout", codecCtx->channel_layout, 0);av_opt_set_int(is->audio_swr_ctx, "in_sample_rate", codecCtx->sample_rate, 0);av_opt_set_int(is->audio_swr_ctx, "out_sample_rate", codecCtx->sample_rate, 0);av_opt_set_sample_fmt(is->audio_swr_ctx, "in_sample_fmt", codecCtx->sample_fmt, 0);av_opt_set_sample_fmt(is->audio_swr_ctx, "out_sample_fmt", AV_SAMPLE_FMT_FLT, 0);ret = swr_init(is->audio_swr_ctx);if (ret != 0){return -5;}//打開音響設備memset(&is->wantedSpec, 0, sizeof(is->wantedSpec));is->wantedSpec.channels = codecCtx->channels;is->wantedSpec.freq = codecCtx->sample_rate;is->wantedSpec.format = AUDIO_S16SYS;is->wantedSpec.silence = 0;is->wantedSpec.samples = SDL_AUDIO_BUFFER_SIZE;is->wantedSpec.userdata = codecCtx; //音頻流的上下文is->wantedSpec.callback = audio_callback; //設置數據包的回調函數if (SDL_OpenAudio(&is->wantedSpec, &is->audioSpec) < 0){printf("Failed to open audio");return 
-6;}packet_queue_init(&is->audioq);is->wanted_frame.format = AV_SAMPLE_FMT_S16;is->wanted_frame.sample_rate = is->audioSpec.freq;is->wanted_frame.channel_layout = av_get_default_channel_layout(is->audioSpec.channels);is->wanted_frame.channels = is->audioSpec.channels;is->audioStreamIndex = stream_index;is->audio_st = is->pFormatCtx->streams[stream_index];is->audio_buf_size = 0;is->audio_buf_index = 0;memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));SDL_PauseAudio(0);break;//對視頻數據進行處理case AVMEDIA_TYPE_VIDEO:is->video_ctx = codecCtx;is->video_st = is->pFormatCtx->streams[stream_index];is->videoStreamIndex = stream_index;is->frame_timer = (double)av_gettime() / 1000000.0;is->frame_last_delay = 40e-3;packet_queue_init(&is->videoq);is->video_sws_ctx = sws_getContext(is->video_ctx->width, is->video_ctx->height,is->video_ctx->pix_fmt, is->video_ctx->width,is->video_ctx->height, AV_PIX_FMT_YUV420P,SWS_BILINEAR, NULL, NULL, NULL);break;default:break;} } #endif定義解封裝線程和視頻解碼線程
解封裝線程負責解析視頻文件并讀取數據包到不同的隊列中。視頻解碼線程負責將視頻數據包解析成SDL能識別的圖片數據類型。
//thread.h #ifndef _THREAD_H_ #define _THREAD_H_ #include "define.h" #include "datequeue.h" #include "parser_stream.h"double synchronize_video(VideoState *is, AVFrame *src_frame, double pts) {double frame_delay;if (pts != 0){is->video_clock = pts;}else{pts = is->video_clock;}//更新幀的時鐘frame_delay = av_q2d(is->video_ctx->time_base);frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);is->video_clock += frame_delay;return pts; }int decode_video_thread(void *arg) {VideoState *is = (VideoState *)arg;AVPacket pkt1, packet;int64_t pts = 0;int ret = -1;AVFrame *pFrame = av_frame_alloc();while (1){//從隊列中提取數據包if (packet_queue_get(&is->videoq, &packet, 1) < 0){continue;}int ret = avcodec_send_packet(is->video_ctx, &packet);ret = avcodec_receive_frame(is->video_ctx, pFrame);if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF){continue;}if (ret < 0){continue;}pts = pFrame->pts;//* av_q2d(is->video_st->time_base);;//同步視頻pts = synchronize_video(is, pFrame, pts);if (queue_picture(is, pFrame, pts) < 0){break;}av_packet_unref(&packet);}av_frame_free(&pFrame);return 0; }int demux_thread(void *arg) {int ret = -1;VideoState *is = (VideoState*)arg;AVPacket packet;AVFrame *pFrame = NULL;//打開上下文解析數據流if (avformat_open_input(&is->pFormatCtx, is->filename, NULL, NULL) != 0)return -1;if (avformat_find_stream_info(is->pFormatCtx, NULL)<0)return -1;//查音視頻流的索引for (int i = 0; i<is->pFormatCtx->nb_streams; i++){if (is->pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {is->videoStreamIndex = i;}if (is->pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {is->audioStreamIndex = i;}}//解析音頻流和視頻流if (is->audioStreamIndex >= 0){stream_component_open(is, is->audioStreamIndex);}if (is->videoStreamIndex >= 0){stream_component_open(is, is->videoStreamIndex);//啟動視頻解析線程is->video_tid = SDL_CreateThread(decode_video_thread, "decode_video_thread", is);}if (is->videoStreamIndex < 0 || is->audioStreamIndex < 0){fprintf(stderr, "%s: could not open codecs\n", is->filename);return 
-1;}pFrame = av_frame_alloc();//讀取數據包while (av_read_frame(is->pFormatCtx, &packet) >= 0){if (packet.stream_index == is->audioStreamIndex){packet_queue_put(&is->audioq, &packet);}else{packet_queue_put(&is->videoq, &packet);SDL_Delay(10);}av_packet_unref(&packet);}__FAIL://后處理清理數據if (pFrame) {av_frame_free(&pFrame);}if (is->audio_ctx){avcodec_close(is->audio_ctx);}SDL_Quit();return ret; } #endif定義音視頻的解碼函數
解碼函數負責從數據隊列里面讀取音視頻數據并進行渲染播放。
//callback.h
// Audio side of the player: decode + resample one packet's worth of audio
// (audio_decode_frame) and the SDL audio callback that consumes it.
#ifndef _CALL_BACK_H_
#define _CALL_BACK_H_
#include "define.h"

/* Decode the next audio packet from the global audio queue into audio_buf,
 * resampled to the format recorded in g_state->wanted_frame (S16, device
 * rate/channels). Advances g_state->audio_clock by the duration of the
 * produced samples. Returns the number of bytes written, or -1 when the
 * queue signals quit. Static locals keep per-call packet state, so this
 * must only ever be called from the single SDL audio callback thread. */
int audio_decode_frame(AVCodecContext* aCodecCtx, uint8_t* audio_buf, int buf_size)
{
    static AVPacket pkt;                     // packet currently being consumed
    static uint8_t* audio_pkt_data = NULL;   // cursor into pkt payload
    static int audio_pkt_size = 0;           // bytes of pkt not yet consumed
    static AVFrame frame;                    // decoded frame scratch
    int len1;
    int data_size = 0;
    SwrContext* swr_ctx = NULL;
    while (1)
    {
        // Drain the current packet before fetching the next one.
        while (audio_pkt_size > 0)
        {
            int got_frame = 0;
            // NOTE(review): return codes of send/receive are not checked,
            // and got_frame is never set, so the memcpy branch below is
            // dead code — the swr_convert path is what fills audio_buf.
            avcodec_send_packet(aCodecCtx, &pkt);
            avcodec_receive_frame(aCodecCtx, &frame);
            len1 = frame.pkt_size;
            if (len1 < 0)
            {
                audio_pkt_size = 0;
                break;
            }
            // Consume the decoded bytes from the packet cursor.
            audio_pkt_data += len1;
            audio_pkt_size -= len1;
            data_size = 0;
            if (got_frame)
            {
                int linesize = 1;
                data_size = av_samples_get_buffer_size(&linesize, aCodecCtx->channels, frame.nb_samples, aCodecCtx->sample_fmt, 1);
                assert(data_size <= buf_size);
                memcpy(audio_buf, frame.data[0], data_size);
            }
            // Fill in whichever of channels/channel_layout is missing.
            if (frame.channels > 0 && frame.channel_layout == 0)
                frame.channel_layout = av_get_default_channel_layout(frame.channels);
            else if (frame.channels == 0 && frame.channel_layout > 0)
                frame.channels = av_get_channel_layout_nb_channels(frame.channel_layout);
            if (swr_ctx)
            {
                swr_free(&swr_ctx);
                swr_ctx = NULL;
            }
            // Build a resampler from the frame's format to the device
            // format. NOTE(review): allocating/freeing swr_ctx per frame is
            // wasteful; a cached context keyed on the input format would do.
            swr_ctx = swr_alloc_set_opts(NULL, g_state->wanted_frame.channel_layout, (AVSampleFormat)g_state->wanted_frame.format, g_state->wanted_frame.sample_rate,
                frame.channel_layout, (AVSampleFormat)frame.format, frame.sample_rate, 0, NULL);
            if (!swr_ctx || swr_init(swr_ctx) < 0)
            {
                printf("swr_init failed\n");
            }
            // NOTE(review): av_rescale_rnd's third argument should be the
            // source sample rate, not wanted_frame.format — confirm against
            // the libswresample docs before relying on dst_nb_samples.
            int dst_nb_samples = (int)av_rescale_rnd(swr_get_delay(swr_ctx, frame.sample_rate) + frame.nb_samples,g_state->wanted_frame.sample_rate, g_state->wanted_frame.format, AV_ROUND_INF);
            int len2 = swr_convert(swr_ctx, &audio_buf, dst_nb_samples,(const uint8_t**)frame.data, frame.nb_samples);
            if (len2 < 0)
            {
                printf("swr_convert failed\n");
            }
            //data_size = 2 * g_state->wanted_frame.nb_samples * 2;
            // Bytes actually produced: samples * channels * bytes-per-sample.
            data_size = g_state->wanted_frame.channels * len2 * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
            // Advance the audio clock by the playback duration of this data.
            int n = 2 * g_state->audio_ctx->channels;
            g_state->audio_clock += (double)data_size /(double)(n * g_state->audio_ctx->sample_rate);
            av_packet_unref(&pkt);
            if (swr_ctx)
            {
                swr_free(&swr_ctx);
                swr_ctx = NULL;
            }
            // Return the number of bytes written into audio_buf.
            return data_size;
            //return g_state->wanted_frame.channels * len2 * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
        }
        // Fetch the next packet from the audio queue (blocking).
        if (packet_queue_get(&g_state->audioq, &pkt, 1) < 0)
            return -1;
        audio_pkt_data = pkt.data;
        audio_pkt_size = pkt.size;
    }
}

/* SDL audio callback: fill `stream` with `len` bytes of decoded audio,
 * pulling more via audio_decode_frame whenever the staging buffer runs
 * out. On decode failure a short block of silence is substituted. */
void audio_callback(void* userdata, Uint8* stream, int len)
{
    AVCodecContext* aCodecCtx = (AVCodecContext*)userdata;
    int len1, audio_size;
    static uint8_t audio_buff[192000 * 3 / 2];
    // NOTE(review): these two static locals are unused — the code tracks
    // buffer state in g_state->audio_buf_size/index instead.
    static unsigned int audio_buf_size = 0;
    static unsigned int audio_buf_index = 0;
    SDL_memset(stream, 0, len);
    while (len > 0)
    {
        // Refill the staging buffer when it has been fully consumed.
        if (g_state->audio_buf_index >= g_state->audio_buf_size)
        {
            audio_size = audio_decode_frame(aCodecCtx, audio_buff, sizeof(audio_buff));
            if (audio_size < 0)
            {
                // Decode failed: output a short run of silence instead.
                g_state->audio_buf_size = 1024*2*2;
                memset(audio_buff, 0, g_state->audio_buf_size);
            }
            else
                g_state->audio_buf_size = audio_size;
            g_state->audio_buf_index = 0;
        }
        // Copy as much as fits into the SDL stream this round.
        len1 = g_state->audio_buf_size - g_state->audio_buf_index;
        if (len1 > len)
            len1 = len;
        SDL_MixAudio(stream, audio_buff + g_state->audio_buf_index, len1, SDL_MIX_MAXVOLUME);
        len -= len1;
        stream += len1;
        g_state->audio_buf_index += len1;
    }
}
#endif
主函數事件響應
在主函數里面對各種資源進行整合同時處理事件響應,定時刷新視頻流的顯示。
//main.cpp #include "define.h" #include "SDL_Wraper.h" #include "datequeue.h" #include "parser_stream.h" #include "callback.h" #include "thread.h"double get_audio_clock(VideoState *is) {double pts;int hw_buf_size, bytes_per_sec, n;pts = is->audio_clock;hw_buf_size = is->audio_buf_size - is->audio_buf_index;bytes_per_sec = 0;n = is->audio_ctx->channels * 2;if (is->audio_st) {bytes_per_sec = is->audio_ctx->sample_rate * n;}if (bytes_per_sec) {pts -= (double)hw_buf_size / bytes_per_sec;}return pts; }//定時發送事件 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque) {SDL_Event event;event.type = FF_REFRESH_EVENT;event.user.data1 = opaque;SDL_PushEvent(&event);SDL_Delay(40);return 0; }//添加一個定時器 static void schedule_refresh(VideoState *is, int delay) {SDL_AddTimer(delay, sdl_refresh_timer_cb, is); }void video_refresh_timer(void *userdata) {VideoState *is = (VideoState *)userdata;AVFrame *vp;int64_t delay, sync_threshold, ref_clock;double actual_delay;if (is->video_st){if (is->pictq_size == 0){schedule_refresh(is, 1);}else{vp = &is->pictq[is->pictq_rindex];delay = vp->pts - is->frame_last_pts;//存儲pts和delay下次使用is->frame_last_delay = delay;is->frame_last_pts = vp->pts;//獲取音頻延遲時間ref_clock = get_audio_clock(is);double diff = vp->pts * av_q2d(is->video_st->time_base) - ref_clock;/* Skip or repeat the frame. Take delay into accountFFPlay still doesn't "know if this is the best guess." */sync_threshold = (delay > AV_SYNC_THRESHOLD) ? 
delay : AV_SYNC_THRESHOLD;if (fabs(diff) < AV_NOSYNC_THRESHOLD){if (diff <= -sync_threshold) {delay = 0;}else if (diff >= sync_threshold) {delay = 2 * delay;}}is->frame_timer += delay * av_q2d(is->video_st->time_base);//計算真正的延遲時間actual_delay = is->frame_timer - (av_gettime() / 1000000.0);if (actual_delay < 0.010){actual_delay = 0.010;}//std::cout << actual_delay << "frame_timer" << is->frame_timer << std::endl;schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));//顯示視頻幀video_display(is);//刷新視頻信息,為下次刷新做準備if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) {is->pictq_rindex = 0;}SDL_LockMutex(is->pictq_mutex);is->pictq_size--;SDL_CondSignal(is->pictq_cond);SDL_UnlockMutex(is->pictq_mutex);}}else{schedule_refresh(is, 100);}}int main(int argc, char *argv[]) {int ret = -1;SDL_Event event;if (argc < 2) {printf("Usage: command <file>\n");return ret;}//初始化SDLInitSDL();//初始化各種變量g_state = (VideoState*)av_mallocz(sizeof(VideoState));g_state->pictq_mutex = SDL_CreateMutex();g_state->pictq_cond = SDL_CreateCond();memcpy(g_state->filename, argv[1], sizeof(g_state->filename));//解封裝的線程g_state->parse_tid = SDL_CreateThread(demux_thread, "demux_thread", g_state);if (!g_state->parse_tid){av_free(g_state);goto __FAIL;}while (!g_state->video_ctx){SDL_Delay(10);}//創建窗口渲染視頻,在子線程里面創建會阻塞主線程的時間循環win = SDL_CreateWindow("Feifei Player",SDL_WINDOWPOS_UNDEFINED,SDL_WINDOWPOS_UNDEFINED,g_state->video_ctx->width, g_state->video_ctx->height,SDL_WINDOW_OPENGL | SDL_WINDOW_RESIZABLE);renderer = SDL_CreateRenderer(win, -1, 0);texture = SDL_CreateTexture(renderer,SDL_PIXELFORMAT_IYUV,SDL_TEXTUREACCESS_STREAMING,g_state->video_ctx->width, g_state->video_ctx->height);schedule_refresh(g_state, 40);while (1){SDL_WaitEvent(&event);switch (event.type){case FF_QUIT_EVENT:case SDL_QUIT:g_state->quit = 1;goto __QUIT;break;case FF_REFRESH_EVENT:video_refresh_timer(event.user.data1);break;default:break;}} __QUIT:ret = 0; __FAIL:SDL_Quit();return ret; 
}完善了音視頻同步之后,demo程序其實就是一個播放器的雛形了。我們可以使用它來播放各種視頻。這里以一個mkv格式的視頻為例進行播放,播放效果如下所示:
總結
以上是生活随笔為你收集整理的FFmpeg基础:音视频同步播放的全部內容,希望文章能夠幫你解決所遇到的問題。
- 上一篇: 苹果手机不显示图片的解决方法
- 下一篇: 数据结构课程设计(考试管理系统)