轉自:http://blog.csdn.net/nonmarking/article/details/47958395
?
本系列目前共三篇文章,后續還會更新
WebRTC VideoEngine超詳細教程(一)——視頻通話的基本流程
WebRTC VideoEngine超詳細教程(二)——集成OPENH264編解碼器
WebRTC VideoEngine超詳細教程(三)——集成X264編碼和ffmpeg解碼
總述
在前一篇文章中,講解了如何將OPENH264編解碼器集成到WebRTC中,但是OPENH264只能編碼baseline的H264視頻,而且就編碼質量而言,還是X264最好,本文就來講解一下如何將X264編碼器集成到WebRTC中,為了實現解碼,同時要用到ffmpeg。總體流程和之前一樣,分為重新封裝編解碼器和注冊調用兩大步驟,注冊調用這一步沒有任何不同,主要是重新封裝這一步驟有較大區別。
重新封裝X264編碼功能
首先當然還是要下載X264源碼編譯出相應的庫以供調用。在windows下使用mingw進行編譯,再使用pexports工具導出庫,最后得到libx264.dll和libx264.lib,同時把x264.h和x264_config.h總共四個文件放到工程目錄下,并在項目屬性中進行相應配置。 使用x264進行視頻編碼的基本流程如下
[cpp]?view plaincopy
#include?<stdint.h>??#include?<stdio.h>??#include?<x264.h>????int?main(?int?argc,?char?**argv?)??{??????int?width,?height;??????x264_param_t?param;??????x264_picture_t?pic;??????x264_picture_t?pic_out;??????x264_t?*h;??????int?i_frame?=?0;??????int?i_frame_size;??????x264_nal_t?*nal;??????int?i_nal;????????????if(?x264_param_default_preset(?¶m,?"medium",?NULL?)?<?0?)??????????goto?fail;????????????param.i_csp?=?X264_CSP_I420;??????param.i_width??=?width;??????param.i_height?=?height;??????param.b_vfr_input?=?0;??????param.b_repeat_headers?=?1;??????param.b_annexb?=?1;????????????if(?x264_param_apply_profile(?¶m,?"high"?)?<?0?)??????????goto?fail;????????if(?x264_picture_alloc(?&pic,?param.i_csp,?param.i_width,?param.i_height?)?<?0?)??????????goto?fail;????????h?=?x264_encoder_open(?¶m);??????if(?!h?)??????????goto?fail;????????int?luma_size?=?width?*?height;??????int?chroma_size?=?luma_size?/?4;??????????for(?;;?i_frame++?)??????{??????????????????if(?fread(?pic.img.plane[0],?1,?luma_size,?stdin?)?!=?luma_size?)??????????????break;??????????if(?fread(?pic.img.plane[1],?1,?chroma_size,?stdin?)?!=?chroma_size?)??????????????break;??????????if(?fread(?pic.img.plane[2],?1,?chroma_size,?stdin?)?!=?chroma_size?)??????????????break;????????????pic.i_pts?=?i_frame;??????????i_frame_size?=?x264_encoder_encode(?h,?&nal,?&i_nal,?&pic,?&pic_out?);??????????if(?i_frame_size?<?0?)??????????????goto?fail;??????????else?if(?i_frame_size?)??????????{??????????????if(?!fwrite(?nal->p_payload,?i_frame_size,?1,?stdout?)?)??????????????????goto?fail;??????????}??????}??????????while(?x264_encoder_delayed_frames(?h?)?)??????{??????????i_frame_size?=?x264_encoder_encode(?h,?&nal,?&i_nal,?NULL,?&pic_out?);??????????if(?i_frame_size?<?0?)??????????????goto?fail;??????????else?if(?i_frame_size?)??????????{??????????????if(?!fwrite(?nal->p_payload,?i_frame_size,?1,?stdout?)?)??????????????????goto?fail;??????????}??????}????????x264_encoder_close(?h?);??????x264_picture_clean(?&pic?);??????re
turn?0;??}?? 還是一樣,照葫蘆畫瓢,改寫上一篇文章中提到的H264EncoderImpl類 首先是類的定義,去掉了原來的私有成員變量ISVCEncoder* encoder_,加入了以下幾項,其他內容不變
[cpp]?view plaincopy
x264_picture_t?pic;??x264_picture_t?pic_out;??x264_t?*encoder_;??int?i_frame?=?0;x264_nal_t?*nal;?? 相應的,構造函數和析構函數也要改變,這里就不贅述了,重點看InitEncode方法和Encode方法。 InitEncode方法的實現改寫如下
[cpp]?view plaincopy
int?H264EncoderImpl::InitEncode(const?VideoCodec*?inst,??????????int?number_of_cores,??????????size_t?max_payload_size)?{??????????if?(inst?==?NULL)?{??????????????return?WEBRTC_VIDEO_CODEC_ERR_PARAMETER;??????????}??????????if?(inst->maxFramerate?<?1)?{??????????????return?WEBRTC_VIDEO_CODEC_ERR_PARAMETER;??????????}??????????????????if?(inst->maxBitrate?>?0?&&?inst->startBitrate?>?inst->maxBitrate)?{??????????????return?WEBRTC_VIDEO_CODEC_ERR_PARAMETER;??????????}??????????if?(inst->width?<?1?||?inst->height?<?1)?{??????????????return?WEBRTC_VIDEO_CODEC_ERR_PARAMETER;??????????}??????????if?(number_of_cores?<?1)?{??????????????return?WEBRTC_VIDEO_CODEC_ERR_PARAMETER;??????????}????????????int?ret_val?=?Release();??????????if?(ret_val?<?0)?{??????????????return?ret_val;??????????}??????????????????x264_param_t?param;??????????ret_val?=?x264_param_default_preset(¶m,?"medium",?NULL);??????????if?(ret_val?!=?0)?{??????????????WEBRTC_TRACE(webrtc::kTraceError,?webrtc::kTraceVideoCoding,?-1,??????????????????"H264EncoderImpl::InitEncode()?fails?to?initialize?encoder?ret_val?%d",??????????????????ret_val);??????????????x264_encoder_close(encoder_);??????????????encoder_?=?NULL;??????????????return?WEBRTC_VIDEO_CODEC_ERROR;??????????}??????????????????param.i_csp?=?X264_CSP_I420;??????????param.i_width?=?inst->width;??????????param.i_height?=?inst->height;??????????param.b_vfr_input?=?0;??????????param.b_repeat_headers?=?1;??????????param.b_annexb?=?0;????????param.i_fps_num?=?1;??????????param.i_fps_num?=?codec_.maxFramerate;??????????param.rc.i_bitrate?=?codec_.maxBitrate;??????????????????ret_val?=?x264_param_apply_profile(¶m,?"high");??????????if?(ret_val?!=?0)?{??????????????WEBRTC_TRACE(webrtc::kTraceError,?webrtc::kTraceVideoCoding,?-1,??????????????????"H264EncoderImpl::InitEncode()?fails?to?initialize?encoder?ret_val?%d",??????????????????ret_val);??????????????x264_encoder_close(encoder_);??????????????encoder_?=?NULL;??????????????return?WEBRTC_VIDEO_CODEC_ERRO
R;??????????}????????????ret_val?=?x264_picture_alloc(&pic,?param.i_csp,?param.i_width,?param.i_height);??????????if?(ret_val?!=?0)?{??????????????WEBRTC_TRACE(webrtc::kTraceError,?webrtc::kTraceVideoCoding,?-1,??????????????????"H264EncoderImpl::InitEncode()?fails?to?initialize?encoder?ret_val?%d",??????????????????ret_val);??????????????x264_encoder_close(encoder_);??????????????encoder_?=?NULL;??????????????return?WEBRTC_VIDEO_CODEC_ERROR;??????????}????????????encoder_?=?x264_encoder_open(¶m);??????????if?(!encoder_){??????????????WEBRTC_TRACE(webrtc::kTraceError,?webrtc::kTraceVideoCoding,?-1,??????????????????"H264EncoderImpl::InitEncode()?fails?to?initialize?encoder?ret_val?%d",??????????????????ret_val);??????????????x264_encoder_close(encoder_);??????????????x264_picture_clean(&pic);??????????????encoder_?=?NULL;??????????????return?WEBRTC_VIDEO_CODEC_ERROR;??????????}????????????if?(&codec_?!=?inst)?{??????????????codec_?=?*inst;??????????}????????????if?(encoded_image_._buffer?!=?NULL)?{??????????????delete[]?encoded_image_._buffer;??????????}??????????encoded_image_._size?=?CalcBufferSize(kI420,?codec_.width,?codec_.height);??????????encoded_image_._buffer?=?new?uint8_t[encoded_image_._size];??????????encoded_image_._completeFrame?=?true;????????????inited_?=?true;??????????WEBRTC_TRACE(webrtc::kTraceApiCall,?webrtc::kTraceVideoCoding,?-1,??????????????"H264EncoderImpl::InitEncode(width:%d,?height:%d,?framerate:%d,?start_bitrate:%d,?max_bitrate:%d)",??????????????inst->width,?inst->height,?inst->maxFramerate,?inst->startBitrate,?inst->maxBitrate);????????????return?WEBRTC_VIDEO_CODEC_OK;??????}?? Encode方法的實現改寫如下
[cpp]?view plaincopy
int?H264EncoderImpl::Encode(const?I420VideoFrame&?input_image,??????????const?CodecSpecificInfo*?codec_specific_info,??????????const?std::vector<VideoFrameType>*?frame_types)?{??????????if?(!inited_)?{??????????????return?WEBRTC_VIDEO_CODEC_UNINITIALIZED;??????????}??????????if?(input_image.IsZeroSize())?{??????????????return?WEBRTC_VIDEO_CODEC_ERR_PARAMETER;??????????}??????????if?(encoded_complete_callback_?==?NULL)?{??????????????return?WEBRTC_VIDEO_CODEC_UNINITIALIZED;??????????}????????????VideoFrameType?frame_type?=?kDeltaFrame;??????????????????if?(frame_types?&&?frame_types->size()?>?0)?{??????????????frame_type?=?(*frame_types)[0];??????????}????????????bool?send_keyframe?=?(frame_type?==?kKeyFrame);??????????if?(send_keyframe)?{??????????????pic.b_keyframe?=?TRUE;??????????????WEBRTC_TRACE(webrtc::kTraceApiCall,?webrtc::kTraceVideoCoding,?-1,??????????????????"H264EncoderImpl::EncodeKeyFrame(width:%d,?height:%d)",??????????????????input_image.width(),?input_image.height());??????????}????????????????????if?(input_image.width()?!=?codec_.width?||??????????????input_image.height()?!=?codec_.height)?{??????????????int?ret?=?UpdateCodecFrameSize(input_image);??????????????if?(ret?<?0)?{??????????????????return?ret;??????????????}??????????}????????????????????pic.img.plane[0]?=?const_cast<uint8_t*>(input_image.buffer(kYPlane));??????????pic.img.plane[1]?=?const_cast<uint8_t*>(input_image.buffer(kUPlane));??????????pic.img.plane[2]?=?const_cast<uint8_t*>(input_image.buffer(kVPlane));??????????pic.i_pts?=?i_frame;????????????int?i_nal?=?0;??????????int?i_frame_size?=?x264_encoder_encode(encoder_,?&nal,?&i_nal,?&pic,?&pic_out);??????????if?(i_frame_size?<?0)??????????{??????????????WEBRTC_TRACE(webrtc::kTraceError,?webrtc::kTraceVideoCoding,?-1,??????????????????"H264EncoderImpl::Encode()?fails?to?encode?%d",??????????????????i_frame_size);??????????????x264_encoder_close(encoder_);??????????????x264_picture_clean(&pic);??????????????encoder_?=?NULL;?????????????
?return?WEBRTC_VIDEO_CODEC_ERROR;??????????}????????????RTPFragmentationHeader?frag_info;????????????????????if?(i_frame_size)??????????{??????????????if?(i_nal?==?0)?{??????????????????return?WEBRTC_VIDEO_CODEC_OK;??????????????}??????????????frag_info.VerifyAndAllocateFragmentationHeader(i_nal);????????????????encoded_image_._length?=?0;????????????????uint32_t?totalNaluIndex?=?0;??????????????for?(int?nal_index?=?0;?nal_index?<?i_nal;?nal_index++)??????????????{??????????????????uint32_t?currentNaluSize?=?0;??????????????????currentNaluSize?=?nal[nal_index].i_payload?-?4;?????????????????memcpy(encoded_image_._buffer?+?encoded_image_._length,?nal[nal_index].p_payload?+?4,?currentNaluSize);????????????????encoded_image_._length?+=?currentNaluSize;????????????????????WEBRTC_TRACE(webrtc::kTraceApiCall,?webrtc::kTraceVideoCoding,?-1,??????????????????????"H264EncoderImpl::Encode()?nal_type?%d,?length:%d",??????????????????????nal[nal_index].i_type,?encoded_image_._length);????????????????????frag_info.fragmentationOffset[totalNaluIndex]?=?encoded_image_._length?-?currentNaluSize;??????????????????frag_info.fragmentationLength[totalNaluIndex]?=?currentNaluSize;??????????????????frag_info.fragmentationPlType[totalNaluIndex]?=?nal[nal_index].i_type;??????????????????frag_info.fragmentationTimeDiff[totalNaluIndex]?=?0;??????????????????totalNaluIndex++;??????????????}??????????}??????????i_frame++;??????????if?(encoded_image_._length?>?0)?{??????????????encoded_image_._timeStamp?=?input_image.timestamp();??????????????encoded_image_.capture_time_ms_?=?input_image.render_time_ms();??????????????encoded_image_._encodedHeight?=?codec_.height;??????????????encoded_image_._encodedWidth?=?codec_.width;??????????????encoded_image_._frameType?=?frame_type;??????????????????????????encoded_complete_callback_->Encoded(encoded_image_,?NULL,?&frag_info);??????????}??????????return?WEBRTC_VIDEO_CODEC_OK;??????}?? 其他方法的實現均沒有改變。 至此,X264編碼器重新封裝完畢,還是比較好理解的。
重新封裝ffmpeg解碼功能
首先還是一樣,獲得ffmpeg的頭文件和庫文件,加入工程中并進行相應設置,這里只需使用avcodec avformat avutil swscale四個庫,頭文件也可以做相應的刪減。 ffmpeg解碼的基本流程如下,實際集成之后是從WebRTC的EncodedImage& input_image中獲得待解碼數據的,所以不能使用常見的基于文件的解碼流程
[cpp]?view plaincopy
AVCodec?*codec?=?avcodec_find_decoder(AV_CODEC_ID_H264);??AVCodecContext?*codecCtx?=?avcodec_alloc_context3(codec);??avcodec_open2(codecCtx,?codec,?nil);??char?*videoData;??int?len;??AVFrame?*frame?=?av_frame_alloc();??AVPacket?packet;??av_new_packet(&packet,?len);??memcpy(packet.data,?videoData,?len);??int?ret,?got_picture;??ret?=?avcodec_decode_video2(codecCtx,?frame,?&got_picture,?&packet);??if?(ret?>?0){??????if(got_picture){??????????}??}?? 相應的,對H264DecoderImpl類的定義和各方法的實現要進行改寫。
首先是類的定義,去掉了ISVCDecoder* decoder_,加入了以下私有成員變量
[cpp]?view plaincopy
AVCodecContext??*pCodecCtx;????AVCodec???????????*pCodec;????AVFrame???*pFrame,?*pFrameYUV;????AVPacket?*packet;????struct?SwsContext?*img_convert_ctx;????uint8_t?*decode_buffer;??uint8_t?*out_buffer;????int?framecnt?=?0;????int?encoded_length?=?0;?? 構造函數和析構函數的改寫省略不表,重點看一下InitDecode方法和Decode方法 InitDecode方法改寫如下
[cpp]?view plaincopy
int?H264DecoderImpl::InitDecode(const?VideoCodec*?inst,?int?number_of_cores)?{??????????if?(inst?==?NULL)?{??????????????return?WEBRTC_VIDEO_CODEC_ERR_PARAMETER;??????????}??????????int?ret_val?=?Release();??????????if?(ret_val?<?0)?{??????????????return?ret_val;??????????}????????????if?(&codec_?!=?inst)?{??????????????????????????codec_?=?*inst;??????????}??????????pCodec?=?avcodec_find_decoder(AV_CODEC_ID_H264);??????????pCodecCtx?=?avcodec_alloc_context3(pCodec);??????????pCodecCtx->pix_fmt?=?PIX_FMT_YUV420P;??????????pCodecCtx->width?=?codec_.width;??????????pCodecCtx->height?=?codec_.height;??????????????????pCodecCtx->time_base.num?=?1;??????????pCodecCtx->time_base.den?=?codec_.maxFramerate;????????????if?(pCodec?==?NULL){??????????????WEBRTC_TRACE(webrtc::kTraceError,?webrtc::kTraceVideoCoding,?-1,??????????????????"H264DecoderImpl::InitDecode,?Codec?not?found.");??????????????return?WEBRTC_VIDEO_CODEC_ERROR;??????????}??????????if?(avcodec_open2(pCodecCtx,?pCodec,?NULL)?<?0){??????????????WEBRTC_TRACE(webrtc::kTraceError,?webrtc::kTraceVideoCoding,?-1,??????????????????"H264DecoderImpl::InitDecode,?Could?not?open?codec.");??????????????return?WEBRTC_VIDEO_CODEC_ERROR;??????????}??????????inited_?=?true;????????????????????key_frame_required_?=?true;??????????WEBRTC_TRACE(webrtc::kTraceApiCall,?webrtc::kTraceVideoCoding,?-1,??????????????"H264DecoderImpl::InitDecode(width:%d,?height:%d,?framerate:%d,?start_bitrate:%d,?max_bitrate:%d)",??????????????inst->width,?inst->height,?inst->maxFramerate,?inst->startBitrate,?inst->maxBitrate);??????????return?WEBRTC_VIDEO_CODEC_OK;??????}?? Decode方法的實現改寫如下
[cpp]?view plaincopy
int?H264DecoderImpl::Decode(const?EncodedImage&?input_image,??????????bool?missing_frames,??????????const?RTPFragmentationHeader*?fragmentation,??????????const?CodecSpecificInfo*?codec_specific_info,??????????int64_t?????????if?(!inited_)?{??????????????WEBRTC_TRACE(webrtc::kTraceError,?webrtc::kTraceVideoCoding,?-1,??????????????????"H264DecoderImpl::Decode,?decoder?is?not?initialized");??????????????return?WEBRTC_VIDEO_CODEC_UNINITIALIZED;??????????}????????????if?(decode_complete_callback_?==?NULL)?{??????????????WEBRTC_TRACE(webrtc::kTraceError,?webrtc::kTraceVideoCoding,?-1,??????????????????"H264DecoderImpl::Decode,?decode?complete?call?back?is?not?set");??????????????return?WEBRTC_VIDEO_CODEC_UNINITIALIZED;??????????}????????????if?(input_image._buffer?==?NULL)?{??????????????WEBRTC_TRACE(webrtc::kTraceError,?webrtc::kTraceVideoCoding,?-1,??????????????????"H264DecoderImpl::Decode,?null?buffer");??????????????return?WEBRTC_VIDEO_CODEC_ERR_PARAMETER;??????????}??????????if?(!codec_specific_info)?{??????????????WEBRTC_TRACE(webrtc::kTraceError,?webrtc::kTraceVideoCoding,?-1,??????????????????"H264EncoderImpl::Decode,?no?codec?info");??????????????return?WEBRTC_VIDEO_CODEC_ERROR;??????????}??????????if?(codec_specific_info->codecType?!=?kVideoCodecH264)?{??????????????WEBRTC_TRACE(webrtc::kTraceError,?webrtc::kTraceVideoCoding,?-1,??????????????????"H264EncoderImpl::Decode,?non?h264?codec?%d",?codec_specific_info->codecType);??????????????return?WEBRTC_VIDEO_CODEC_ERROR;??????????}????????????WEBRTC_TRACE(webrtc::kTraceApiCall,?webrtc::kTraceVideoCoding,?-1,??????????????"H264DecoderImpl::Decode(frame_type:%d,?length:%d",??????????????input_image._frameType,?input_image._length);????????????????if?(framecnt?<?2)??????????{????????????memcpy(decode_buffer?+?encoded_length,?input_image._buffer,?input_image._length);??????????????encoded_length?+=?input_image._length;??????????????framecnt++;??????????}??????????else??????????{??????????????pFrame?=?av_frame_alloc(
);??????????????pFrameYUV?=?av_frame_alloc();??????????????out_buffer?=?(uint8_t?*)av_malloc(avpicture_get_size(PIX_FMT_YUV420P,?pCodecCtx->width,?pCodecCtx->height));??????????????avpicture_fill((AVPicture?*)pFrameYUV,?out_buffer,?PIX_FMT_YUV420P,?pCodecCtx->width,?pCodecCtx->height);??????????????img_convert_ctx?=?sws_getContext(pCodecCtx->width,?pCodecCtx->height,?pCodecCtx->pix_fmt,??????????????????pCodecCtx->width,?pCodecCtx->height,?PIX_FMT_YUV420P,?SWS_BICUBIC,?NULL,?NULL,?NULL);????????????????????????????if?(framecnt?==?2)??????????????{??????????????????packet?=?(AVPacket?*)av_malloc(sizeof(AVPacket));??????????????????av_new_packet(packet,?encoded_length);??????????????????memcpy(packet->data,?decode_buffer,?encoded_length);??????????????????av_free(decode_buffer);??????????????????framecnt++;??????????????????printf("\n\nLoading");??????????????}??????????????else??????????????{??????????????????packet?=?(AVPacket?*)av_malloc(sizeof(AVPacket));??????????????????av_new_packet(packet,?input_image._length);??????????????????memcpy(packet->data,?input_image._buffer,?input_image._length);??????????????}????????????????????????????int?got_picture?=?0;??????????????int?ret?=?avcodec_decode_video2(pCodecCtx,?pFrame,?&got_picture,?packet);??????????????if?(ret?<?0){??????????????????WEBRTC_TRACE(webrtc::kTraceError,?webrtc::kTraceVideoCoding,?-1,??????????????????????"H264DecoderImpl::Decode,?Decode?Error.");??????????????????return?WEBRTC_VIDEO_CODEC_ERROR;??????????????}??????????????if?(got_picture){??????????????????sws_scale(img_convert_ctx,?(const?uint8_t*?const*)pFrame->data,?pFrame->linesize,?0,?pCodecCtx->height,??????????????????????pFrameYUV->data,?pFrameYUV->linesize);????????????????????int?size_y?=?pFrameYUV->linesize[0]?*?pCodecCtx->height;??????????????????int?size_u?=?pFrameYUV->linesize[1]?*?pCodecCtx->height?/?2;??????????????????int?size_v?=?pFrameYUV->linesize[2]?*?pCodecCtx->height?/?2;????????????????????decoded_image_.CreateFrame(size_y,?
static_cast<uint8_t*>(pFrameYUV->data[0]),??????????????????????size_u,?static_cast<uint8_t*>(pFrameYUV->data[1]),??????????????????????size_v,?static_cast<uint8_t*>(pFrameYUV->data[2]),??????????????????????pCodecCtx->width,??????????????????????pCodecCtx->height,??????????????????????pFrameYUV->linesize[0],??????????????????????pFrameYUV->linesize[1],??????????????????????pFrameYUV->linesize[2]);????????????????????decoded_image_.set_timestamp(input_image._timeStamp);??????????????????decode_complete_callback_->Decoded(decoded_image_);??????????????????return?WEBRTC_VIDEO_CODEC_OK;??????????????}??????????????else??????????????????printf(".");??????????????av_free_packet(packet);??????????}??????????return?WEBRTC_VIDEO_CODEC_OK;??????}?? 其他方法的實現保持不變,至此ffmpeg解碼功能的重新封裝也完成了。 從最后實現的效果來看,X264的視頻質量的確是最好的,但是播放端的解碼延時比較高,暫時還不清楚原因,希望了解的朋友指教。
本項目源代碼
轉載于:https://www.cnblogs.com/x_wukong/p/4880961.html
總結
以上是生活随笔為你收集整理的WebRTC VideoEngine超详细教程(三)——集成X264编码和ffmpeg解码的全部內容,希望文章能夠幫你解決所遇到的問題。
如果覺得生活随笔網站內容還不錯,歡迎將生活随笔推薦給好友。