Android-NDK-audio-echo
項(xiàng)目圖
運(yùn)行界面
界面分析
setContentView(R.layout.activity_main);//設(shè)置布局文件controlButton = (Button)findViewById((R.id.capture_control_button));statusView = (TextView)findViewById(R.id.statusView);{
private void queryNativeAudioParameters() {supportRecording = true;AudioManager myAudioMgr = (AudioManager) getSystemService(Context.AUDIO_SERVICE);//獲取audioManager的服務(wù)if(myAudioMgr == null) {supportRecording = false;return;}nativeSampleRate = myAudioMgr.getProperty(AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);//audio的采樣率nativeSampleBufSize =myAudioMgr.getProperty(AudioManager.PROPERTY_OUTPUT_FRAMES_PER_BUFFER);//每個(gè)sample的大小 {-
音頻源:我們可以使用麥克風(fēng)作為采集音頻的數(shù)據(jù)源。
-
采樣率:一秒鐘對聲音數(shù)據(jù)的采樣次數(shù),采樣率越高,音質(zhì)越好。
-
音頻通道:單聲道,雙聲道等,
-
音頻格式:一般選用PCM格式,即原始的音頻樣本。
-
緩沖區(qū)大小:音頻數(shù)據(jù)寫入緩沖區(qū)的總數(shù),可以通過AudioRecord.getMinBufferSize獲取最小的緩沖區(qū)。(將音頻采集到緩沖區(qū)中然后再從緩沖區(qū)中讀取)
}
queryNativeAudioParameters(); delaySeekBar = (SeekBar)findViewById(R.id.delaySeekBar); curDelayTV = (TextView)findViewById(R.id.curDelay); echoDelayProgress = delaySeekBar.getProgress() * 1000 / delaySeekBar.getMax();設(shè)置seekbar的監(jiān)聽器
delaySeekBar.setOnSeekBarChangeListener(new SeekBar.OnSeekBarChangeListener() {@Overridepublic void onProgressChanged(SeekBar seekBar, int progress, boolean fromUser) {float curVal = (float)progress / delaySeekBar.getMax();curDelayTV.setText(String.format("%s", curVal)); {//設(shè)置坐標(biāo)
private void setSeekBarPromptPosition(SeekBar seekBar, TextView label) {float thumbX = (float)seekBar.getProgress()/ seekBar.getMax() *seekBar.getWidth() + seekBar.getX();label.setX(thumbX - label.getWidth()/2.0f); } }setSeekBarPromptPosition(delaySeekBar, curDelayTV);if (!fromUser) return;echoDelayProgress = progress * 1000 / delaySeekBar.getMax(); { static native boolean configureEcho(int delayInMs, float decay); }configureEcho(echoDelayProgress, echoDecayProgress);}@Overridepublic void onStartTrackingTouch(SeekBar seekBar) {}@Overridepublic void onStopTrackingTouch(SeekBar seekBar) {} }); //當(dāng)前view 在attachedToWindow之后執(zhí)行操作 delaySeekBar.post(new Runnable() {@Overridepublic void run() {setSeekBarPromptPosition(delaySeekBar, curDelayTV);} }); private void startEcho() {if(!supportRecording){return;}if (!isPlaying) {if(!createSLBufferQueueAudioPlayer()) {statusView.setText(getString(R.string.player_error_msg));return;}if(!createAudioRecorder()) {deleteSLBufferQueueAudioPlayer();statusView.setText(getString(R.string.recorder_error_msg));return;}startPlay(); // startPlay() triggers startRecording()statusView.setText(getString(R.string.echoing_status_msg));} else {stopPlay(); // stopPlay() triggers stopRecording()updateNativeAudioUI();deleteAudioRecorder();deleteSLBufferQueueAudioPlayer();}isPlaying = !isPlaying;controlButton.setText(getString(isPlaying ?R.string.cmd_stop_echo: R.string.cmd_start_echo)); }?jni function 聲明
/** JNI function declarations — implemented by the native audio-echo library. */
// Creates the OpenSL ES engine using the device's native sample rate and
// frames-per-buffer, plus the initial echo delay (ms) and decay factor.
static native void createSLEngine(int rate, int framesPerBuf, long delayInMs, float decay);
static native void deleteSLEngine();
// Updates the echo parameters at run time; returns false on failure.
static native boolean configureEcho(int delayInMs, float decay);
static native boolean createSLBufferQueueAudioPlayer();
static native void deleteSLBufferQueueAudioPlayer();
static native boolean createAudioRecorder();
static native void deleteAudioRecorder();
// startPlay() triggers startRecording(); stopPlay() triggers stopRecording().
static native void startPlay();
static native void stopPlay();
OpenSL ES
以上圖片引用自:https://www.jianshu.com/p/82da5f87314f
OpenSL ES是無授權(quán)費(fèi)、跨平臺(tái)、針對嵌入式系統(tǒng)精心優(yōu)化的硬件音頻加速API。
對象和接口的概念:
對象:提供一組資源及其狀態(tài)的抽象
接口:提供特定功能的方法的抽象
對象與接口的關(guān)系:
對象暴露的接口,由以下三個(gè)方面決定:
1)對象的類型
2)應(yīng)用程序在對象創(chuàng)建期間請求的接口。
3)在對象生命周期內(nèi),接口的添加和移除。
一個(gè)對象的類型,表明了它有implicit interfaces(隱式接口)。implicit interface的含義是:無論應(yīng)用程序是否請求(request),它都存在于對象暴露的接口中。
在對象創(chuàng)建的時(shí)候,如果有應(yīng)用請求才暴露的接口,被稱為explicit interfaces。
對象定義的可以動(dòng)態(tài)添加和移除的接口,被稱為dynamic interfaces,通過SLDynamicInterfaceManagementItf接口進(jìn)行管理。
EchoAudioEngine
// Aggregates all native-side state of the echo engine: audio format info,
// OpenSL ES engine handles, recorder/player objects, buffer pools and the
// delay effect. One static instance ("engine") is shared by the JNI entry points.
struct EchoAudioEngine {
  SLmilliHertz fastPathSampleRate_;  // device sample rate in milliHz (Hz * 1000)
  uint32_t fastPathFramesPerBuf_;    // frames per fast-path buffer
  uint16_t sampleChannels_;
  uint16_t bitsPerSample_;
  SLObjectItf slEngineObj_;          // OpenSL ES engine object
  SLEngineItf slEngineItf_;          // engine interface obtained from slEngineObj_
  AudioRecorder *recorder_;
  AudioPlayer *player_;
  AudioQueue *freeBufQueue_;  // Owner of the queue
  AudioQueue *recBufQueue_;   // Owner of the queue
  sample_buf *bufs_;          // backing sample buffers shared via the queues
  uint32_t bufCount_;
  uint32_t frameCount_;
  int64_t echoDelay_;         // echo delay in ms
  float echoDecay_;           // echo decay factor
  AudioDelay *delayEffect_;
};
static EchoAudioEngine engine;
SLObjectItf:任何對象都暴露這個(gè)接口。每個(gè)方法創(chuàng)建一個(gè)對象,都返回這個(gè)接口SLObjectItf. 銷毀對象通過destory().應(yīng)用程序獲取其他的接口,通過type ID 使用GetInterface來返回。通過SLObjectItf接口的Realize和resume來控制狀態(tài)。
SLEngineItf:應(yīng)用程序開啟一個(gè)會(huì)話的方式,是通過創(chuàng)建一個(gè)engine對象。Engine對象通過slCreateEngine()來創(chuàng)建,返回一個(gè)SLObjectItf。
當(dāng)Engine對象創(chuàng)建后,可以獲取它的SLEngineItf。
SLBufferQueueItf:被用于流式的音頻數(shù)據(jù),填充到一個(gè)player object或者record object的buffer隊(duì)列里面。
1)對于recorder對象,當(dāng)recorder處于SL_RECORDSTATE_RECORDING狀態(tài)時(shí)(該狀態(tài)由SLRecordItf接口控制),添加buffer會(huì)隱式地開始填充進(jìn)程。如果隊(duì)列中沒有足夠的buffer,音頻數(shù)據(jù)的填充將會(huì)停止,本應(yīng)錄制到buffer隊(duì)列中的audio數(shù)據(jù)會(huì)丟失,此時(shí)錄制仍保持SL_RECORDSTATE_RECORDING狀態(tài)。一旦入隊(duì)了額外的buffer,
填充將銜接當(dāng)前的音頻數(shù)據(jù)繼續(xù)進(jìn)行,而不是從緩沖耗盡(starvation)的位置重新開始。如果recorder沒有處于錄制狀態(tài),額外的buffer并不會(huì)被填充任何數(shù)據(jù)。
?2)在播放對象中的buffer被就地使用,并不會(huì)被設(shè)備拷貝。應(yīng)用程序的開發(fā)者應(yīng)該注意到,修改在已經(jīng)入隊(duì)的buffer的內(nèi)容是無效的和會(huì)引起音頻數(shù)據(jù)損壞的。
3)一旦入隊(duì)列的buffer完成了播放或者填充,有callback進(jìn)行通知,它是安全刪除buffer。它也是安全的用新數(shù)據(jù)填充buffer,和在在播放對象入隊(duì)buffer和再次入隊(duì)buffer在錄制對象。
3)狀態(tài)轉(zhuǎn)換為SL_PLAYSTATE_STOPPED時(shí),通過release所有的buffer來清空隊(duì)列,并設(shè)置cursor為0。每一個(gè)buffer被釋放時(shí),都會(huì)觸發(fā)帶有SL_BUFFERQUEUEEVENT_STOPPED標(biāo)志的回調(diào)。
4)一旦轉(zhuǎn)換為了SL_RECORDSTATE_STOPPED狀態(tài),應(yīng)用程序應(yīng)該繼續(xù)放buffer到隊(duì)列中,來取回系統(tǒng)中剩余的音頻數(shù)據(jù)。取回完成的標(biāo)志是回調(diào)方法帶有SL_BUFFERQUEUEEVENT_CONTENT_END事件標(biāo)志??盏腷uffer可以被用于下一個(gè)錄制會(huì)話。錄制的cursor被設(shè)置為0。
6)一旦轉(zhuǎn)換為SL_PLAYSTATE_PAUSED或SL_RECORDSTATE_PAUSED狀態(tài),cursor仍舊保留在當(dāng)前的位置。
?
createSLEngine
engine.fastPathSampleRate_ = static_cast<SLmilliHertz>(sampleRate) * 1000; engine.fastPathFramesPerBuf_ = static_cast<uint32_t>(framesPerBuf); engine.sampleChannels_ = AUDIO_SAMPLE_CHANNELS; engine.bitsPerSample_ = SL_PCMSAMPLEFORMAT_FIXED_16; /* SL_API SLresultSLAPIENTRY slCreateEngine( SLObjectItf *pEngine, SLuint32 numOptions const SLEngineOption *pEngineOptions, SLuint32 numInterfaces, const SLInterfaceID *pInterfaceIds, const SLboolean * pInterfaceRequired )*/result = slCreateEngine(&engine.slEngineObj_, 0, NULL, 0, NULL, NULL); SLASSERT(result);//Realizing the object in synchronous mode. */result =(*engine.slEngineObj_)->Realize(engine.slEngineObj_, SL_BOOLEAN_FALSE); SLASSERT(result);//獲取slEngineItf_
result = (*engine.slEngineObj_)->GetInterface(engine.slEngineObj_, SL_IID_ENGINE,&engine.slEngineItf_); // compute the RECOMMENDED fast audio buffer size: // the lower latency required // *) the smaller the buffer should be (adjust it here) AND // *) the less buffering should be before starting player AFTER // receiving the recorder buffer // Adjust the bufSize here to fit your bill [before it busts] uint32_t bufSize = engine.fastPathFramesPerBuf_ * engine.sampleChannels_ *engine.bitsPerSample_; bufSize = (bufSize + 7) >> 3; // bits --> byte engine.bufCount_ = BUF_COUNT; engine.bufs_ = allocateSampleBufs(engine.bufCount_, bufSize); assert(engine.bufs_);//創(chuàng)建緩沖隊(duì)列,freeBufQueue是指空閑的buffer隊(duì)列,主要是提供空的采樣數(shù)組。recBufQueue是接收緩沖隊(duì)列,主要是用來存儲(chǔ)已采集到的音頻數(shù)據(jù),同樣也是播放數(shù)據(jù)的來源。引擎初始化完畢之后會(huì)初始化freeBufQueue,初始化了16個(gè)空的大小為480字節(jié)的數(shù)組。至此音頻引擎的初始化結(jié)束。
engine.freeBufQueue_ = new AudioQueue(engine.bufCount_); engine.recBufQueue_ = new AudioQueue(engine.bufCount_); assert(engine.freeBufQueue_ && engine.recBufQueue_); for (uint32_t i = 0; i < engine.bufCount_; i++) {engine.freeBufQueue_->push(&engine.bufs_[i]); }//創(chuàng)建AudioDelay
engine.echoDelay_ = delayInMs; engine.echoDecay_ = decay; engine.delayEffect_ = new AudioDelay(engine.fastPathSampleRate_, .sampleChannels_, engine.bitsPerSample_,engine.echoDelay_, engine.echoDecay_); assert(engine.delayEffect_);AudioPlayer
構(gòu)造方法
SLresult result; assert(sampleFormat); sampleInfo_ = *sampleFormat; /* SLresult (*CreateOutputMix) ( SLEngineItf self, SLObjectItf* pMix, SLuint32 numInterfaces, const SLInterfaceID * pInterfaceIds, const SLboolean * pInterfaceRequired ); 創(chuàng)建混音器的對象 */ result = (*slEngine)->CreateOutputMix(slEngine, &outputMixObjectItf_, 0, NULL, NULL); SLASSERT(result);// realize the output mix result =(*outputMixObjectItf_)->Realize(outputMixObjectItf_, SL_BOOLEAN_FALSE); SLASSERT(result); // configure audio source,配置audio source SLDataLocator_AndroidSimpleBufferQueue loc_bufq = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, DEVICE_SHADOW_BUFFER_QUEUE_LEN}; SLAndroidDataFormat_PCM_EX format_pcm; ConvertToSLSampleFormat(&format_pcm, &sampleInfo_); SLDataSource audioSrc = {&loc_bufq, &format_pcm}; // configure audio sink,配置音頻的輸出 SLDataLocator_OutputMix loc_outmix = {SL_DATALOCATOR_OUTPUTMIX,outputMixObjectItf_}; SLDataSink audioSnk = {&loc_outmix, NULL}; /** create fast path audio player: SL_IID_BUFFERQUEUE and SL_IID_VOLUME* and other non-signal processing interfaces are ok.,創(chuàng)建audioplayer*/ SLInterfaceID ids[2] = {SL_IID_BUFFERQUEUE, SL_IID_VOLUME}; SLboolean req[2] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE}; /* SLresult (*CreateAudioPlayer) ( SLEngineItf self, SLObjectItf* pPlayer, const SLDataSource *pAudioSrc, const SLDataSink *pAudioSnk, SLuint32 numInterfaces, const SLInterfaceID * pInterfaceIds, const SLboolean * pInterfaceRequired ); 創(chuàng)建音頻播放器 */ result = (*slEngine)->CreateAudioPlayer(slEngine, &playerObjectItf_, &audioSrc, &audioSnk,sizeof(ids) / sizeof(ids[0]), ids, req); SLASSERT(result); // realize the player,實(shí)現(xiàn)播放器 result = (*playerObjectItf_)->Realize(playerObjectItf_, SL_BOOLEAN_FALSE); SLASSERT(result); // get the play interface,獲取播放接口 result = (*playerObjectItf_)->GetInterface(playerObjectItf_, SL_IID_PLAY, &playItf_); SLASSERT(result); // get the buffer queue interface,獲取bufferq queue的接口 result = 
(*playerObjectItf_)->GetInterface(playerObjectItf_, SL_IID_BUFFERQUEUE,&playBufferQueueItf_); SLASSERT(result); // register callback on the buffer queue,在bufferq queue上注冊接口 result = (*playBufferQueueItf_)->RegisterCallback(playBufferQueueItf_, bqPlayerCallback, this); SLASSERT(result);//設(shè)置播放狀態(tài)
result = (*playItf_)->SetPlayState(playItf_, SL_PLAYSTATE_STOPPED); SLASSERT(result); // create an empty queue to track deviceQueue devShadowQueue_ = new AudioQueue(DEVICE_SHADOW_BUFFER_QUEUE_LEN); assert(devShadowQueue_);silentBuf_.cap_ = (format_pcm.containerSize >> 3) * format_pcm.numChannels *sampleInfo_.framesPerBuf_; silentBuf_.buf_ = new uint8_t[silentBuf_.cap_]; memset(silentBuf_.buf_, 0, silentBuf_.cap_); silentBuf_.size_ = silentBuf_.cap_;AudioPlayer::Start
//首先獲取播放狀態(tài)
SLuint32 state; SLresult result = (*playItf_)->GetPlayState(playItf_, &state); if (result != SL_RESULT_SUCCESS) {return SL_BOOLEAN_FALSE; } if (state == SL_PLAYSTATE_PLAYING) {return SL_BOOLEAN_TRUE; } //先設(shè)置播放狀態(tài)是STOPPED狀態(tài) result = (*playItf_)->SetPlayState(playItf_, SL_PLAYSTATE_STOPPED); SLASSERT(result); //然后如隊(duì)列相應(yīng)的buffer。 result =(*playBufferQueueItf_)->Enqueue(playBufferQueueItf_, silentBuf_.buf_, silentBuf_.size_); SLASSERT(result); devShadowQueue_->push(&silentBuf_); //設(shè)置播放的狀態(tài)為playing result = (*playItf_)->SetPlayState(playItf_, SL_PLAYSTATE_PLAYING); SLASSERT(result); return SL_BOOLEAN_TRUE;ProcessSLCallback
注冊callback:
void bqPlayerCallback(SLAndroidSimpleBufferQueueItf bq, void *ctx) {(static_cast<AudioPlayer *>(ctx))->ProcessSLCallback(bq); } void AudioPlayer::ProcessSLCallback(SLAndroidSimpleBufferQueueItf bq) { #ifdef ENABLE_LOGlogFile_->logTime(); #endifstd::lock_guard<std::mutex> lock(stopMutex_);// retrieve the finished device buf and put onto the free queue// so recorder could re-use it,獲取已經(jīng)完成的device buf,并把它放到空閑隊(duì)列的緩沖區(qū)。recorder可以重復(fù)使用它sample_buf *buf; devShadowQueue_->pop();if (buf != &silentBuf_) {buf->size_ = 0;freeQueue_->push(buf);if (!playQueue_->front(&buf)) { #ifdef ENABLE_LOGlogFile_->log("%s", "====Warning: running out of the Audio buffers"); #endifreturn;} devShadowQueue_->push(buf); (*bq)->Enqueue(bq, buf->buf_, buf->size_);//在從播放隊(duì)列中,獲取一個(gè)buffer并入隊(duì)列。 playQueue_->pop();//如果播放隊(duì)列的大小小于kickstart buffer的數(shù)量。入隊(duì)列silent數(shù)據(jù)。
playQueue是播放隊(duì)列,如果為空的話表示沒有緩沖數(shù)據(jù),這里回調(diào)到用的地方做錯(cuò)誤處理,若是成功取出,那么先將其存入中轉(zhuǎn)隊(duì)列,并且將其傳入調(diào)用播放的方法中開啟播放,最后在播放隊(duì)列中刪除該已經(jīng)播放的數(shù)組,在播放完成之后會(huì)進(jìn)入Player播放隊(duì)列注冊的回調(diào)中。
if (playQueue_->size() < PLAY_KICKSTART_BUFFER_COUNT) {(*bq)->Enqueue(bq, buf->buf_, buf->size_);devShadowQueue_->push(&silentBuf_);return; }//填充要播放的數(shù)據(jù)。
for (int32_t idx = 0; idx < PLAY_KICKSTART_BUFFER_COUNT; idx++) {playQueue_->front(&buf);playQueue_->pop();devShadowQueue_->push(buf);//devshadow的queue的作用是中轉(zhuǎn)隊(duì)列。(*bq)->Enqueue(bq, buf->buf_, buf->size_); }AudioPlayer::stop
SLuint32 state;SLresult result = (*playItf_)->GetPlayState(playItf_, &state);SLASSERT(result);if (state == SL_PLAYSTATE_STOPPED) return;std::lock_guard<std::mutex> lock(stopMutex_);result = (*playItf_)->SetPlayState(playItf_, SL_PLAYSTATE_STOPPED);SLASSERT(result);(*playBufferQueueItf_)->Clear(playBufferQueueItf_);#ifdef ENABLE_LOGif (logFile_) {delete logFile_;logFile_ = nullptr;} #endifAudioPlayer:~AudioPlayer
// Destructor: tears down the OpenSL ES player while holding the stop lock so
// the buffer-queue callback cannot run concurrently, recycles every in-flight
// buffer back to the free queue, then destroys the output mix.
AudioPlayer::~AudioPlayer() {
  std::lock_guard<std::mutex> lock(stopMutex_);
  // destroy buffer queue audio player object, and invalidate all associated
  // interfaces
  if (playerObjectItf_ != NULL) {
    (*playerObjectItf_)->Destroy(playerObjectItf_);
  }
  // Consume all non-completed audio buffers: drain the device shadow queue,
  // returning everything except the shared silent buffer to freeQueue_.
  sample_buf *buf = NULL;
  while (devShadowQueue_->front(&buf)) {
    buf->size_ = 0;
    devShadowQueue_->pop();
    if (buf != &silentBuf_) {
      freeQueue_->push(buf);
    }
  }
  delete devShadowQueue_;
  // Move any still-pending playback buffers from playQueue_ onto freeQueue_.
  while (playQueue_->front(&buf)) {
    buf->size_ = 0;
    playQueue_->pop();
    freeQueue_->push(buf);
  }
  // destroy output mix object, and invalidate all associated interfaces
  if (outputMixObjectItf_) {
    (*outputMixObjectItf_)->Destroy(outputMixObjectItf_);
  }
  delete[] silentBuf_.buf_;  // silent kick-start buffer owned by this object
}
AudioRecorder
AudioRecorder::AudioRecorder
?//轉(zhuǎn)為SL的格式
SLresult result; sampleInfo_ = *sampleFormat; SLAndroidDataFormat_PCM_EX format_pcm; ConvertToSLSampleFormat(&format_pcm, &sampleInfo_);// configure audio source,配置audiosource,SL_DATALOCATOR_IODEVICE Data will be generated or consumed by the
specified IO device. Note: for audio output use
the output mix.
SL_DEFAULTDEVICEID_AUDIOINPUTIdentifier denoting the set of input devicesfrom
whichthe implementation receives audio from by
default.
SLDataLocator_IODevice loc_dev = {SL_DATALOCATOR_IODEVICE, SL_IODEVICE_AUDIOINPUT, SL_DEFAULTDEVICEID_AUDIOINPUT, NULL}; SLDataSource audioSrc = {&loc_dev, NULL};
// configure audio sink,配置audi sink SLDataLocator_AndroidSimpleBufferQueue loc_bq = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, DEVICE_SHADOW_BUFFER_QUEUE_LEN}; SLDataSink audioSnk = {&loc_bq, &format_pcm};//創(chuàng)建audiorecorder的object
// create audio recorder // (requires the RECORD_AUDIO permission) const SLInterfaceID id[2] = {SL_IID_ANDROIDSIMPLEBUFFERQUEUE,SL_IID_ANDROIDCONFIGURATION}; const SLboolean req[2] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE}; result = (*slEngine)->CreateAudioRecorder(slEngine, &recObjectItf_, &audioSrc, &audioSnk,sizeof(id) / sizeof(id[0]), id, req); SLASSERT(result); // Configure the voice recognition preset which has no // signal processing for lower latency. /*/*---------------------------------------------------------------------------*/ /* Android AudioRecorder configuration ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? */ /*---------------------------------------------------------------------------*//** Audio recording preset */ /** Audio recording preset key */ #define SL_ANDROID_KEY_RECORDING_PRESET ((const SLchar*) "androidRecordingPreset") /** Audio recording preset values */ /** ? preset "none" cannot be set, it is used to indicate the current settings* ? ? do not match any of the presets. */ #define SL_ANDROID_RECORDING_PRESET_NONE ? ? ? ? ? ? ? ?((SLuint32) 0x00000000) /** ? generic recording configuration on the platform */ #define SL_ANDROID_RECORDING_PRESET_GENERIC ? ? ? ? ? ? ((SLuint32) 0x00000001) /** ? uses the microphone audio source with the same orientation as the camera* ? ? if available, the main device microphone otherwise */ #define SL_ANDROID_RECORDING_PRESET_CAMCORDER ? ? ? ? ? ((SLuint32) 0x00000002) /** ? uses the main microphone tuned for voice recognition */ #define SL_ANDROID_RECORDING_PRESET_VOICE_RECOGNITION ? ((SLuint32) 0x00000003) /** ? uses the main microphone tuned for audio communications */ #define SL_ANDROID_RECORDING_PRESET_VOICE_COMMUNICATION ((SLuint32) 0x00000004) /** ? uses the main microphone unprocessed */ #define SL_ANDROID_RECORDING_PRESET_UNPROCESSED ? ? ? ? ((SLuint32) 0x00000005)/*---------------------------------------------------------------------------*/ /* Android AudioPlayer configuration ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? 
? ? ? ? */ /*---------------------------------------------------------------------------*//** Audio playback stream type */ /** Audio playback stream type key */ #define SL_ANDROID_KEY_STREAM_TYPE ((const SLchar*) "androidPlaybackStreamType")/** Audio playback stream type ?values */ /* ? ? ?same as android.media.AudioManager.STREAM_VOICE_CALL */ #define SL_ANDROID_STREAM_VOICE ? ? ? ?((SLint32) 0x00000000) /* ? ? ?same as android.media.AudioManager.STREAM_SYSTEM */ #define SL_ANDROID_STREAM_SYSTEM ? ? ? ((SLint32) 0x00000001) /* ? ? ?same as android.media.AudioManager.STREAM_RING */ #define SL_ANDROID_STREAM_RING ? ? ? ? ((SLint32) 0x00000002) /* ? ? ?same as android.media.AudioManager.STREAM_MUSIC */ #define SL_ANDROID_STREAM_MEDIA ? ? ? ?((SLint32) 0x00000003) /* ? ? ?same as android.media.AudioManager.STREAM_ALARM */ #define SL_ANDROID_STREAM_ALARM ? ? ? ?((SLint32) 0x00000004) /* ? ? ?same as android.media.AudioManager.STREAM_NOTIFICATION */ #define SL_ANDROID_STREAM_NOTIFICATION ((SLint32) 0x00000005)/*---------------------------------------------------------------------------*/ /* Android AudioPlayer and AudioRecorder configuration ? ? ? ? ? ? ? ? ? ? ? */ /*---------------------------------------------------------------------------*//** Audio Performance mode.* Performance mode tells the framework how to configure the audio path* for a player or recorder according to application performance and* functional requirements.* It affects the output or input latency based on acceptable tradeoffs on* battery drain and use of pre or post processing effects.* Performance mode should be set before realizing the object and should be* read after realizing the object to check if the requested mode could be* granted or not.*/ /** Audio Performance mode key */ #define SL_ANDROID_KEY_PERFORMANCE_MODE ((const SLchar*) "androidPerformanceMode")/** Audio performance values */ /* ? ? ?No specific performance requirement. Allows HW and SW pre/post processing. 
*/ #define SL_ANDROID_PERFORMANCE_NONE ((SLuint32) 0x00000000) /* ? ? ?Priority given to latency. No HW or software pre/post processing.* ? ? ?This is the default if no performance mode is specified. */ #define SL_ANDROID_PERFORMANCE_LATENCY ((SLuint32) 0x00000001) /* ? ? ?Priority given to latency while still allowing HW pre and post processing. */ #define SL_ANDROID_PERFORMANCE_LATENCY_EFFECTS ((SLuint32) 0x00000002) /* ? ? ?Priority given to power saving if latency is not a concern.* ? ? ?Allows HW and SW pre/post processing. */ #define SL_ANDROID_PERFORMANCE_POWER_SAVING ((SLuint32) 0x00000003) 在采集選項(xiàng)中包含xxx_RECORDING_PRESET_GENERIC(通用配置,不知道是啥意思) xxx_RECORDING_PRESET_CAMCORDER(錄像中優(yōu)先使用攝像頭同方向的Mic,如果沒有同方向的就使用主Mic) xxx_RECORDING_PRESET_VOICE_RECOGNITION(針對語音識(shí)別業(yè)務(wù)進(jìn)行了優(yōu)化,可能使用降噪Mic) xxx_RECORDING_PRESET_VOICE_COMMUNICATION(針對電話或網(wǎng)絡(luò)電話優(yōu)化,可能會(huì)硬件AEC、NS、AGC) xxx_RECORDING_PRESET_UNPROCESSED(使用主Mic采集,不經(jīng)過任何優(yōu)化處理)在渲染選項(xiàng)中包含xxx_STREAM_VOICE(VoIP或者電話,音量需要通過通話音量調(diào)節(jié)) xxx_STREAM_SYSTEM(系統(tǒng)音量,我的華為P10沒有這個(gè)音量選項(xiàng)) xxx_STREAM_RING(鈴聲音量) xxx_STREAM_MEDIA(媒體音量) xxx_STREAM_ALARM(鬧鐘音量)———————————————— 版權(quán)聲明:本文為CSDN博主「everlastxc」的原創(chuàng)文章,遵循CC 4.0 BY-SA版權(quán)協(xié)議,轉(zhuǎn)載請附上原文出處鏈接及本聲明。 原文鏈接:https://blog.csdn.net/qq_29621351/article/details/94562600 */ SLAndroidConfigurationItf inputConfig; result = (*recObjectItf_)->GetInterface(recObjectItf_, SL_IID_ANDROIDCONFIGURATION,&inputConfig); if (SL_RESULT_SUCCESS == result) {SLuint32 presetValue = SL_ANDROID_RECORDING_PRESET_VOICE_RECOGNITION;(*inputConfig)->SetConfiguration(inputConfig, SL_ANDROID_KEY_RECORDING_PRESET,&presetValue, sizeof(SLuint32)); } result = (*recObjectItf_)->Realize(recObjectItf_, SL_BOOLEAN_FALSE); SLASSERT(result);//獲取recordobjectItf
result =(*recObjectItf_)->GetInterface(recObjectItf_, SL_IID_RECORD, &recItf_); SLASSERT(result);//獲取對象中的bufferqueue
result = (*recObjectItf_)->GetInterface(recObjectItf_, SL_IID_ANDROIDSIMPLEBUFFERQUEUE,&recBufQueueItf_); SLASSERT(result);//注冊bufferqueue中的callback回調(diào)接口
result = (*recBufQueueItf_)->RegisterCallback(recBufQueueItf_, bqRecorderCallback, this); SLASSERT(result);//創(chuàng)建中轉(zhuǎn)的緩沖區(qū)
devShadowQueue_ = new AudioQueue(DEVICE_SHADOW_BUFFER_QUEUE_LEN); assert(devShadowQueue_);AudioRecorder::Start
// in case already recording, stop recording and clear buffer queue,先暫停錄制和情況buffer queue隊(duì)列 result = (*recItf_)->SetRecordState(recItf_, SL_RECORDSTATE_STOPPED); SLASSERT(result); result = (*recBufQueueItf_)->Clear(recBufQueueItf_); SLASSERT(result); for (int i = 0; i < RECORD_DEVICE_KICKSTART_BUF_COUNT; i++) {sample_buf *buf = NULL;if (!freeQueue_->front(&buf)) {LOGE("=====OutOfFreeBuffers @ startingRecording @ (%d)", i);break;}freeQueue_->pop();assert(buf->buf_ && buf->cap_ && !buf->size_);result = (*recBufQueueItf_)->Enqueue(recBufQueueItf_, buf->buf_, buf->cap_);//把空閑的buffer,輸入進(jìn)去SLASSERT(result);devShadowQueue_->push(buf);//并把這個(gè)buffer放到這個(gè)中轉(zhuǎn)隊(duì)列 }//設(shè)置錄制狀態(tài)為RECORDING
result = (*recItf_)->SetRecordState(recItf_, SL_RECORDSTATE_RECORDING); SLASSERT(result);AudioRecorder::ProcessSLCallback
assert(bq == recBufQueueItf_); sample_buf *dataBuf = NULL; devShadowQueue_->front(&dataBuf); devShadowQueue_->pop(); dataBuf->size_ = dataBuf->cap_; // device only calls us when it is really// fullcallback_(ctx_, ENGINE_SERVICE_MSG_RECORDED_AUDIO_AVAILABLE, dataBuf);//callback,audio數(shù)據(jù)已經(jīng)存在 recQueue_->push(dataBuf); //重新設(shè)置空閑的buffer到如隊(duì)列 sample_buf *freeBuf; while (freeQueue_->front(&freeBuf) && devShadowQueue_->push(freeBuf)) {freeQueue_->pop();SLresult result = (*bq)->Enqueue(bq, freeBuf->buf_, freeBuf->cap_);SLASSERT(result); }Audio Delay
當(dāng)信號(hào)輸入進(jìn)來時(shí),使信號(hào)的輸出波形比輸入滯后設(shè)定的時(shí)間值比如100ms
把信號(hào)持續(xù)輸入看成一個(gè)和時(shí)間相關(guān)的數(shù)據(jù)流,設(shè)定一個(gè)緩沖Buffer,大小根據(jù)delay的最大值設(shè)定。比如delay最大值為2000ms,那么對于96kHz采樣頻率(即每毫秒96個(gè)樣本)的處理來說,Buffer = 96 * 2000 = 192000;Buffer的大小可能還需要加上DSP一次運(yùn)算的樣本點(diǎn)數(shù)(比如128),這樣最終的數(shù)據(jù)緩沖值Buffer = 96 * 2000 + 128 = 192128。
設(shè)定一個(gè)結(jié)構(gòu)體,內(nèi)部含有緩沖區(qū)信號(hào)樣本點(diǎn)輸入計(jì)數(shù)器指針*inP、樣本信號(hào)計(jì)數(shù)器輸出指針*outP,以及延遲設(shè)置值Dly;
*outP的值根據(jù)*inP和Dly之差來確定。要注意*inP比Dly小的情況,這種情況下*outP的值應(yīng)該取上一個(gè)Buffer的數(shù)據(jù),即*outP位于*inP的上一個(gè)Buffer中,而不是同屬一個(gè)Buffer里面。
————————————————
版權(quán)聲明:本文為CSDN博主「Flynn2019」的原創(chuàng)文章,遵循CC 4.0 BY-SA版權(quán)協(xié)議,轉(zhuǎn)載請附上原文出處鏈接及本聲明。
原文鏈接:https://blog.csdn.net/bentengdema/article/details/102495512
AudioDelay::AudioDelay
feedbackFactor_ = static_cast<int32_t>(decayWeight_ * kFloatToIntMapFactor); liveAudioFactor_ = kFloatToIntMapFactor - feedbackFactor_; allocateBuffer();allocteBuffer
//轉(zhuǎn)為s
float floatDelayTime = (float)delayTime_ / kMsPerSec; //轉(zhuǎn)為1s的總幀數(shù) float fNumFrames = floatDelayTime * (float)sampleRate_ / kMsPerSec; //總幀數(shù) size_t sampleCount = static_cast<uint32_t>(fNumFrames + 0.5f) * channelCount_;//字節(jié)數(shù)
uint32_t bytePerSample = format_ / 8; assert(bytePerSample <= 4 && bytePerSample);//每幀的字節(jié)數(shù)
uint32_t bytePerFrame = channelCount_ * bytePerSample; // get bufCapacity in bytes bufCapacity_ = sampleCount * bytePerSample; bufCapacity_ =((bufCapacity_ + bytePerFrame - 1) / bytePerFrame) * bytePerFrame;//按每幀的字節(jié)數(shù)對齊buffer_ = new uint8_t[bufCapacity_]; assert(buffer_);memset(buffer_, 0, bufCapacity_); curPos_ = 0;// bufSize_ is in Frames ( not samples, not bytes ) bufSize_ = bufCapacity_ / bytePerFrame;AudioDelay::process
process() filter live audio with "echo" effect: * delay time is run-time adjustable * decay time could also be adjustable, but not used * in this sample, hardcoded to .5 * * @param liveAudio is recorded audio stream * @param channelCount for liveAudio, must be 2 for stereo * @param numFrames is length of liveAudio in Frames ( not in byte ) // process every sample,處理每一幀 int32_t sampleCount = channelCount_ * numFrames; int16_t* samples = &static_cast<int16_t*>(buffer_)[curPos_ * channelCount_]; for (size_t idx = 0; idx < sampleCount; idx++) { #if 1int32_t curSample =(samples[idx] * feedbackFactor_ + liveAudio[idx] * liveAudioFactor_) /kFloatToIntMapFactor; //當(dāng)前幀和上一陣的根據(jù)相應(yīng)的因素的疊加 if (curSample > SHRT_MAX)curSample = SHRT_MAX;else if (curSample < SHRT_MIN)curSample = SHRT_MIN;liveAudio[idx] = samples[idx];samples[idx] = static_cast<int16_t>(curSample); #else// Pure delayint16_t tmp = liveAudio[idx];liveAudio[idx] = samples[idx];samples[idx] = tmp; #endif}curPos_ += numFrames;lock_.unlock();Audio Common
void ConvertToSLSampleFormat(SLAndroidDataFormat_PCM_EX* pFormat,SampleFormat* pSampleInfo_) { // Only support 2 channels,設(shè)置聲道數(shù) // For channelMask, refer to wilhelm/src/android/channels.c for details if (pSampleInfo_->channels_ <= 1) {pFormat->numChannels = 1;pFormat->channelMask = SL_SPEAKER_FRONT_LEFT; } else {pFormat->numChannels = 2;pFormat->channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT; } case SL_ANDROID_PCM_REPRESENTATION_FLOAT:pFormat->bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_32;//設(shè)置每一幀的bit數(shù)pFormat->containerSize = SL_PCMSAMPLEFORMAT_FIXED_32;//設(shè)置container的大小pFormat->formatType = SL_ANDROID_DATAFORMAT_PCM_EX;//設(shè)置數(shù)據(jù)類型break;audio_main
分別創(chuàng)建audioplayer和audio record,同時(shí)注冊了
engine.player_->SetBufQueue(engine.recBufQueue_, engine.freeBufQueue_); engine.player_->RegisterCallback(EngineService, (void *)&engine); engine.recorder_->SetBufQueues(engine.freeBufQueue_, engine.recBufQueue_); engine.recorder_->RegisterCallback(EngineService, (void *)&engine); bool EngineService(void *ctx, uint32_t msg, void *data) {assert(ctx == &engine);switch (msg) {case ENGINE_SERVICE_MSG_RETRIEVE_DUMP_BUFS: {*(static_cast<uint32_t *>(data)) = dbgEngineGetBufCount();break;}case ENGINE_SERVICE_MSG_RECORDED_AUDIO_AVAILABLE: {//當(dāng)有錄制的audio數(shù)據(jù)時(shí)// adding audio delay effectsample_buf *buf = static_cast<sample_buf *>(data);assert(engine.fastPathFramesPerBuf_ ==buf->size_ / engine.sampleChannels_ / (engine.bitsPerSample_ / 8));engine.delayEffect_->process(reinterpret_cast<int16_t *>(buf->buf_),engine.fastPathFramesPerBuf_);//達(dá)到延遲的效果break;}default:assert(false);return false;}return true; }?
?
總結(jié)
以上是生活随笔為你收集整理的Android-NDK-audio-echo的全部內(nèi)容,希望文章能夠幫你解決所遇到的問題。
- 上一篇: Android-NDK:native-m
- 下一篇: Android-NDK-EGL