
Android audio (2): AudioRecord analysis, part 1


Android audio (1): Source code paths

Android audio (2): AudioRecord analysis, part 1

Android audio (3): AudioRecord analysis, part 2

Android audio (4): AudioTrack analysis, part 1

Android audio (5): AudioTrack analysis, part 2

Android audio (6): AudioRecord/AudioTrack capture and playback example

Android audio capture class: AudioRecord

Relevant source files:

frameworks/base/media/java/android/media/AudioRecord.java

frameworks/base/core/jni/android_media_AudioRecord.cpp

frameworks/av/media/libmedia/AudioRecord.cpp

 

To capture audio in an app (typically on a dedicated recording thread), an AudioRecord object must first be instantiated. The analysis below starts from that instantiation:

private AudioRecord audiorecord = new AudioRecord(......)
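As a concrete version of the schematic line above, here is a minimal sketch of how an app typically constructs an AudioRecord for microphone capture. The class name and parameter values are illustrative only (not taken from AOSP), and the app must hold the RECORD_AUDIO permission:

// Illustrative example, not AOSP code
import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.MediaRecorder;

public class CaptureExample {
    // 44100 Hz / mono / PCM 16-bit is the combination the SDK documents as
    // guaranteed to work on all devices.
    private static final int SAMPLE_RATE = 44100;
    private static final int CHANNEL = AudioFormat.CHANNEL_IN_MONO;
    private static final int ENCODING = AudioFormat.ENCODING_PCM_16BIT;

    public static AudioRecord create() {
        // Ask the framework for the minimum buffer size for this configuration.
        int minBufferSize = AudioRecord.getMinBufferSize(SAMPLE_RATE, CHANNEL, ENCODING);

        // This call runs the first (public) constructor analyzed below.
        AudioRecord recorder = new AudioRecord(MediaRecorder.AudioSource.MIC,
                SAMPLE_RATE, CHANNEL, ENCODING, minBufferSize * 2);

        // The constructor does not throw for every failure, so check the state explicitly.
        if (recorder.getState() != AudioRecord.STATE_INITIALIZED) {
            recorder.release();
            return null;
        }
        return recorder;
    }
}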

 

The relevant AudioRecord source is shown below. The class has three constructors:

The first is the public constructor that apps call to instantiate an AudioRecord; it simply delegates to the second one.

The second, marked @SystemApi, does the real work: it validates the parameters and calls native_setup to instantiate the native AudioRecord object.

The third, package-private constructor wraps an already-created native (C++) AudioRecord and is used by the AudioRecordRoutingProxy subclass.

    // Call into JNI to create the native AudioRecord instance
    int initResult = native_setup(new WeakReference<AudioRecord>(this),
            mAudioAttributes, sampleRate, mChannelMask, mChannelIndexMask,
            mAudioFormat, mNativeBufferSizeInBytes,
            session, ActivityThread.currentOpPackageName(), 0 /*nativeRecordInJavaObj*/);
// frameworks/base/media/java/android/media/AudioRecord.java
//---------------------------------------------------------
// Constructor, Finalize
//--------------------
/**
 * Class constructor.
 * Though some invalid parameters will result in an {@link IllegalArgumentException} exception,
 * other errors do not.  Thus you should call {@link #getState()} immediately after construction
 * to confirm that the object is usable.
 * @param audioSource the recording source.
 *   See {@link MediaRecorder.AudioSource} for the recording source definitions.
 * @param sampleRateInHz the sample rate expressed in Hertz. 44100Hz is currently the only
 *   rate that is guaranteed to work on all devices, but other rates such as 22050,
 *   16000, and 11025 may work on some devices.
 *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
 *   which is usually the sample rate of the source.
 *   {@link #getSampleRate()} can be used to retrieve the actual sample rate chosen.
 * @param channelConfig describes the configuration of the audio channels.
 *   See {@link AudioFormat#CHANNEL_IN_MONO} and
 *   {@link AudioFormat#CHANNEL_IN_STEREO}.  {@link AudioFormat#CHANNEL_IN_MONO} is guaranteed
 *   to work on all devices.
 * @param audioFormat the format in which the audio data is to be returned.
 *   See {@link AudioFormat#ENCODING_PCM_8BIT}, {@link AudioFormat#ENCODING_PCM_16BIT},
 *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
 * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is written
 *   to during the recording. New audio data can be read from this buffer in smaller chunks
 *   than this size. See {@link #getMinBufferSize(int, int, int)} to determine the minimum
 *   required buffer size for the successful creation of an AudioRecord instance. Using values
 *   smaller than getMinBufferSize() will result in an initialization failure.
 * @throws java.lang.IllegalArgumentException
 */
public AudioRecord(int audioSource, int sampleRateInHz, int channelConfig, int audioFormat,
        int bufferSizeInBytes)
        throws IllegalArgumentException
{
    this((new AudioAttributes.Builder())
                .setInternalCapturePreset(audioSource)
                .build(),
            (new AudioFormat.Builder())
                .setChannelMask(getChannelMaskFromLegacyConfig(channelConfig,
                                    true/*allow legacy configurations*/))
                .setEncoding(audioFormat)
                .setSampleRate(sampleRateInHz)
                .build(),
            bufferSizeInBytes,
            AudioManager.AUDIO_SESSION_ID_GENERATE);
}

/**
 * @hide
 * Class constructor with {@link AudioAttributes} and {@link AudioFormat}.
 * @param attributes a non-null {@link AudioAttributes} instance. Use
 *     {@link AudioAttributes.Builder#setAudioSource(int)} for configuring the audio
 *     source for this instance.
 * @param format a non-null {@link AudioFormat} instance describing the format of the data
 *     that will be recorded through this AudioRecord. See {@link AudioFormat.Builder} for
 *     configuring the audio format parameters such as encoding, channel mask and sample rate.
 * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is written
 *   to during the recording. New audio data can be read from this buffer in smaller chunks
 *   than this size. See {@link #getMinBufferSize(int, int, int)} to determine the minimum
 *   required buffer size for the successful creation of an AudioRecord instance. Using values
 *   smaller than getMinBufferSize() will result in an initialization failure.
 * @param sessionId ID of audio session the AudioRecord must be attached to, or
 *   {@link AudioManager#AUDIO_SESSION_ID_GENERATE} if the session isn't known at construction
 *   time. See also {@link AudioManager#generateAudioSessionId()} to obtain a session ID before
 *   construction.
 * @throws IllegalArgumentException
 */
@SystemApi
public AudioRecord(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
        int sessionId) throws IllegalArgumentException
{
    mRecordingState = RECORDSTATE_STOPPED;

    if (attributes == null) {
        throw new IllegalArgumentException("Illegal null AudioAttributes");
    }
    if (format == null) {
        throw new IllegalArgumentException("Illegal null AudioFormat");
    }

    // remember which looper is associated with the AudioRecord instanciation
    if ((mInitializationLooper = Looper.myLooper()) == null) {
        mInitializationLooper = Looper.getMainLooper();
    }

    // is this AudioRecord using REMOTE_SUBMIX at full volume?
    if (attributes.getCapturePreset() == MediaRecorder.AudioSource.REMOTE_SUBMIX) {
        final AudioAttributes.Builder filteredAttr = new AudioAttributes.Builder();
        final Iterator<String> tagsIter = attributes.getTags().iterator();
        while (tagsIter.hasNext()) {
            final String tag = tagsIter.next();
            if (tag.equalsIgnoreCase(SUBMIX_FIXED_VOLUME)) {
                mIsSubmixFullVolume = true;
                Log.v(TAG, "Will record from REMOTE_SUBMIX at full fixed volume");
            } else { // SUBMIX_FIXED_VOLUME: is not to be propagated to the native layers
                filteredAttr.addTag(tag);
            }
        }
        filteredAttr.setInternalCapturePreset(attributes.getCapturePreset());
        mAudioAttributes = filteredAttr.build();
    } else {
        mAudioAttributes = attributes;
    }

    int rate = format.getSampleRate();
    if (rate == AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
        rate = 0;
    }

    int encoding = AudioFormat.ENCODING_DEFAULT;
    if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
        encoding = format.getEncoding();
    }

    audioParamCheck(attributes.getCapturePreset(), rate, encoding);

    if ((format.getPropertySetMask()
            & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
        mChannelIndexMask = format.getChannelIndexMask();
        mChannelCount = format.getChannelCount();
    }
    if ((format.getPropertySetMask()
            & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {
        mChannelMask = getChannelMaskFromLegacyConfig(format.getChannelMask(), false);
        mChannelCount = format.getChannelCount();
    } else if (mChannelIndexMask == 0) {
        mChannelMask = getChannelMaskFromLegacyConfig(AudioFormat.CHANNEL_IN_DEFAULT, false);
        mChannelCount =  AudioFormat.channelCountFromInChannelMask(mChannelMask);
    }

    audioBuffSizeCheck(bufferSizeInBytes);

    int[] sampleRate = new int[] {mSampleRate};
    int[] session = new int[1];
    session[0] = sessionId;

    //TODO: update native initialization when information about hardware init failure
    //      due to capture device already open is available.
    // Call the native method to create the native AudioRecord instance
    int initResult = native_setup(new WeakReference<AudioRecord>(this),
            mAudioAttributes, sampleRate, mChannelMask, mChannelIndexMask,
            mAudioFormat, mNativeBufferSizeInBytes,
            session, ActivityThread.currentOpPackageName(), 0 /*nativeRecordInJavaObj*/);
    if (initResult != SUCCESS) {
        loge("Error code "+initResult+" when initializing native AudioRecord object.");
        return; // with mState == STATE_UNINITIALIZED
    }

    mSampleRate = sampleRate[0];
    mSessionId = session[0];

    mState = STATE_INITIALIZED;
}

/**
 * A constructor which explicitly connects a Native (C++) AudioRecord. For use by
 * the AudioRecordRoutingProxy subclass.
 * @param nativeRecordInJavaObj A C/C++ pointer to a native AudioRecord
 * (associated with an OpenSL ES recorder). Note: the caller must ensure a correct
 * value here as no error checking is or can be done.
 */
/*package*/ AudioRecord(long nativeRecordInJavaObj)
{
    mNativeRecorderInJavaObj = 0;
    mNativeCallbackCookie = 0;
    mNativeDeviceCallback = 0;

    // other initialization...
    if (nativeRecordInJavaObj != 0) {
        deferred_connect(nativeRecordInJavaObj);
    } else {
        mState = STATE_UNINITIALIZED;
    }
}

 

Instantiating an AudioRecord therefore calls the native_setup method and enters the native layer.

The JNI native method table is as follows:

// frameworks/base/core/jni/android_media_AudioRecord.cpp
static const JNINativeMethod gMethods[] = {
    // name,                  signature,  funcPtr
    {"native_start",          "(II)I",    (void *)android_media_AudioRecord_start},
    {"native_stop",           "()V",      (void *)android_media_AudioRecord_stop},
    {"native_setup",          "(Ljava/lang/Object;Ljava/lang/Object;[IIIII[ILjava/lang/String;J)I",
                                          (void *)android_media_AudioRecord_setup},
    {"native_finalize",       "()V",      (void *)android_media_AudioRecord_finalize},
    {"native_release",        "()V",      (void *)android_media_AudioRecord_release},
    {"native_read_in_byte_array",  "([BIIZ)I", (void *)android_media_AudioRecord_readInArray<jbyteArray>},
    {"native_read_in_short_array", "([SIIZ)I", (void *)android_media_AudioRecord_readInArray<jshortArray>},
    {"native_read_in_float_array", "([FIIZ)I", (void *)android_media_AudioRecord_readInArray<jfloatArray>},
    {"native_read_in_direct_buffer", "(Ljava/lang/Object;IZ)I", (void *)android_media_AudioRecord_readInDirectBuffer},
    {"native_get_buffer_size_in_frames", "()I", (void *)android_media_AudioRecord_get_buffer_size_in_frames},
    {"native_set_marker_pos", "(I)I",     (void *)android_media_AudioRecord_set_marker_pos},
    {"native_get_marker_pos", "()I",      (void *)android_media_AudioRecord_get_marker_pos},
    {"native_set_pos_update_period", "(I)I", (void *)android_media_AudioRecord_set_pos_update_period},
    {"native_get_pos_update_period", "()I",  (void *)android_media_AudioRecord_get_pos_update_period},
    {"native_get_min_buff_size", "(III)I",   (void *)android_media_AudioRecord_get_min_buff_size},
    {"native_setInputDevice", "(I)Z",     (void *)android_media_AudioRecord_setInputDevice},
    {"native_getRoutedDeviceId", "()I",   (void *)android_media_AudioRecord_getRoutedDeviceId},
    {"native_enableDeviceCallback", "()V",  (void *)android_media_AudioRecord_enableDeviceCallback},
    {"native_disableDeviceCallback", "()V", (void *)android_media_AudioRecord_disableDeviceCallback},
    {"native_get_timestamp", "(Landroid/media/AudioTimestamp;I)I", (void *)android_media_AudioRecord_get_timestamp},
};

From the JNI method registration table we can read off the mapping:

native_setup  <-->  android_media_AudioRecord_setup
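For readers less familiar with JNI descriptor strings, the signature registered for native_setup decodes to a Java native method of roughly the following shape. Only the types come from the descriptor; the parameter names below are illustrative, not copied from AudioRecord.java:

// "(Ljava/lang/Object;Ljava/lang/Object;[IIIII[ILjava/lang/String;J)I"
//   = (Object, Object, int[], int, int, int, int, int[], String, long) -> int
private native int native_setup(Object audiorecordThis,   // weak reference to the Java AudioRecord
                                Object audioAttributes,
                                int[] sampleRate,          // in/out: the actual rate is written back
                                int channelMask,
                                int channelIndexMask,
                                int audioFormat,
                                int buffSizeInBytes,
                                int[] sessionId,           // in/out: the session id is written back
                                String opPackageName,
                                long nativeRecordInJavaObj);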

Next, let's analyze android_media_AudioRecord_setup.

android_media_AudioRecord_setup first checks whether a native AudioRecord already exists, i.e. whether nativeRecordInJavaObj is non-zero.

If it does not exist, a new native object is created:

      lpRecorder = new AudioRecord(String16(opPackageNameStr.c_str()));

If it does exist, the value is cast back to an AudioRecord pointer:

     lpRecorder = (AudioRecord*)nativeRecordInJavaObj;

That is, a pointer stored as a Java long is converted back into an AudioRecord object pointer, and the result is then stored into the Java object via setAudioRecord(env, thiz, lpRecorder);

In C++ such conversions are normally written with the static_cast or reinterpret_cast templates; here a C-style cast, (AudioRecord*), is used to force the pointer conversion.

// ----------------------------------------------------------------------------
static jint
android_media_AudioRecord_setup(JNIEnv *env, jobject thiz, jobject weak_this,
        jobject jaa, jintArray jSampleRate, jint channelMask, jint channelIndexMask,
        jint audioFormat, jint buffSizeInBytes, jintArray jSession, jstring opPackageName,
        jlong nativeRecordInJavaObj)
{
    ......
    audio_attributes_t *paa = NULL;
    sp<AudioRecord> lpRecorder = 0;
    audiorecord_callback_cookie *lpCallbackData = NULL;

    jclass clazz = env->GetObjectClass(thiz);
    if (clazz == NULL) {
        ALOGE("Can't find %s when setting up callback.", kClassPathName);
        return (jint) AUDIORECORD_ERROR_SETUP_NATIVEINITFAILED;
    }

    // if we pass in an existing *Native* AudioRecord, we don't need to create/initialize one.
    if (nativeRecordInJavaObj == 0) {
        ......
        // create an uninitialized AudioRecord object
        lpRecorder = new AudioRecord(String16(opPackageNameStr.c_str()));

        // read the AudioAttributes values
        paa = (audio_attributes_t *) calloc(1, sizeof(audio_attributes_t));
        const jstring jtags =
                (jstring) env->GetObjectField(jaa, javaAudioAttrFields.fieldFormattedTags);
        const char* tags = env->GetStringUTFChars(jtags, NULL);
        // copying array size -1, char array for tags was calloc'd, no need to NULL-terminate it
        strncpy(paa->tags, tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1);
        env->ReleaseStringUTFChars(jtags, tags);
        paa->source = (audio_source_t) env->GetIntField(jaa, javaAudioAttrFields.fieldRecSource);
        paa->flags = (audio_flags_mask_t)env->GetIntField(jaa, javaAudioAttrFields.fieldFlags);
        ALOGV("AudioRecord_setup for source=%d tags=%s flags=%08x", paa->source, paa->tags, paa->flags);

        audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE;
        if (paa->flags & AUDIO_FLAG_HW_HOTWORD) {
            flags = AUDIO_INPUT_FLAG_HW_HOTWORD;
        }

        // create the callback information:
        // this data will be passed with every AudioRecord callback
        lpCallbackData = new audiorecord_callback_cookie;
        lpCallbackData->audioRecord_class = (jclass)env->NewGlobalRef(clazz);
        // we use a weak reference so the AudioRecord object can be garbage collected.
        lpCallbackData->audioRecord_ref = env->NewGlobalRef(weak_this);
        lpCallbackData->busy = false;

        const status_t status = lpRecorder->set(paa->source,
                sampleRateInHertz,
                format,        // word length, PCM
                localChanMask,
                frameCount,
                recorderCallback,// callback_t
                lpCallbackData,// void* user
                0,             // notificationFrames,
                true,          // threadCanCallJava
                sessionId,
                AudioRecord::TRANSFER_DEFAULT,
                flags,
                -1, -1,        // default uid, pid
                paa);

        if (status != NO_ERROR) {
            ALOGE("Error creating AudioRecord instance: initialization check failed with status %d.",
                    status);
            goto native_init_failure;
        }
    } else { // end if nativeRecordInJavaObj == 0
        lpRecorder = (AudioRecord*)nativeRecordInJavaObj;

        // create the callback information:
        // this data will be passed with every AudioRecord callback
        lpCallbackData = new audiorecord_callback_cookie;
        lpCallbackData->audioRecord_class = (jclass)env->NewGlobalRef(clazz);
        // we use a weak reference so the AudioRecord object can be garbage collected.
        lpCallbackData->audioRecord_ref = env->NewGlobalRef(weak_this);
        lpCallbackData->busy = false;
    }
    ......
    // save our newly created C++ AudioRecord in the "nativeRecorderInJavaObj" field
    // of the Java object
    setAudioRecord(env, thiz, lpRecorder);

    // save our newly created callback information in the "nativeCallbackCookie" field
    // of the Java object (in mNativeCallbackCookie) so we can free the memory in finalize()
    env->SetLongField(thiz, javaAudioRecordFields.nativeCallbackCookie, (jlong)lpCallbackData);

    return (jint) AUDIO_JAVA_SUCCESS;

    // failure:
native_init_failure:
    env->DeleteGlobalRef(lpCallbackData->audioRecord_class);
    env->DeleteGlobalRef(lpCallbackData->audioRecord_ref);
    delete lpCallbackData;
    env->SetLongField(thiz, javaAudioRecordFields.nativeCallbackCookie, 0);

    // lpRecorder goes out of scope, so reference count drops to zero
    return (jint) AUDIORECORD_ERROR_SETUP_NATIVEINITFAILED;
}

Next, the setAudioRecord function. It stores the new native AudioRecord pointer into the Java object's nativeRecorderInJavaObj field, adjusts the strong reference counts, and returns the AudioRecord pointer that was previously stored there (if any).

// frameworks/base/core/jni/android_media_AudioRecord.cpp
// The members of this struct are important: they cache the jmethodID/jfieldID handles that give
// the native code direct access to the Java object's fields (the native AudioRecord pointer, the
// callback cookie, the device callback) and to the event post callback method, so repeated
// lookups are avoided and Java <--> C++ calls stay cheap.
struct audio_record_fields_t {
    // these fields provide access from C++ to the...
    jmethodID postNativeEventInJava;   //... event post callback method
    jfieldID  nativeRecorderInJavaObj; // provides access to the C++ AudioRecord object
    jfieldID  nativeCallbackCookie;    // provides access to the AudioRecord callback data
    jfieldID  nativeDeviceCallback;    // provides access to the JNIDeviceCallback instance
};

static sp<AudioRecord> setAudioRecord(JNIEnv* env, jobject thiz, const sp<AudioRecord>& ar)
{
    Mutex::Autolock l(sLock);
    sp<AudioRecord> old =
            (AudioRecord*)env->GetLongField(thiz, javaAudioRecordFields.nativeRecorderInJavaObj);
    if (ar.get()) {
        ar->incStrong((void*)setAudioRecord);
    }
    if (old != 0) {
        old->decStrong((void*)setAudioRecord);
    }
    env->SetLongField(thiz, javaAudioRecordFields.nativeRecorderInJavaObj, (jlong)ar.get());
    return old;
}

 

Having analyzed setup, let's look at the start and stop functions. Their JNI entry points simply fetch the native AudioRecord object and call it directly.

The main capture logic lives in AudioRecord.cpp, which is analyzed in the next part. A small app-side usage sketch follows the JNI code below.

// frameworks/base/core/jni/android_media_AudioRecord.cpp
// ----------------------------------------------------------------------------
static jint
android_media_AudioRecord_start(JNIEnv *env, jobject thiz, jint event, jint triggerSession)
{
    sp<AudioRecord> lpRecorder = getAudioRecord(env, thiz);
    if (lpRecorder == NULL ) {
        jniThrowException(env, "java/lang/IllegalStateException", NULL);
        return (jint) AUDIO_JAVA_ERROR;
    }

    return nativeToJavaStatus(
            lpRecorder->start((AudioSystem::sync_event_t)event, (audio_session_t) triggerSession));
}

// ----------------------------------------------------------------------------
static void
android_media_AudioRecord_stop(JNIEnv *env, jobject thiz)
{
    sp<AudioRecord> lpRecorder = getAudioRecord(env, thiz);
    if (lpRecorder == NULL ) {
        jniThrowException(env, "java/lang/IllegalStateException", NULL);
        return;
    }

    lpRecorder->stop();
    //ALOGV("Called lpRecorder->stop()");
}
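To connect these JNI entries back to application code, here is a minimal, illustrative capture loop (the class and method names are my own, and error handling is reduced to the essentials). startRecording(), read() and stop() are the Java calls that end up in the native methods listed in the table above:

// Illustrative example, not AOSP code
import android.media.AudioRecord;

public class RecordLoop {
    private volatile boolean running = true;

    public void capture(AudioRecord recorder) {
        short[] buffer = new short[1024];
        recorder.startRecording();              // -> native_start -> android_media_AudioRecord_start
        try {
            while (running) {
                // -> native_read_in_short_array -> android_media_AudioRecord_readInArray<jshortArray>
                int read = recorder.read(buffer, 0, buffer.length);
                if (read < 0) {
                    break;                      // e.g. ERROR_INVALID_OPERATION
                }
                // ... hand buffer[0..read) to an encoder, a file writer, etc.
            }
        } finally {
            recorder.stop();                    // -> native_stop -> android_media_AudioRecord_stop
            recorder.release();
        }
    }

    public void requestStop() {
        running = false;
    }
}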

 
