Android Audio Output Device Selection, Analyzed (Part 1)

As is well known (to those of us who do audio, anyway; insert a laughing-crying emoji here), to play a sound you create an AudioTrack, and when the set() function configures its parameters it calls createTrack_l().

Step 1: Obtaining the output

Before createTrack_l uses Binder to call AudioFlinger's createTrack and create an AudioFlinger::Track, it first calls getOutputForAttr:

status = AudioSystem::getOutputForAttr(attr, &output,
                                           mSessionId, &streamType, mClientUid,
                                           &config,
                                           mFlags, &mRoutedDeviceId, &mPortId);

Based on the audio_attributes_t and the other input parameters, it brings back the output (an out-parameter)!

This call is ultimately carried out in AudioPolicyManager.

AudioPolicyManager::getOutputForAttr:


//prepare the attributes
...
//pick a routing strategy based on the attributes
routing_strategy strategy = (routing_strategy) getStrategyForAttr(&attributes);
...
//pick a device based on the strategy
audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
...
//get an output for the selected device
*output = getOutputForDevice(device, session, *stream,
                                    config->sample_rate,
                                    config->format,
                                    config->channel_mask,
                                    flags,
                                    &config->offload_info);

The naming here is quite telling: each call pushes one step further down the chain.

getOutputForAttr: attr >> strategy >> device >> output

Among these, the getDeviceForStrategy step is what actually decides which device gets used.

For example, here is the strategy chosen when playing music (AUDIO_STREAM_MUSIC):

//excerpt from Engine::getDeviceForStrategyInt
case STRATEGY_MEDIA: {
        uint32_t device2 = AUDIO_DEVICE_NONE;
        ...
        //device2 was not selected above, and this setForceUse flag is not set
        //(setBluetoothA2dpOn(false) sets the AudioSystem.FOR_MEDIA,
        //AudioSystem.FORCE_NO_BT_A2DP flags)
        if ((device2 == AUDIO_DEVICE_NONE) &&
                (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
                (outputs.getA2dpOutput() != 0)) {
            //here comes the first priority group: if A2DP is usable, we go straight
            //to the special case marked with **** below
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
            if (device2 == AUDIO_DEVICE_NONE) {
                device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
            }
            if (device2 == AUDIO_DEVICE_NONE) {
                device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
            }
        }
        //next, the AudioSystem.FOR_MEDIA + AudioSystem.FORCE_SPEAKER case: the speaker wins
        if ((device2 == AUDIO_DEVICE_NONE) &&
            (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] == AUDIO_POLICY_FORCE_SPEAKER)) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
        }
        //from here on devices are tried in priority order; the rule is:
        //as soon as one is selected, stop and merge it with the special co-existing devices
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_LINE;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADSET;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_HEADSET;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_ACCESSORY;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_DEVICE;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
        }
        if ((device2 == AUDIO_DEVICE_NONE) && (strategy != STRATEGY_SONIFICATION)) {
            // no sonification on aux digital (e.g. HDMI)
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_DIGITAL;
        }
        if ((device2 == AUDIO_DEVICE_NONE) &&
                (mForceUse[AUDIO_POLICY_FORCE_FOR_DOCK] == AUDIO_POLICY_FORCE_ANALOG_DOCK)) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
        }
  ***********************************************************************************
        //the special case
        int device3 = AUDIO_DEVICE_NONE;
        if (strategy == STRATEGY_MEDIA) {
            //if ARC, SPDIF or AUX_LINE are available, collect them into device3
            // ARC, SPDIF and AUX_LINE can co-exist with others.
            device3 = availableOutputDevicesType & AUDIO_DEVICE_OUT_HDMI_ARC;
            device3 |= (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPDIF);
            device3 |= (availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_LINE);
        }
        //let device2 co-exist with ARC, SPDIF and AUX_LINE
        device2 |= device3;
        //normally, device is still AUDIO_DEVICE_NONE at this point
        device |= device2;
        // If hdmi system audio mode is on, remove speaker out of output list.
        if ((strategy == STRATEGY_MEDIA) &&
            (mForceUse[AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO] ==
                AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED)) {
            device &= ~AUDIO_DEVICE_OUT_SPEAKER;
        }
        } break;

Based on the code above, here is a quick summary:

The device priority for music playback is as follows:


AUDIO_DEVICE_OUT_BLUETOOTH_A2DP
AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES (ordinary Bluetooth headphones)
AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER (small Bluetooth speaker)
//here setForceUse can jump the queue
(if FORCE_SPEAKER) AUDIO_DEVICE_OUT_SPEAKER (loudspeaker)
AUDIO_DEVICE_OUT_WIRED_HEADPHONE (plain headphones: listening only, no playback controls)
AUDIO_DEVICE_OUT_LINE
AUDIO_DEVICE_OUT_WIRED_HEADSET (wired headset with inline controls)
AUDIO_DEVICE_OUT_USB_HEADSET (USB headset)
...
AUDIO_DEVICE_OUT_SPEAKER (loudspeaker)
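To make the "first match wins" shape of that cascade concrete, here is a minimal sketch (a hypothetical helper of mine, not AOSP code) that condenses the chain of "if (device2 == AUDIO_DEVICE_NONE)" blocks:

#include <cstdint>
#include <initializer_list>

typedef uint32_t audio_devices_t;                  // stand-in for the AOSP typedef
static const audio_devices_t AUDIO_DEVICE_NONE = 0;

// Try each candidate device type against the available-device mask in
// priority order; the first hit wins, exactly like the cascade above.
static audio_devices_t pickFirstAvailable(audio_devices_t availableOutputDevicesType,
                                          std::initializer_list<audio_devices_t> candidates) {
    for (audio_devices_t candidate : candidates) {
        audio_devices_t device = availableOutputDevicesType & candidate;
        if (device != AUDIO_DEVICE_NONE) {
            return device;
        }
    }
    return AUDIO_DEVICE_NONE;
}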

Once a device is chosen it is returned as device, which is then handed to getOutputForDevice.

AudioPolicyManager::getOutputForDevice

//handle the incoming flags
if ((flags & AUDIO_OUTPUT_FLAG_XXX) != 0) {
    flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_XXX);
}
//we are usually not AUDIO_OUTPUT_FLAG_DIRECT, so naturally:
goto non_direct_output;
...
//for music playback and the like, mediaplayer has already done the decoding,
//so what arrives here is normally linear PCM
    if (audio_is_linear_pcm(format)) {
        //get the outputs matching the given stream type; the actual routing change
        //has to wait until startOutput is called
        //note this function is getOutputsForDevice: it fetches several outputs, while
        //the function we are discussing fetches a single one. These outputs come from
        //mOutputs. So where does mOutputs come from?
        SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs);
        //select one output from the matched outputs (note: plural)
        output = selectOutput(outputs, flags, format);
    }

Let's settle that question first: where does mOutputs come from?

void AudioPolicyManager::addOutput(audio_io_handle_t output, const sp<SwAudioOutputDescriptor>& outputDesc)
{
    outputDesc->setIoHandle(output);
    mOutputs.add(output, outputDesc);
    updateMono(output); // update mono status when adding to output list
    selectOutputForMusicEffects();
    nextAudioPortGeneration();
}
//where is this called from?
AudioPolicyManager::AudioPolicyManager(){
  ...
    addOutput(output, outputDesc);
  ...
}
//right, now I remember: it happens right after audio_policy.conf has been parsed!
//it walks mHwModules[i]->mOutputProfiles and adds each entry to mOutputs

Moving on. After the matching outputs have been filtered from mOutputs, selectOutput picks the one we actually need.

selectOutput's selection rules, from the comment in the source:


// select one output among several that provide a path to a particular device or set of
// devices (the list was previously build by getOutputsForDevice()).
// The priority is as follows:
// 1: the output with the highest number of requested policy flags
// 2: the output with the bit depth the closest to the requested one
// 3: the primary output
// 4: the first output in the list
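For rule 1, a plausible way to score "highest number of requested policy flags" is to count the bits shared between the requested flags and each output's flags; this is an illustrative sketch of mine, not the exact AOSP implementation:

#include <bitset>
#include <cstdint>

// Count how many of the requested policy flags an output also offers;
// selectOutput would keep the output with the highest score.
static int flagMatchScore(uint32_t requestedFlags, uint32_t outputFlags) {
    return static_cast<int>(std::bitset<32>(requestedFlags & outputFlags).count());
}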

Then the selected output is simply returned.

And that completes getOutputForAttr.

Step 2: Applying the output

Having obtained the output, we need to put it to use.

First:


//excerpt from AudioTrack::createTrack_l()
...
AudioSystem::getLatency(output, &mAfLatency);
...
AudioSystem::getFrameCount(output, &mAfFrameCount);
...
AudioSystem::getFrameCountHAL(output, &afFrameCountHAL);
...
AudioSystem::getSamplingRate(output, &mAfSampleRate);

Note the arguments in the calls above: the first is an input parameter, the second an output parameter!

This obtains four values: mAfLatency, mAfFrameCount, afFrameCountHAL and mAfSampleRate. Note that they all carry the "af" prefix: they describe the AudioFlinger side's configuration of the output. After some adjustment they become mSampleRate and temp (computed jointly from mAfFrameCount, mAfSampleRate and mAfLatency).
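As a rough sketch of how those af* values combine, modeled on AudioTrack's minimum-frame-count logic (simplified, not the verbatim source):

#include <cstddef>
#include <cstdint>

// Derive a client-side frame count from the mixer-side values:
// afLatencyMs over the duration of one HAL buffer gives the number of
// buffers covered by the mixer latency; the result is then rescaled
// from the mixer's sample rate to the client's.
static size_t minFrameCountSketch(uint32_t afLatencyMs, size_t afFrameCount,
                                  uint32_t afSampleRate, uint32_t sampleRate) {
    uint32_t minBufCount = afLatencyMs / ((1000 * (uint32_t)afFrameCount) / afSampleRate);
    if (minBufCount < 2) minBufCount = 2;   // never fewer than double buffering
    return (size_t)((uint64_t)afFrameCount * minBufCount * sampleRate / afSampleRate);
}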

Then:


sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
                                                      mSampleRate,
                                                      mFormat,
                                                      mChannelMask,
                                                      &temp,
                                                      &flags,
                                                      mSharedBuffer,
                                                      output,
                                                      mClientPid,
                                                      tid,
                                                      &mSessionId,
                                                      mClientUid,
                                                      &status,
                                                      mPortId);

Let's hop over to the AudioFlinger side for a look.


//AudioFlinger::createTrack
...
PlaybackThread *thread = checkPlaybackThread_l(output);
...

First, checkPlaybackThread_l looks up the playback thread corresponding to the output in mPlaybackThreads.
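For reference, the lookup is essentially just a map access (quoted from memory, so treat it as a sketch):

// mPlaybackThreads maps audio_io_handle_t (the output id) to the owning PlaybackThread.
AudioFlinger::PlaybackThread *AudioFlinger::checkPlaybackThread_l(audio_io_handle_t output) const
{
    return mPlaybackThreads.valueFor(output).get();
}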

This is a good place to review something I analyzed before.

When AudioPolicyManager is constructed, it walks the configuration in audio_policy.conf and, for each entry, calls mpClientInterface->openOutput (which ends up in AudioFlinger::openOutput_l).

Inside that call:


//AudioFlinger::openOutput_l
//find the matching AudioHwDevice
AudioHwDevice *outHwDev = findSuitableHwDev_l(module, devices);
//open the hardware output stream
status_t status = outHwDev->openOutputStream(
            &outputStream,
            *output,
            devices,
            flags,
            config,
            address.string());
//create the PlaybackThread
thread = new (PlaybackThread subclass)((this, outputStream, *output, devices, mSystemReady))
//tie the output to the PlaybackThread
mPlaybackThreads.add(*output, thread);

Let's run an experiment first:


Experiment steps:
1. adb shell dumpsys media.audio_flinger
2. play some music
3. adb shell dumpsys media.audio_flinger
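To compare the two dumps quickly, you can filter for just the fields we care about (a convenience one-liner; adjust the pattern to taste):

adb shell dumpsys media.audio_flinger | grep -E "Output thread|Standby|Output device|Tracks"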

When nothing is playing, the corresponding output thread looks like this:


Output thread 0xe7d836c0 type 0 (MIXER):
  Thread name: AudioOut_1D
  I/O handle: 29
  TID: 1090
  Standby: yes
  Sample rate: 48000 Hz
  HAL frame count: 1920
  HAL format: 0x1 (pcm16)
  HAL buffer size: 7680 bytes
  Channel count: 2
  Channel mask: 0x00000003 (front-left, front-right)
  Processing format: 0x1 (pcm16)
  Processing frame size: 4 bytes
  Pending config events: none
  Output device: 0 (NONE)
  Input device: 0 (NONE)
  Audio source: 0 (default)
  Normal frame count: 1920
  Last write occurred (msecs): 1949365
  Total writes: 215725
  Delayed writes: 0
  Blocked in write: no
  Suspend count: 0
  Sink buffer : 0xe8574000
  Mixer buffer: 0xe8576000
  Effect buffer: 0xe857b000
  Fast track availMask=0xfe
  Standby delay ns=3000000000
  AudioStreamOut: 0xe962d2d8 flags 0x8 (DEEP_BUFFER)
  Frames written: 414192000
  Suspended frames: 0
  Hal stream dump:
  Thread throttle time (msecs): 3470
  AudioMixer tracks: 0x00000000
  Master mono: off
  FastMixer not initialized
  Stream volumes in dB: 0:0, 1:-inf, 2:-inf, 3:-35, 4:-13, 5:-inf, 6:0, 7:-6, 8:-inf, 9:0, 10:-35, 11:0, 12:0
  Normal mixer raw underrun counters: partial=0 empty=0
  0 Tracks
  0 Effect Chains

And while playing:


Output thread 0xe7d836c0 type 0 (MIXER):
  Thread name: AudioOut_1D
  I/O handle: 29
  TID: 1090
  Standby: no
  Sample rate: 48000 Hz
  HAL frame count: 1920
  HAL format: 0x1 (pcm16)
  HAL buffer size: 7680 bytes
  Channel count: 2
  Channel mask: 0x00000003 (front-left, front-right)
  Processing format: 0x1 (pcm16)
  Processing frame size: 4 bytes
  Pending config events: none
  Output device: 0x8 (WIRED_HEADPHONE)
  Input device: 0 (NONE)
  Audio source: 0 (default)
  Normal frame count: 1920
  Last write occurred (msecs): 20
  Total writes: 215830
  Delayed writes: 0
  Blocked in write: yes
  Suspend count: 0
  Sink buffer : 0xe8574000
  Mixer buffer: 0xe8576000
  Effect buffer: 0xe857b000
  Fast track availMask=0xfe
  Standby delay ns=3000000000
  AudioStreamOut: 0xe962d2d8 flags 0x8 (DEEP_BUFFER)
  Frames written: 414393600
  Suspended frames: 0
  Hal stream dump:
  Thread throttle time (msecs): 3470
  AudioMixer tracks: 0x00000001
  Master mono: off
  FastMixer not initialized
  Stream volumes in dB: 0:-24, 1:-inf, 2:-inf, 3:-3, 4:-13, 5:-inf, 6:0, 7:-24, 8:-inf, 9:-96, 10:-3, 11:0, 12:0
  Normal mixer raw underrun counters: partial=0 empty=0
  1 Tracks of which 1 are active
    Name Active Client Type      Fmt Chn mask Session fCount S F SRate  L dB  R dB    Server Main buf  Aux Buf Flags UndFrmCnt
       0    yes  25927    3 00000001 00000003    3761  15376 A 3 48000     0     0  0002F580 0xe8574000 0x0 0x001     17298 
  0 Effect Chains

The most obvious changes:

1. Standby: yes -> no

2. Output device: NONE -> WIRED_HEADPHONE (I really was wearing headphones for the test)

3. The new lines "1 Tracks of which 1 are active..."

Meanwhile, "Output thread 0xe7d836c0" did not change (that is the playbackThread's this pointer, printed by the dump function).

"I/O handle: 29" did not change either. Time to trace it back:


//AudioFlinger::ThreadBase::dumpBase prints it:
dprintf(fd, "  I/O handle: %d\n", mId);
//AudioFlinger::ThreadBase::ThreadBase assigns it:
mId(id),
//right, that is the base-class constructor
//AudioFlinger::PlaybackThread::PlaybackThread
:   ThreadBase(audioFlinger, id
//keep going to the PlaybackThread subclass constructor
//AudioFlinger::MixerThread::MixerThread
AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
        audio_io_handle_t id, audio_devices_t device, bool systemReady, type_t type)
    :   PlaybackThread(audioFlinger, output, id, device, type, systemReady),

That's it: the third argument passed in when MixerThread is constructed. Now look back at the openOutput_l method analyzed above:


//create the PlaybackThread
thread = new (PlaybackThread subclass)((this, outputStream, *output, devices, mSystemReady))

It's this one, right here:

*output

And its ultimate origin is close by:


//AudioFlinger::openOutput_l
*output = nextUniqueId(AUDIO_UNIQUE_ID_USE_OUTPUT);

It is produced by this nextUniqueId function! Looking back, the quick summary is:

generate an output id, create a playbackThread (subclass), and finally add the pair to AudioFlinger's mPlaybackThreads collection.
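A hedged sketch of what a nextUniqueId-style generator looks like, modeled on AudioFlinger's (an atomic counter stepped so its low bits can tag the use category; not the verbatim source, and the category count is an assumption):

#include <atomic>
#include <cstdint>

typedef int32_t audio_unique_id_t;
enum { UNIQUE_ID_USE_MAX = 8 };  // assumed number of use categories (OUTPUT, SESSION, ...)

static std::atomic<int32_t> gNextUniqueId{UNIQUE_ID_USE_MAX};

// Step by UNIQUE_ID_USE_MAX so the low bits stay free to encode 'use';
// successive calls therefore never collide, even across use categories.
static audio_unique_id_t nextUniqueIdSketch(int use) {
    return gNextUniqueId.fetch_add(UNIQUE_ID_USE_MAX) | use;
}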

OK, let's circle back to the earlier topic. After the output has been selected,

checkPlaybackThread_l retrieves the PlaybackThread that was created earlier for that output.

Next:


//AudioFlinger::createTrack
track = thread->createTrack_l(client, streamType, sampleRate, format,
                channelMask, frameCount, sharedBuffer, lSessionId, flags, tid,
                clientUid, &lStatus, portId);

This creates an AudioFlinger::Track object and returns it to AudioTrack as the server's representative on the client side (a messenger? an undercover agent?). I won't go into detail, or we'd drift off topic...

 

Next, let's talk about what happens when playback actually starts (sorry, this transition is a bit forced).

Step 3: Starting playback

How Android gets from AudioTrack's start all the way to AudioFlinger's Track::start we will skip; after all, today's topic is device selection. We know that AudioFlinger::Track::start calls PlaybackThread::addTrack_l, which wakes the sleeping PlaybackThread, and that brings us to the important call:


status_t AudioFlinger::PlaybackThread::addTrack_l(const sp<Track>& track)
{
    status = AudioSystem::startOutput(mId, track->streamType(),
                                              track->sessionId());
  ...
}

We discussed mId above: it is the PlaybackThread's number, its identifier, the output!

Now let's step into our lovely startOutput function:

status_t AudioPolicyManager::startOutput(audio_io_handle_t output,
                                             audio_stream_type_t stream,
                                             audio_session_t session)
{
    //look up the outputDesc by the output id
    ssize_t index = mOutputs.indexOfKey(output);
    sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
    //
    if (outputDesc->mPolicyMix != NULL) {
      ...
    } else if (mOutputRoutes.hasRouteChanged(session)) {
        //pick a new device
        newDevice = getNewOutputDevice(outputDesc, false /*fromCache*/);
        checkStrategyRoute(getStrategy(stream), output);
    }
    ...
    status_t status = startSource(outputDesc, stream, newDevice, address, &delayMs);
    ...
}

What decides whether startSource gets a newDevice?

This does: mOutputRoutes.hasRouteChanged(session):


bool SessionRouteMap::hasRouteChanged(audio_session_t session)
{
    if (indexOfKey(session) >= 0) {
        if (valueFor(session)->mChanged) {
            valueFor(session)->mChanged = false;
            return true;
        }
    }
    return false;
}

Two conditions: (a) the session is present in mOutputRoutes; (b) valueFor(session)->mChanged is true.

status_t AudioPolicyManager::getOutputForAttr(...){
  ...
        mOutputRoutes.addRoute(session, *stream, SessionRoute::SOURCE_TYPE_NA, deviceDesc, uid);
  ...
}

void SessionRouteMap::addRoute:


void SessionRouteMap::addRoute(audio_session_t session,
                               audio_stream_type_t streamType,
                               audio_source_t source,
                               const sp<DeviceDescriptor>& descriptor,
                               uid_t uid)
{
    ...
    //does it already exist?
    sp<SessionRoute> route = indexOfKey(session) >= 0 ? valueFor(session) : 0;
    //the route already exists
    if (route != 0) {
        //its descriptor has changed
        if (((route->mDeviceDescriptor == 0) && (descriptor != 0)) ||
                ((route->mDeviceDescriptor != 0) &&
                 ((descriptor == 0) || (!route->mDeviceDescriptor->equals(descriptor))))) {
            route->mChanged = true;
        }
        route->mRefCount++;
        route->mDeviceDescriptor = descriptor;
    } else {
        //the route did not exist before
        route = new SessionRoute(session, streamType, source, descriptor, uid);
        route->mRefCount++;
        add(session, route);
        if (descriptor != 0) {
            route->mChanged = true;
        }
    }
}

The session here is effectively an index into the routes that AudioPolicy maintains for the client side, similar in role to the output we saw earlier. My current understanding is that it exists to decide whether an audio route needs to be re-planned.

Fighting back tears, let's trace the session parameter to its source:

(We use MediaPlayerService.cpp's use of AudioTrack as the typical example.)


status_t MediaPlayerService::AudioOutput::open()
{
    ...
        t = new AudioTrack(
                    ...
                    mSessionId,
                    ...);
    ...
}
//after some searching:
status_t MediaPlayer::setAudioSessionId(audio_session_t sessionId)
//or
mAudioSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);

So the session either comes from the user calling MediaPlayer::setAudioSessionId, or is generated internally with newAudioUniqueId (allocating a unique id).

AudioManager has something similar:


//AudioManager.java
public int generateAudioSessionId() {
        int session = AudioSystem.newAudioSessionId();
        if (session > 0) {
            return session;
        } else {
            Log.e(TAG, "Failure to generate a new audio session ID");
            return ERROR;
        }
    }
public static final int AUDIO_SESSION_ID_GENERATE = AudioSystem.AUDIO_SESSION_ALLOCATE;

That settles that; moving on.

getOutputForAttr was called earlier, so by the look of it the first condition is satisfied.

The second condition means: either the route is newly added, or it already existed but its descriptor changed; in both cases it counts as mChanged.

(hasRouteChanged flips mChanged back to false when it fires.)

Only the first time a route is used, or when an existing route's descriptor has changed, does startOutput need getNewOutputDevice! In all other cases the route's reference count (mRefCount) is simply incremented.

So let's look first at the case where a new device has to be picked!

getNewOutputDevice

It first checks for an AudioPatch:


//the outputDesc's patchHandle is indeed an index into mAudioPatches
ssize_t index = mAudioPatches.indexOfKey(outputDesc->getPatchHandle());
    if (index >= 0) {
        //fetch the corresponding AudioPatch (an audioPatch is simply a struct
        //holding a source and a sink)
        sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
        //mUidCached = getuid(). I haven't found where this gets set
        if (patchDesc->mUid != mUidCached) {
            ALOGV("getNewOutputDevice() device %08x forced by patch %d",
                  outputDesc->device(), outputDesc->getPatchHandle());
            return outputDesc->device();
        }
    }

My understanding is that this is a special mechanism for the app layer to configure an audioSource and an audioSink directly.
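Concretely, an audio_patch is little more than "N sources feeding M sinks". A minimal sketch of filling one in (the struct is the real one from system/audio.h; the helper itself is hypothetical):

#include <system/audio.h>  // struct audio_patch, struct audio_port_config

// One mix port (the output) feeding one device port (e.g. wired headphones).
static void buildPatchSketch(struct audio_patch *patch,
                             const struct audio_port_config *sourceMix,
                             const struct audio_port_config *sinkDevice) {
    patch->num_sources = 1;
    patch->sources[0] = *sourceMix;
    patch->num_sinks = 1;
    patch->sinks[0] = *sinkDevice;
}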

If there is no AudioPatch, newDevice is picked by the following priority:


Whichever strategy is active gets to choose: use that strategy's device (via getDeviceForStrategy).
Priority:
1. STRATEGY_ENFORCED_AUDIBLE && AUDIO_POLICY_FORCE_SYSTEM_ENFORCED
2. STRATEGY_PHONE || isInCall()
3. STRATEGY_SONIFICATION
4. STRATEGY_ENFORCED_AUDIBLE
5. STRATEGY_ACCESSIBILITY
6. STRATEGY_SONIFICATION_RESPECTFUL
7. STRATEGY_MEDIA
8. STRATEGY_DTMF
9. STRATEGY_TRANSMITTED_THROUGH_SPEAKER
10. STRATEGY_REROUTING
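In other words, the fallback boils down to walking the strategies in that order and taking the device of the first active one, roughly like this (a sketch with stand-in callbacks, not the APM's actual member functions):

#include <cstdint>
#include <functional>
#include <vector>

typedef uint32_t audio_devices_t;
enum routing_strategy { STRATEGY_PHONE, STRATEGY_SONIFICATION, STRATEGY_MEDIA };  // trimmed

// Walk the strategies in priority order; return the device of the first active one.
static audio_devices_t pickByActiveStrategy(
        const std::vector<routing_strategy>& priorityOrder,
        const std::function<bool(routing_strategy)>& isStrategyActive,
        const std::function<audio_devices_t(routing_strategy)>& getDeviceForStrategy) {
    for (routing_strategy s : priorityOrder) {
        if (isStrategyActive(s)) {
            return getDeviceForStrategy(s);
        }
    }
    return 0;  // AUDIO_DEVICE_NONE
}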

checkStrategyRoute

First argument: getStrategy(stream)

Second argument: output

//find the default device for the strategy of the current stream
getDeviceForStrategy
//find all outputs currently routing to this device (doesn't the pass already done
//in getOutputForAttr count?)
outputs = getOutputsForDevice(device, mOutputs);
for(size_t j = 0; j < mOutputs.size(); j++) {
    //skip the output passed in (this check excludes the output given to startOutput)
    //if the current strategy is active:
    if (isStrategyActive(outputDesc, (routing_strategy)strategy))
    {
        // If the default device for this strategy is on another output mix,
        // invalidate all tracks in this strategy to force re connection.
        // Otherwise select new device on the output mix.
        if (outputs.indexOf(mOutputs.keyAt(j)) < 0) {
            for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
                if (getStrategy((audio_stream_type_t)stream) == strategy) {
                    mpClientInterface->invalidateStream((audio_stream_type_t)stream);
                }
            }
        } else {
            audio_devices_t newDevice = getNewOutputDevice(outputDesc, false /*fromCache*/);
            setOutputDevice(outputDesc, newDevice, false);
        }
    }
}

Whew! That's complicated; I didn't fully get it, so let's set it aside for now.

startSource


//cannot start playback of STREAM_TTS if any other output is being used
//(not sure why this is done) the short stretch of code before this uses
//handleEventForBeacon to handle muting of TTS:
//STARTING_BEACON = a beacon (TTS) is starting; STARTING_OUTPUT = something else
//is starting, so mute TTS.
(code omitted)
//force a device change if the output is inactive and no audio patch exists yet
bool force = !outputDesc->isActive() &&
    (outputDesc->getPatchHandle() == AUDIO_PATCH_HANDLE_NONE);
//increment the usage count of this stream on the requested output:
//this usage count, together with the duplicated outputs and hardware outputs,
//is what startOutput and stopOutput need to control hardware output routing correctly
outputDesc->changeRefCount(stream, 1);
...
if (outputDesc->mRefCount[stream] == 1 || device != AUDIO_DEVICE_NONE) {
    //device is still AUDIO_DEVICE_NONE: pick a device again
    if (device == AUDIO_DEVICE_NONE) {
         device = getNewOutputDevice(outputDesc, false /*fromCache*/);
    }
    //handle waitMs (delayed output?)
    (code omitted)
    //setOutputDevice returns muteWaitMs
    uint32_t muteWaitMs = setOutputDevice(outputDesc, device, force, 0, NULL, address);
    //handle sonification that pops up mid-call
    if (isInCall()) {
        handleIncallSonification(stream, true, false);
    }
    //apply the volume rules
    checkAndSetVolume(stream,
                          mVolumeCurves->getVolumeIndex(stream, outputDesc->device()),
                          outputDesc,
                          outputDesc->device());
    ...
    //set delayMs
    if (waitMs > muteWaitMs) {
            *delayMs = waitMs - muteWaitMs;
    }
    return NO_ERROR;
}

Now for a closer look at setOutputDevice, which was only glossed over above.

uint32_t AudioPolicyManager::setOutputDevice(const sp<AudioOutputDescriptor>& outputDesc,
                                             audio_devices_t device,
                                             bool force,
                                             int delayMs,
                                             audio_patch_handle_t *patchHandle,
                                             const char* address)
{
    AudioParameter param;
    uint32_t muteWaitMs;
    //for a duplicated output, run setOutputDevice once each for output1 and output2
    if (outputDesc->isDuplicated()) {
        muteWaitMs = setOutputDevice(outputDesc->subOutput1(), device, force, delayMs);
        muteWaitMs += setOutputDevice(outputDesc->subOutput2(), device, force, delayMs);
        return muteWaitMs;
    }
    ...
    if (device == AUDIO_DEVICE_NONE) {
        resetOutputDevice(outputDesc, delayMs, NULL);
    } else {
        DeviceVector deviceList;
        if ((address == NULL) || (strlen(address) == 0)) {
            //mAvailableOutputDevices is prepared when the APM is constructed;
            //setDeviceConnectionStateInt also adds newly connected devices
            deviceList = mAvailableOutputDevices.getDevicesFromType(device);
        } else {
            deviceList = mAvailableOutputDevices.getDevicesFromTypeAddr(device, String8(address));
        }
        if (!deviceList.isEmpty()) {
            struct audio_patch patch;
            outputDesc->toAudioPortConfig(&patch.sources[0]);
            patch.num_sources = 1;
            patch.num_sinks = 0;
            for (size_t i = 0; i < deviceList.size() && i < AUDIO_PATCH_PORTS_MAX; i++) {
                deviceList.itemAt(i)->toAudioPortConfig(&patch.sinks[i]);
                patch.num_sinks++;
            }
            //look up the patch's index in mAudioPatches
            ssize_t index;
            if (patchHandle && *patchHandle != AUDIO_PATCH_HANDLE_NONE) {
                index = mAudioPatches.indexOfKey(*patchHandle);
            } else {
                index = mAudioPatches.indexOfKey(outputDesc->getPatchHandle());
            }
            //handle afPatchHandle
            sp<AudioPatch> patchDesc;
            audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
            if (index >= 0) {
                patchDesc = mAudioPatches.valueAt(index);
                afPatchHandle = patchDesc->mAfPatchHandle;
            }
            //an enormous function; reading it makes my head hurt
            status_t status = mpClientInterface->createAudioPatch(&patch,
                                                                   &afPatchHandle,
                                                                   delayMs);
            if (status == NO_ERROR) {
                if (index < 0) {
                    patchDesc = new AudioPatch(&patch, mUidCached);
                    addAudioPatch(patchDesc->mHandle, patchDesc);
                } else {
                    patchDesc->mPatch = patch;
                }
                patchDesc->mAfPatchHandle = afPatchHandle;
                if (patchHandle) {
                    *patchHandle = patchDesc->mHandle;
                }
                outputDesc->setPatchHandle(patchDesc->mHandle);
                nextAudioPortGeneration();
                mpClientInterface->onAudioPatchListUpdate();
            }
        }
    }
}

As for a full analysis of Audio Patch, I won't force one here; the article below covers it far better.

 

An analysis of Audio Patch and the Patch Panel on Android 5.0 (original title: 在Android5.0上Audio Patch和Patch Panel的一些分析)

After a pile of call hops, the code ends up here:

status_t AudioFlinger::PlaybackThread::createAudioPatch_l(const struct audio_patch *patch,
                                                          audio_patch_handle_t *handle) {
  ...
    if (mOutput->audioHwDev->supportsAudioPatches()) {
        sp<DeviceHalInterface> hwDevice = mOutput->audioHwDev->hwDevice();
        status = hwDevice->createAudioPatch(patch->num_sources,
                                            patch->sources,
                                            patch->num_sinks,
                                            patch->sinks,
                                            handle);
    } else {
        char *address;
        if (strcmp(patch->sinks[0].ext.device.address, "") != 0) {
            //FIXME: we only support address on first sink with HAL version < 3.0
            address = audio_device_address_to_parameter(
                                                        patch->sinks[0].ext.device.type,
                                                        patch->sinks[0].ext.device.address);
        } else {
            address = (char *)calloc(1, 1);
        }
        AudioParameter param = AudioParameter(String8(address));
        free(address);
        param.addInt(String8(AudioParameter::keyRouting), (int)type);
        status = mOutput->stream->setParameters(param.toString());
        *handle = AUDIO_PATCH_HANDLE_NONE;
    }
  ...
}

If supportsAudioPatches holds, we carry on with createAudioPatch: through DeviceHalHidl::createAudioPatch under libaudiohal and Device.cpp under hardware/interfaces/audio/2.0/default, and finally into the create_audio_patch function implemented in audio_hw.c under hardware/qcom/audio/hal (assuming a Qualcomm platform).

If the above chain is unclear, see my article:

https://blog.csdn.net/bberdong/article/details/79472208

In the code I have at hand, I could not find create_audio_patch in the audio_hw.c implementation. Could it be unsupported?


status_t DeviceHalHidl::supportsAudioPatches(bool *supportsPatches) {
    if (mDevice == 0) return NO_INIT;
    return processReturn("supportsAudioPatches", mDevice->supportsAudioPatches(), supportsPatches);
}
Return<bool> Device::supportsAudioPatches() {
    return version() >= AUDIO_DEVICE_API_VERSION_3_0;
}
//Device.h
uint32_t version() const { return mDevice->common.version; }

A quick check of qcom/audio/hal/audio_hw.c shows AUDIO_DEVICE_API_VERSION_2_0, so it really is unsupported.

Excellent: I've found an excuse not to follow the create_audio_patch branch any further!

So let's follow the setParameters branch instead.

First we arrive at StreamHalHidl::setParameters, then the HIDL Stream::setParameters and StreamOut::setParameters, then mStreamCommon->setParameters, and finally Stream::halSetParameters:

int Stream::halSetParameters(const char* keysAndValues) {
    return mStream->set_parameters(mStream, keysAndValues);
}

mStream comes from the constructor:

Stream::Stream(audio_stream_t* stream)
        : mStream(stream) {
}

And the audio_stream_t comes from:


StreamOut::StreamOut(const sp<Device>& device, audio_stream_out_t* stream)
    : ...
      mStreamCommon(new Stream(&stream->common)),
      ...

that is, when StreamOut is constructed:

Return<void> Device::openOutputStream(int32_t ioHandle,
                                      const DeviceAddress& device,
                                      const AudioConfig& config,
                                      AudioOutputFlag flags,
                                      openOutputStream_cb _hidl_cb) {
    audio_config_t halConfig;
    HidlUtils::audioConfigToHal(config, &halConfig);
    audio_stream_out_t* halStream;
    ALOGV(
        "open_output_stream handle: %d devices: %x flags: %#x "
        "srate: %d format %#x channels %x address %s",
        ioHandle, static_cast<audio_devices_t>(device.device),
        static_cast<audio_output_flags_t>(flags), halConfig.sample_rate,
        halConfig.format, halConfig.channel_mask,
        deviceAddressToHal(device).c_str());
    int status = mDevice->open_output_stream(
        mDevice, ioHandle, static_cast<audio_devices_t>(device.device),
        static_cast<audio_output_flags_t>(flags), &halConfig, &halStream,
        deviceAddressToHal(device).c_str());
    ALOGV("open_output_stream status %d stream %p", status, halStream);
    sp<IStreamOut> streamOut;
    if (status == OK) {
        streamOut = new StreamOut(this, halStream);
    }
    AudioConfig suggestedConfig;
    HidlUtils::audioConfigFromHal(halConfig, &suggestedConfig);
    _hidl_cb(analyzeStatus("open_output_stream", status), streamOut,
             suggestedConfig);
    return Void();
}

In other words, the out->stream.common object that adev_open_output_stream in audio_hw.c hands back. Not an assignment relation, just a correspondence (a stub).

Specifically:

//qcom/audio/hal/audio_hw.c
out->stream.common.set_parameters = out_set_parameters;

So the call

mStream->set_parameters(mStream, keysAndValues);

resolves to the out_set_parameters function in audio_hw.c.

What a slog! So many twists and turns.

Parameter 1: also named mStream (ugh!). It should correspond to the StreamOut object.

Parameter 2: keysAndValues, which can be understood simply as key-value pairs.

From what I could verify, it looks roughly like this:


"routing,整数((int)patch->sources[0].ext.device.type)"
"input_source,整数((int)patch->sinks[0].ext.mix.usecase.source)""routing,整数((int)patch->sources[0].ext.device.type)"
"input_source,整数((int)patch->sinks[0].ext.mix.usecase.source)"

Then audio_hw.c parses the kvpairs.
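HALs typically do this parsing with the str_parms utility from libcutils; here is a hedged sketch of the usual pattern (the helper name is mine):

#include <cutils/str_parms.h>
#include <hardware/audio.h>   // AUDIO_PARAMETER_STREAM_ROUTING ("routing")

// Pull the routing value out of a "routing=<int>;..." kvpairs string.
static int parse_routing_sketch(const char *kvpairs, audio_devices_t *new_dev) {
    struct str_parms *parms = str_parms_create_str(kvpairs);
    int val = 0;
    int ret = str_parms_get_int(parms, AUDIO_PARAMETER_STREAM_ROUTING, &val);
    if (ret >= 0) {
        *new_dev = (audio_devices_t)val;
    }
    str_parms_destroy(parms);
    return ret;
}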

//out_set_parameters
if (new_dev != AUDIO_DEVICE_NONE) {
    //notify adm (the audio data manager on the aDSP side) before the actual
    //routing change, to avoid glitches (pops)
    adev->adm_on_routing_change
    ...
    select_devices(adev, out->usecase);
    ...
}

And that brings us to the select_devices step:

//select_devices
usecase = get_usecase_from_list
...
enable_audio_route(adev, usecase);

Then:


//enable_audio_route
...
platform_add_backend_name(adev->platform, mixer_path, snd_device);
audio_route_apply_and_update_path(adev->audio_route, mixer_path);
...

Next we land in system/media/audio_route/audio_route.

Let's take a proper look at this function:


int audio_route_apply_and_update_path(struct audio_route *ar, const char *name)
{
    if (audio_route_apply_path(ar, name) < 0) {
        return -1;
    }
    return audio_route_update_path(ar, name, false /*reverse*/);
}

The deeper dive into that process is left for the sequel.

 


Reposted from blog.csdn.net/bberdong/article/details/80484568