继上一篇AudioTrack的分析,本篇我们来看AudioFlinger,AF主要承担音频混合输出,是Audio系统的核心,从AudioTrack来的数据最终都会在这里处理,并被写入到Audio的HAL。
1. AudioFlinger 创建
frameworks\av\services\audioflinger\AudioFlinger.cpp
// AudioFlinger constructor: initializes member defaults and creates the
// HAL factory interfaces used later to open audio devices and effects.
// (Listing abridged with "......".)
AudioFlinger::AudioFlinger()
: BnAudioFlinger(),
mMediaLogNotifier(new AudioFlinger::MediaLogNotifier()),
mPrimaryHardwareDev(NULL),
mAudioHwDevs(NULL),
mHardwareStatus(AUDIO_HW_IDLE),
mMasterVolume(1.0f), // master volume defaults to unity gain
mMasterMute(false),
// mNextUniqueId(AUDIO_UNIQUE_ID_USE_MAX),
mMode(AUDIO_MODE_INVALID),
mBtNrecIsOff(false),
mIsLowRamDevice(true),
mIsDeviceTypeKnown(false),
mGlobalEffectEnableTime(0),
mSystemReady(false)
{
......
// The factories abstract how the audio HAL is reached (local vs HIDL).
mDevicesFactoryHal = DevicesFactoryHalInterface::create();
mEffectsFactoryHal = EffectsFactoryHalInterface::create();
......
}
frameworks\av\media\libaudiohal\include\DevicesFactoryHalInterface.h
其中 DevicesFactoryHalHidl.cpp , DevicesFactoryHalHybrid.cpp , DevicesFactoryHalLocal.cpp分别都继承自DevicesFactoryHalInterface
// Abstract factory for obtaining audio HAL device interfaces. Concrete
// implementations: DevicesFactoryHalLocal (legacy), DevicesFactoryHalHidl
// (Treble/HIDL), and DevicesFactoryHalHybrid (dispatches between the two).
class DevicesFactoryHalInterface : public RefBase
{
public:
// Opens a device with the specified name. To close the device, it is
// necessary to release references to the returned object.
virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device) = 0;
// Creates the factory instance; implemented in DevicesFactoryHalHybrid.cpp.
static sp<DevicesFactoryHalInterface> create();
protected:
// Subclasses can not be constructed directly by clients.
DevicesFactoryHalInterface() {}
virtual ~DevicesFactoryHalInterface() {}
};
在DevicesFactoryHalHybrid.cpp文件中找到了create()函数的实现
// Factory entry point: always builds the hybrid implementation, which
// internally decides between the legacy local HAL and the HIDL HAL.
sp<DevicesFactoryHalInterface> DevicesFactoryHalInterface::create() {
    sp<DevicesFactoryHalInterface> factory = new DevicesFactoryHalHybrid();
    return factory;
}
DevicesFactoryHalHybrid.h文件中定义了mLocalFactory,mHidlFactory
// Hybrid factory: holds both a legacy local factory and a HIDL factory and
// routes each openDevice() call to whichever applies (see the .cpp).
class DevicesFactoryHalHybrid : public DevicesFactoryHalInterface
{
public:
// Opens a device with the specified name. To close the device, it is
// necessary to release references to the returned object.
virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device);
private:
friend class DevicesFactoryHalInterface;
// Can not be constructed directly by clients.
DevicesFactoryHalHybrid();
virtual ~DevicesFactoryHalHybrid();
// Legacy (pre-Treble) implementation; always constructed.
sp<DevicesFactoryHalInterface> mLocalFactory;
// HIDL implementation; null when built with USE_LEGACY_LOCAL_AUDIO_HAL.
sp<DevicesFactoryHalInterface> mHidlFactory;
};
DevicesFactoryHalHybrid.cpp中分别创建了不同的实现对象,并通过 USE_LEGACY_LOCAL_AUDIO_HAL 这个编译宏来决定是否启用HIDL实现(定义了该宏时 mHidlFactory 为空,只走本地实现)
// The local factory is always available; the HIDL factory is compiled out
// when USE_LEGACY_LOCAL_AUDIO_HAL is defined, leaving mHidlFactory null so
// openDevice() falls back to the legacy path.
DevicesFactoryHalHybrid::DevicesFactoryHalHybrid()
: mLocalFactory(new DevicesFactoryHalLocal()),
mHidlFactory(
#ifdef USE_LEGACY_LOCAL_AUDIO_HAL
nullptr
#else
new DevicesFactoryHalHidl()
#endif
) {
}
// Routes device opening: prefer the HIDL factory when it exists, except for
// the A2DP module, which always goes through the legacy local path.
status_t DevicesFactoryHalHybrid::openDevice(const char *name, sp<DeviceHalInterface> *device) {
    const bool useHidl =
            mHidlFactory != 0 && strcmp(AUDIO_HARDWARE_MODULE_ID_A2DP, name) != 0;
    return useHidl ? mHidlFactory->openDevice(name, device)
                   : mLocalFactory->openDevice(name, device);
}
如果是本地模式,DevicesFactoryHalLocal.cpp中使用8.0以前的HAL加载方式
// Loads an audio HAL module by class/name through the legacy libhardware
// path, opens it as an audio_hw_device_t, and validates its API version.
// On any failure *dev is reset to NULL and the error code is returned;
// on success *dev holds the opened device and OK is returned.
static status_t load_audio_interface(const char *if_name, audio_hw_device_t **dev)
{
    const hw_module_t *mod;
    int rc = hw_get_module_by_class(AUDIO_HARDWARE_MODULE_ID, if_name, &mod);
    if (rc) {
        ALOGE("%s couldn't load audio hw module %s.%s (%s)", __func__,
              AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
        *dev = NULL;
        return rc;
    }
    rc = audio_hw_device_open(mod, dev);
    if (rc) {
        ALOGE("%s couldn't open audio hw device in %s.%s (%s)", __func__,
              AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
        *dev = NULL;
        return rc;
    }
    // Reject devices older than the minimum supported HAL API version.
    if ((*dev)->common.version < AUDIO_DEVICE_API_VERSION_MIN) {
        ALOGE("%s wrong audio hw device version %04x", __func__, (*dev)->common.version);
        audio_hw_device_close(*dev);
        *dev = NULL;
        return BAD_VALUE;
    }
    return OK;
}
// Opens a device through the pre-Treble (locally loaded .so) HAL and wraps
// the raw audio_hw_device_t in a DeviceHalLocal on success.
status_t DevicesFactoryHalLocal::openDevice(const char *name, sp<DeviceHalInterface> *device) {
    audio_hw_device_t *dev;
    const status_t rc = load_audio_interface(name, &dev);
    if (rc != OK) {
        return rc;
    }
    *device = new DeviceHalLocal(dev);
    return OK;
}
如果是HIDL的Treble模式,将采用8.0的新架构
// Acquires the IDevicesFactory HIDL service at construction. The service is
// mandatory in Treble mode, so failure to obtain it is fatal.
DevicesFactoryHalHidl::DevicesFactoryHalHidl() {
    mDevicesFactory = IDevicesFactory::getService();
    if (mDevicesFactory == 0) {
        ALOGE("Failed to obtain IDevicesFactory service, terminating process.");
        exit(1);
    }
    // It is assumed that DevicesFactory is owned by AudioFlinger
    // and thus have the same lifespan.
    mDevicesFactory->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
}
// Empty destructor; cleanup is handled by the member destructors.
DevicesFactoryHalHidl::~DevicesFactoryHalHidl() {
}
// Translates a legacy audio HAL module id string into the corresponding
// IDevicesFactory::Device enum value. Returns BAD_VALUE (with a log) for
// names that have no HIDL equivalent.
// static
status_t DevicesFactoryHalHidl::nameFromHal(const char *name, IDevicesFactory::Device *device) {
    static const struct {
        const char *halName;
        IDevicesFactory::Device device;
    } kDeviceTable[] = {
        { AUDIO_HARDWARE_MODULE_ID_PRIMARY,       IDevicesFactory::Device::PRIMARY },
        { AUDIO_HARDWARE_MODULE_ID_A2DP,          IDevicesFactory::Device::A2DP },
        { AUDIO_HARDWARE_MODULE_ID_USB,           IDevicesFactory::Device::USB },
        { AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX, IDevicesFactory::Device::R_SUBMIX },
        { AUDIO_HARDWARE_MODULE_ID_STUB,          IDevicesFactory::Device::STUB },
    };
    for (const auto& entry : kDeviceTable) {
        if (strcmp(name, entry.halName) == 0) {
            *device = entry.device;
            return OK;
        }
    }
    ALOGE("Invalid device name %s", name);
    return BAD_VALUE;
}
// Opens a HIDL audio device: maps the legacy module name to the HIDL enum,
// issues the binder call, and wraps the returned IDevice in DeviceHalHidl.
// Transport failures map to FAILED_TRANSACTION; HAL-side failures map to
// BAD_VALUE or NO_INIT.
status_t DevicesFactoryHalHidl::openDevice(const char *name, sp<DeviceHalInterface> *device) {
    if (mDevicesFactory == 0) return NO_INIT;
    IDevicesFactory::Device hidlDevice;
    const status_t status = nameFromHal(name, &hidlDevice);
    if (status != OK) return status;
    Result retval = Result::NOT_INITIALIZED;
    // The result is delivered through a synchronous HIDL callback.
    Return<void> ret = mDevicesFactory->openDevice(
            hidlDevice,
            [&](Result r, const sp<IDevice>& result) {
                retval = r;
                if (retval == Result::OK) {
                    *device = new DeviceHalHidl(result);
                }
            });
    if (!ret.isOk()) {
        return FAILED_TRANSACTION; // binder/transport error
    }
    switch (retval) {
        case Result::OK:                return OK;
        case Result::INVALID_ARGUMENTS: return BAD_VALUE;
        default:                        return NO_INIT;
    }
}
2. createTrack()
我们暂且跳过Audio的硬件抽象层,回头看看AudioTrack调用AF创建Track的过程
// Binder-side entry used by AudioTrack to create a server-side Track on the
// playback thread that owns the given output handle. Returns a TrackHandle
// (binder proxy) wrapping the Track; *status carries the detailed result.
// NOTE(review): this listing is abridged ("......") — lSessionId is assigned
// in the elided section before it is used.
sp<IAudioTrack> AudioFlinger::createTrack(
audio_stream_type_t streamType,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
size_t *frameCount,
audio_output_flags_t *flags,
const sp<IMemory>& sharedBuffer,
audio_io_handle_t output,
pid_t pid,
pid_t tid,
audio_session_t *sessionId,
int clientUid,
status_t *status,
audio_port_handle_t portId)
{
sp<PlaybackThread::Track> track;
sp<TrackHandle> trackHandle;
sp<Client> client;
status_t lStatus;
audio_session_t lSessionId;
{
Mutex::Autolock _l(mLock);
PlaybackThread *thread = checkPlaybackThread_l(output); // look up (not create) the playback thread for this output
if (thread == NULL) {
ALOGE("no playback thread found for output handle %d", output);
lStatus = BAD_VALUE;
goto Exit;
}
client = registerPid(pid); // register the caller pid as a Client
PlaybackThread *effectThread = NULL;
if (sessionId != NULL && *sessionId != AUDIO_SESSION_ALLOCATE) {
......
track = thread->createTrack_l(client, streamType, sampleRate, format,
channelMask, frameCount, sharedBuffer, lSessionId, flags, tid,
clientUid, &lStatus, portId);
setAudioHwSyncForSession_l(thread, lSessionId);
}
// return handle to client
trackHandle = new TrackHandle(track); // binder proxy for the Track
Exit:
*status = lStatus;
return trackHandle;
}
这里只是为Track取出一个已有的回放线程,并没有创建新线程,那线程究竟是在哪里创建的呢?
// checkPlaybackThread_l() 从 mPlaybackThreads 中按 output 句柄取出线程
// Looks up (does not create) the PlaybackThread registered for the given
// output handle. Threads are added to mPlaybackThreads by openOutput_l(),
// which keeps them alive, so returning the raw pointer here is safe under
// the caller-held lock.
AudioFlinger::PlaybackThread *AudioFlinger::checkPlaybackThread_l(audio_io_handle_t output) const
{
    const sp<PlaybackThread> thread = mPlaybackThreads.valueFor(output);
    return thread.get();
}
frameworks\av\services\audioflinger\Threads.cpp
// PlaybackThread::createTrack_l (heavily abridged): constructs the
// server-side Track object for this thread.
// NOTE(review): parameter validation and the declarations of "track" and
// "frameCount" live in the elided portion of the real function.
sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrack_l(
const sp<AudioFlinger::Client>& client,
audio_stream_type_t streamType,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
size_t *pFrameCount,
const sp<IMemory>& sharedBuffer,
audio_session_t sessionId,
audio_output_flags_t *flags,
pid_t tid,
uid_t uid,
status_t *status,
audio_port_handle_t portId)
{
// create the Track
track = new Track(this, client, streamType, sampleRate, format,
channelMask, frameCount, NULL, sharedBuffer,
sessionId, uid, *flags, TrackBase::TYPE_DEFAULT, portId);
return track;
}
那么上面的线程是如何创建出来的,我们得到APS里去看看
// First-reference hook: spins up the command threads APS uses to serialize
// audio work, creates the client interface back to AudioFlinger, and builds
// the AudioPolicyManager — whose constructor ends up opening the outputs
// (and thus the AudioFlinger playback threads).
void AudioPolicyService::onFirstRef()
{
{
Mutex::Autolock _l(mLock);
// start tone playback thread
mTonePlaybackThread = new AudioCommandThread(String8("ApmTone"), this);
// start audio commands thread
mAudioCommandThread = new AudioCommandThread(String8("ApmAudio"), this);
// start output activity command thread
mOutputCommandThread = new AudioCommandThread(String8("ApmOutput"), this);
mAudioPolicyClient = new AudioPolicyClient(this);
mAudioPolicyManager = createAudioPolicyManager(mAudioPolicyClient); // create the policy manager
}
// load audio processing modules
sp<AudioPolicyEffects>audioPolicyEffects = new AudioPolicyEffects();
{
Mutex::Autolock _l(mLock);
mAudioPolicyEffects = audioPolicyEffects;
}
}
frameworks\av\services\audiopolicy\manager\AudioPolicyFactory.cpp
// C-linkage factory entry point: builds the concrete AudioPolicyManager.
// Ownership of the returned object passes to the caller, which releases it
// via destroyAudioPolicyManager().
extern "C" AudioPolicyInterface* createAudioPolicyManager(
        AudioPolicyClientInterface *clientInterface)
{
    AudioPolicyInterface* manager = new AudioPolicyManager(clientInterface);
    return manager;
}
// C-linkage counterpart to createAudioPolicyManager(): destroys a manager
// previously returned by the factory. NOTE(review): deleting through
// AudioPolicyInterface* assumes a virtual destructor — confirm in the header.
extern "C" void destroyAudioPolicyManager(AudioPolicyInterface *interface)
{
delete interface;
}
frameworks\av\services\audiopolicy\managerdefault\AudioPolicyManager.cpp
udioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterface)
......
{
......
//调用APS的openOutput
status_t status = mpClientInterface->openOutput(......);
}
// AudioPolicyClient::openOutput (signature abridged): thin forwarder that
// obtains the AudioFlinger service and delegates output creation to it.
// Returns PERMISSION_DENIED when AudioFlinger is unavailable.
status_t AudioPolicyService::AudioPolicyClient::openOutput(......)
{
sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
if (af == 0) {
ALOGW("%s: could not get AudioFlinger", __func__);
return PERMISSION_DENIED;
}
return af->openOutput(module, output, config, devices, address, latencyMs, flags);
}
调用AF的openOutput
// AudioFlinger::openOutput (abridged): creates a playback thread for the
// output via openOutput_l(). NOTE(review): the unconditional "return NO_INIT"
// here is an artifact of the elision — in the full function NO_INIT is the
// failure path and success returns NO_ERROR.
status_t AudioFlinger::openOutput(......)
{
......
sp<ThreadBase> thread = openOutput_l(module, output, config, *devices, address, flags);
......
return NO_INIT;
}
绕了一圈回来在此创建线程,并放入到mPlaybackThreads中
// openOutput_l (signature abridged): creates the playback thread whose type
// matches the output flags/format — OffloadThread for compressed offload,
// DirectOutputThread for direct or non-mixable PCM, MixerThread otherwise.
// The new thread is registered in mPlaybackThreads keyed by the output
// handle, which is where checkPlaybackThread_l() later finds it.
sp<AudioFlinger::ThreadBase> AudioFlinger::openOutput_l(......)
{
sp<PlaybackThread> thread;
if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
thread = new OffloadThread(this, outputStream, *output, devices, mSystemReady);
} else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT)
|| !isValidPcmSinkFormat(config->format)
|| !isValidPcmSinkChannelMask(config->channel_mask)) {
thread = new DirectOutputThread(this, outputStream, *output, devices, mSystemReady);
} else {
thread = new MixerThread(this, outputStream, *output, devices, mSystemReady);
}
mPlaybackThreads.add(*output, thread);
return thread;
}
3. start()
frameworks\av\services\audioflinger\Tracks.cpp
由此可见APS控制着整个音频系统,而AF管理的是音频输入混合输出,AudioTrack创建成功以后,开始调用start()函数,start()函数将由TrackHandle代理并调用Track的start()函数
// Track::start (abridged): transitions the track to an active state and
// hands it to the owning playback thread via addTrack_l().
// NOTE(review): "state" and "playbackThread" are declared in elided code of
// the full source.
status_t AudioFlinger::PlaybackThread::Track::start(......)
{
status_t status = NO_ERROR;
sp<ThreadBase> thread = mThread.promote();
if (state == PAUSED || state == PAUSING) {
if (mResumeToStopping) {
// happened we need to resume to STOPPING_1
mState = TrackBase::STOPPING_1;
ALOGV("PAUSED => STOPPING_1 (%d) on thread %p", mName, this);
} else {
mState = TrackBase::RESUMING;
ALOGV("PAUSED => RESUMING (%d) on thread %p", mName, this);
}
} else {
mState = TrackBase::ACTIVE; // mark the track active
ALOGV("? => ACTIVE (%d) on thread %p", mName, this);
}
status = playbackThread->addTrack_l(this);
return status;
}
将创建好的Track加入到mActiveTracks中
// Adds a track to the thread's active list (returns ALREADY_EXISTS if it is
// already there), initializes its retry/fill bookkeeping, and finally wakes
// the mixing machinery via onAddNewTrack_l().
status_t AudioFlinger::PlaybackThread::addTrack_l(const sp<Track>& track)
{
status_t status = ALREADY_EXISTS;
if (mActiveTracks.indexOf(track) < 0) {
......
// set retry count for buffer fill
if (track->isOffloaded()) {
if (track->isStopping_1()) {
track->mRetryCount = kMaxTrackStopRetriesOffload;
} else {
track->mRetryCount = kMaxTrackStartupRetriesOffload;
}
track->mFillingUpStatus = mStandby ? Track::FS_FILLING : Track::FS_FILLED;
} else {
track->mRetryCount = kMaxTrackStartupRetries; // startup retry budget
track->mFillingUpStatus =
track->sharedBuffer() != 0 ? Track::FS_FILLED : Track::FS_FILLING;
}
track->mResetDone = false;
track->mPresentationCompleteFrames = 0;
mActiveTracks.add(track);
sp<EffectChain> chain = getEffectChain_l(track->sessionId());
if (chain != 0) {
chain->incActiveTrackCnt();
}
char buffer[256];
track->dump(buffer, ARRAY_SIZE(buffer), false /* active */);
status = NO_ERROR;
}
onAddNewTrack_l(); // notify the mixer thread (e.g. MixerThread) that a new track arrived
return status;
}
PlaybackThread的threadLoop()将调用MixerThread::prepareTracks_l()函数
// prepareTracks_l (heavily abridged): walks mActiveTracks, wires each ready
// track into the AudioMixer (buffer provider, volumes, format/channel
// parameters) and computes the mixer state for this cycle.
// NOTE(review): the for-loop's closing braces fall inside the elided text of
// this excerpt.
AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTracks_l(
Vector< sp<Track> > *tracksToRemove)
{
mixer_state mixerStatus = MIXER_IDLE;
// find out which tracks need to be processed
size_t count = mActiveTracks.size(); // number of currently active tracks
......
for (size_t i=0 ; i<count ; i++) { // iterate over the active tracks
const sp<Track> t = mActiveTracks[i];
// this const just means the local variable doesn't change
Track* const track = t.get();
{
// control block shared with the client-side AudioTrack
audio_track_cblk_t* cblk = track->cblk();
// The first time a track is added we wait
// for all its buffers to be filled before processing it
int name = track->name();
......
// XXX: these things DON'T need to be done each time
// the Track itself acts as the mixer's data source
mAudioMixer->setBufferProvider(name, track);
mAudioMixer->enable(name);
// push volume and aux-send levels into the mixer
mAudioMixer->setParameter(name, param, AudioMixer::VOLUME0, &vlf);
mAudioMixer->setParameter(name, param, AudioMixer::VOLUME1, &vrf);
mAudioMixer->setParameter(name, param, AudioMixer::AUXLEVEL, &vaf);
mAudioMixer->setParameter(
name,
AudioMixer::TRACK,
AudioMixer::FORMAT, (void *)track->format());
mAudioMixer->setParameter(
name,
AudioMixer::TRACK,
AudioMixer::CHANNEL_MASK, (void *)(uintptr_t)track->channelMask());
mAudioMixer->setParameter(
name,
AudioMixer::TRACK,
AudioMixer::MIXER_CHANNEL_MASK, (void *)(uintptr_t)mChannelMask);
......
mMixerStatusIgnoringFastTracks = mixerStatus;
if (fastTracks > 0) {
mixerStatus = MIXER_TRACKS_READY;
}
return mixerStatus;
}
混合线程的处理函数
// One mix pass of the playback loop: runs the mixer, then resets the sleep
// and standby timing used by threadLoop() to pace writes to the output.
void AudioFlinger::MixerThread::threadLoop_mix()
{
// mix buffers...
mAudioMixer->process();
// step back toward full-speed writes after a previous sleep backoff
if ((mSleepTimeUs == 0) && (sleepTimeShift > 0)) {
sleepTimeShift--;
}
mSleepTimeUs = 0;
mStandbyTimeNs = systemTime() + mStandbyDelayNs;
//TODO: delay standby when effects have a tail
}
调用混音器的处理函数
// Runs one mix cycle by invoking the current hook. The hook pointer is
// re-selected by process__validate() according to the number of enabled
// tracks and their audio formats.
void AudioMixer::process()
{
mState.hook(&mState);
}
使能混合器
// Enables mixing for the given track and, when that is an actual state
// change, invalidates the mixer state so the hook gets re-selected.
void AudioMixer::enable(int name)
{
    name -= TRACK0;
    ALOG_ASSERT(uint32_t(name) < MAX_NUM_TRACKS, "bad track name %d", name);
    track_t& track = mState.tracks[name];
    if (track.enabled) {
        return; // already enabled: nothing changed, no revalidation needed
    }
    track.enabled = true;
    ALOGV("enable(%d)", name);
    invalidateState(1 << name);
}
刷新缓冲
// Records which tracks changed and arms process__validate as the next hook,
// so the appropriate mix routine is re-chosen on the next process() call.
void AudioMixer::invalidateState(uint32_t mask)
{
    if (mask == 0) {
        return;
    }
    mState.needsChanged |= mask;
    mState.hook = process__validate;
}
采样处理函数
// process__validate (abridged): re-examines the enabled tracks and selects
// the cheapest capable mix routine — a generic resampling path when any
// track needs resampling, otherwise the specialized one-track 16-bit stereo
// fast path. (Fixed here: the fast-path hook was spelled with a single
// underscore, "process_OneTrack...", and did not match the function name
// used by its definition, "process__OneTrack...".)
void AudioMixer::process__validate(state_t* state)
{
uint32_t enabled = 0;
uint32_t disabled = 0;
......
if (countActiveTracks > 0) {
if (resampling) {
.....
state->hook = process__genericResampling; // resampling mix path
} else {
// temp buffers are only needed while resampling; release them here
if (state->outputTemp) {
delete [] state->outputTemp;
state->outputTemp = NULL;
}
if (state->resampleTemp) {
delete [] state->resampleTemp;
state->resampleTemp = NULL;
}
state->hook = process__OneTrack16BitsStereoNoResampling; // fast path
}
}
从Track中取出音频数据,写出
// Fast-path mix routine for a single enabled 16-bit stereo track with no
// resampling (heavily abridged): pulls buffers from the track's provider,
// consumes the frames, and releases each buffer back.
// NOTE(review): "t", "b" and "numFrames" are declared in the elided portion
// of the real function.
void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state)
{
t.bufferProvider->getNextBuffer(&b); // acquire the next readable buffer
numFrames -= b.frameCount;
t.bufferProvider->releaseBuffer(&b); // return the buffer to the provider
}
MixerThread获取Track的数据,进行混音后通过AudioStreamOut的 *output 写入音频输出设备
AudioFlinger的分析就到这里,AudioFlinger通过APS创建混音线程,混音线程将Track中的数据取出,经过环形缓冲区处理,最终输出到音频硬件设备;这个过程较为复杂,特别是audio_track_cblk_t在此并没有详细分析;AF的分析暂且到此结束,后续连贯分析音频流程的时候会更清晰。Bye,Bye !