Android Cross-Process Communication: Binder Internals (Part 2)

1 Binder Source Code Analysis

1.1 The Service Registration Flow

The Android system exposes a large number of core services in the form of Services. Only after a Service has registered with ServiceManager can a Client obtain that Service's proxy object through ServiceManager and use the functionality the Service provides. Below we take MediaServer as an example to analyze the Service registration flow, starting with MediaServer's main() function.

// frameworks/av/media/mediaserver/main_mediaserver.cpp
int main(int argc __unused, char **argv __unused)
{
    signal(SIGPIPE, SIG_IGN);

    sp<ProcessState> proc(ProcessState::self());                  <1>
    sp<IServiceManager> sm(defaultServiceManager());              <2>
    ALOGI("ServiceManager: %p", sm.get());
    InitializeIcuOrDie();
    MediaPlayerService::instantiate();
    ResourceManagerService::instantiate();
    registerExtensions();
    ProcessState::self()->startThreadPool();
    IPCThreadState::self()->joinThreadPool();
}
// frameworks/av/media/libmediaplayerservice/MediaPlayerService.cpp
void MediaPlayerService::instantiate() {
    defaultServiceManager()->addService(
            String16("media.player"), new MediaPlayerService());  <3>
}
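
The last two calls in main() are worth a note before we dive into <1> to <3>: startThreadPool() spawns the first Binder thread of the process, and joinThreadPool() adds the main thread to the pool as well, so MediaServer can service incoming transactions. For reference (this may vary slightly between Android versions):

// frameworks/native/libs/binder/ProcessState.cpp (abridged)
void ProcessState::startThreadPool()
{
    AutoMutex _l(mLock);
    if (!mThreadPoolStarted) {
        mThreadPoolStarted = true;
        spawnPooledThread(true);  // spawn the first ("main") pooled Binder thread
    }
}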

<1> First, the ProcessState::self() function is called:

// frameworks/native/libs/binder/ProcessState.cpp
sp<ProcessState> ProcessState::self()
{
    Mutex::Autolock _l(gProcessMutex);
    if (gProcess != NULL) {
        return gProcess;
    }
    gProcess = new ProcessState("/dev/binder");
    return gProcess;
}

ProcessState::self() creates a singleton ProcessState object:

// frameworks/native/libs/binder/ProcessState.cpp
ProcessState::ProcessState(const char *driver)
    : mDriverName(String8(driver))
    , mDriverFD(open_driver(driver))      // open_driver() opens the device node "/dev/binder"
    , mVMStart(MAP_FAILED)
    , mThreadCountLock(PTHREAD_MUTEX_INITIALIZER)
    , mThreadCountDecrement(PTHREAD_COND_INITIALIZER)
    , mExecutingThreadsCount(0)
    , mMaxThreads(DEFAULT_MAX_BINDER_THREADS)
    , mStarvationStartTimeMs(0)
    , mManagesContexts(false)
    , mBinderContextCheckFunc(NULL)
    , mBinderContextUserData(NULL)
    , mThreadPoolStarted(false)
    , mThreadPoolSeq(1)
{
    if (mDriverFD >= 0) {
        // mmap the binder, providing a chunk of virtual address space to receive transactions.
        // BINDER_VM_SIZE = ((1*1024*1024) - (4096*2)), i.e. 1MB - 8KB; data sent in one Binder transaction should not exceed this limit
        mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
        if (mVMStart == MAP_FAILED) {
            // *sigh*
            ALOGE("Using %s failed: unable to mmap transaction memory.\n", mDriverName.c_str());
            close(mDriverFD);
            mDriverFD = -1;
            mDriverName.clear();
        }
    }

    LOG_ALWAYS_FATAL_IF(mDriverFD < 0, "Binder driver could not be opened.  Terminating.");
}

static int open_driver(const char *driver)
{
    int fd = open(driver, O_RDWR | O_CLOEXEC);
    if (fd >= 0) {
        int vers = 0;
        status_t result = ioctl(fd, BINDER_VERSION, &vers);
        if (result == -1) {
            ALOGE("Binder ioctl to obtain version failed: %s", strerror(errno));
            close(fd);
            fd = -1;
        }
        if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION) {
          ALOGE("Binder driver protocol(%d) does not match user space protocol(%d)! ioctl() return value: %d",
                vers, BINDER_CURRENT_PROTOCOL_VERSION, result);
            close(fd);
            fd = -1;
        }
        size_t maxThreads = DEFAULT_MAX_BINDER_THREADS;
        result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads); // set the max number of Binder threads; DEFAULT_MAX_BINDER_THREADS = 15
        if (result == -1) {
            ALOGE("Binder ioctl to set max threads failed: %s", strerror(errno));
        }
    } else {
        ALOGW("Opening '%s' failed: %s\n", driver, strerror(errno));
    }
    return fd;
}

In the ProcessState constructor, open_driver() first opens the device node "/dev/binder", and then mmap() is called. The mapping lets the process's user space and the kernel access the same block of physical memory, eliminating one data copy (see "Android Cross-Process Communication: Binder Internals (Part 1)" for Binder's one-copy design).
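
To see where this pays off, here is an abridged excerpt from the BR_TRANSACTION handling in IPCThreadState::executeCommand() (the exact code varies between Android versions): the receiving Parcel references the transaction data sitting in the mmap'd region directly, instead of copying it into the process again.

// frameworks/native/libs/binder/IPCThreadState.cpp,
// IPCThreadState::executeCommand(), BR_TRANSACTION case (abridged)
binder_transaction_data tr;
result = mIn.read(&tr, sizeof(tr));
...
Parcel buffer;
// The Parcel points straight into the mmap'd receive buffer; no extra copy.
buffer.ipcSetDataReference(
    reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
    tr.data_size,
    reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
    tr.offsets_size / sizeof(binder_size_t),
    freeBuffer, this);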

<2> Next, let's analyze the defaultServiceManager() function:

// frameworks/native/libs/binder/IServiceManager.cpp
sp<IServiceManager> defaultServiceManager()
{
    if (gDefaultServiceManager != NULL) return gDefaultServiceManager;

    {
        AutoMutex _l(gDefaultServiceManagerLock);
        while (gDefaultServiceManager == NULL) {
            gDefaultServiceManager = interface_cast<IServiceManager>(
                ProcessState::self()->getContextObject(NULL));
            if (gDefaultServiceManager == NULL)   // still NULL means ServiceManager has not finished initializing yet
                sleep(1);
        }
    }

    return gDefaultServiceManager;
}

defaultServiceManager() first calls ProcessState::self()->getContextObject(NULL) to obtain an IBinder object, then uses interface_cast() to convert that IBinder into an IServiceManager (interface_cast() was covered in Part 1).
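
interface_cast() itself is only a thin template; IServiceManager::asInterface() is generated by the IMPLEMENT_META_INTERFACE macro and, for a remote IBinder, wraps it in a new BpServiceManager:

// frameworks/native/libs/binder/include/binder/IInterface.h
template<typename INTERFACE>
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj)
{
    return INTERFACE::asInterface(obj);
}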

// frameworks/native/libs/binder/ProcessState.cpp
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& /*caller*/)
{
    return getStrongProxyForHandle(0);         // ServiceManager's handle is defined to be 0
}

sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
    sp<IBinder> result;
    ...
    b = new BpBinder(handle); // the handle is stored in BpBinder's member mHandle and later passed as an argument to IPCThreadState::transact()
    e->binder = b;
    if (b) e->refs = b->getWeakRefs();
    result = b;
    ...        
    return result;
}

A BpBinder object is created here with a handle value of 0, which marks this BpBinder as the proxy for ServiceManager.
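
Putting <2> together, the net effect of defaultServiceManager() can be summarized with this conceptual sketch (not literal source code; BpServiceManager is the proxy class that interface_cast() wraps around the BpBinder):

// Conceptual sketch, not literal source:
sp<IServiceManager> sm = interface_cast<IServiceManager>(new BpBinder(0));
// which amounts to: new BpServiceManager(new BpBinder(0))
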
<3> Next, let's analyze defaultServiceManager()->addService():

// frameworks/native/libs/binder/IServiceManager.cpp
virtual status_t addService(const String16& name, const sp<IBinder>& service,
        bool allowIsolated)
{
    Parcel data, reply;
    data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
    data.writeString16(name);
    data.writeStrongBinder(service);
    data.writeInt32(allowIsolated ? 1 : 0);
    status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
    return err == NO_ERROR ? reply.readExceptionCode() : err;
}

remote() returns the member variable mRemote kept in BpRefBase; here mRemote is the BpBinder object, whose transact() is then called (mRemote was covered in Part 1).
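
For reference, remote() and mRemote are declared in BpRefBase (abridged from frameworks/native/libs/binder/include/binder/Binder.h):

// frameworks/native/libs/binder/include/binder/Binder.h (abridged)
class BpRefBase : public virtual RefBase
{
protected:
    inline IBinder* remote() { return mRemote; }
private:
    IBinder* const mRemote;  // set once at construction; here it is the BpBinder
};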

//frameworks/native/libs/binder/BpBinder.cpp
status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    // Once a binder has died, it will never come back to life.
    if (mAlive) {
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags); // mHandle is 0 here
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }

    return DEAD_OBJECT;
}

IPCThreadState is the "workhorse" that does the real transport work in a process. Its implementation is as follows:

// frameworks/native/libs/binder/IPCThreadState.cpp
IPCThreadState* IPCThreadState::self()
{
    if (gHaveTLS) {
restart:
        const pthread_key_t k = gTLS;
        IPCThreadState* st = (IPCThreadState*)pthread_getspecific(k);
        if (st) return st;
        return new IPCThreadState;
    }
    ...

    pthread_mutex_unlock(&gTLSMutex);
    goto restart;
}

This creates the IPCThreadState object. Next, let's look at IPCThreadState's constructor:

// frameworks/native/libs/binder/IPCThreadState.cpp
IPCThreadState::IPCThreadState()
    : mProcess(ProcessState::self()),
      mStrictModePolicy(0),
      mLastTransactionBinderFlags(0)
{
    pthread_setspecific(gTLS, this);
    clearCaller();
    mIn.setDataCapacity(256);
    mOut.setDataCapacity(256);
#ifdef OUTPUT_DEBUG_LOG
    mWriteHistoryIndex = 0;
    mReadHistoryIndex = 0;
    for (int i = 0; i < CMD_HISTORY_SIZE; i++) {
        mWriteHistory[i].when.tv_sec = 0;
        mWriteHistory[i].size = 0;
        mReadHistory[i].when.tv_sec = 0;
        mReadHistory[i].size = 0;
    }
#endif
}

As this shows, every thread has its own IPCThreadState, and each IPCThreadState holds an mIn and an mOut: mIn receives data from the Binder device, while mOut stores data to be sent to the Binder device.
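
Under the hood, mOut and mIn are handed to the driver together in a single ioctl. A minimal sketch of what IPCThreadState::talkWithDriver() does (abridged; the real function also tracks how many bytes the driver consumed and handles errors):

// Sketch of IPCThreadState::talkWithDriver() (abridged)
binder_write_read bwr;
bwr.write_size   = mOut.dataSize();
bwr.write_buffer = (uintptr_t)mOut.data();  // commands going to the driver
bwr.write_consumed = 0;
bwr.read_size    = mIn.dataCapacity();
bwr.read_buffer  = (uintptr_t)mIn.data();   // replies coming back from the driver
bwr.read_consumed = 0;
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0) {
    ...  // adjust mOut/mIn according to bwr.write_consumed / bwr.read_consumed
}
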
BpBinder::transact() calls IPCThreadState::transact(), and it is the latter that actually carries out the communication with Binder. Let's look at the implementation of IPCThreadState::transact():

// frameworks/native/libs/binder/IPCThreadState.cpp
status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    ...
    if (err == NO_ERROR) {
        LOG_ONEWAY(">>>> SEND from pid %d uid %d %s", getpid(), getuid(),
            (flags & TF_ONE_WAY) == 0 ? "READ REPLY" : "ONE WAY");
        err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
    }
    ...
    if (reply) {
        err = waitForResponse(reply);
    } else {
        Parcel fakeReply;
        err = waitForResponse(&fakeReply);
    }
    ...
}
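
Before the data reaches the driver, writeTransactionData() packages the call into a binder_transaction_data and queues a BC_TRANSACTION command in mOut (abridged; details vary between versions):

// frameworks/native/libs/binder/IPCThreadState.cpp (abridged)
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    binder_transaction_data tr;
    tr.target.handle = handle;       // which service the driver should deliver to
    tr.code = code;                  // e.g. ADD_SERVICE_TRANSACTION
    tr.flags = binderFlags;
    ...
    tr.data_size = data.ipcDataSize();
    tr.data.ptr.buffer = data.ipcData();
    tr.offsets_size = data.ipcObjectsCount() * sizeof(binder_size_t);
    tr.data.ptr.offsets = data.ipcObjects();
    ...
    mOut.writeInt32(cmd);            // BC_TRANSACTION
    mOut.write(&tr, sizeof(tr));
    return NO_ERROR;
}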

IPCThreadState::transact() drives the interaction with the Binder driver; we won't trace the full exchange here (the code is worth reading if you are interested). For now it is enough to know that when a Service is registered, a flat_binder_object structure describing the Binder object is passed to the driver, and this structure carries a handle field.
Every Service in Android has a handle (assigned in increasing order as services are created; handle = 0 is ServiceManager), which the Binder driver uses to find the corresponding Service:

// bionic/libc/kernel/uapi/linux/android/binder.h
struct flat_binder_object {
 unsigned long type;
 unsigned long flags;
 union {
    void *binder;
    signed long handle;
 };
 void *cookie;
};
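
For completeness, this is (abridged) how writeStrongBinder() fills in flat_binder_object in user space before the Parcel crosses into the driver; the driver then translates the local object into a handle for the receiving process:

// frameworks/native/libs/binder/Parcel.cpp, flatten_binder() (abridged)
flat_binder_object obj;
...
IBinder *local = binder->localBinder();
if (!local) {
    // A proxy is being passed along: send its handle.
    BpBinder *proxy = binder->remoteBinder();
    obj.type = BINDER_TYPE_HANDLE;
    obj.handle = proxy ? proxy->handle() : 0;
    obj.cookie = 0;
} else {
    // A local Binder (our addService() case): send the object pointers;
    // the driver converts them into a handle on the receiving side.
    obj.type = BINDER_TYPE_BINDER;
    obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
    obj.cookie = reinterpret_cast<uintptr_t>(local);
}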

This completes the registration of MediaServer.

1.2 The Service Retrieval Flow

Next we analyze how a MediaServer client connects to the MediaServer server side. Start with the following code:

// frameworks/av/media/libmedia/mediametadataretriever.cpp
const sp<IMediaPlayerService> MediaMetadataRetriever::getService()
{
    Mutex::Autolock lock(sServiceLock);
    if (sService == 0) {
        sp<IServiceManager> sm = defaultServiceManager();
        sp<IBinder> binder;
        do {
            binder = sm->getService(String16("media.player"));     <1>
            if (binder != 0) {
                break;
            }
            ALOGW("MediaPlayerService not published, waiting...");
            usleep(500000); // 0.5 s
        } while (true);
        if (sDeathNotifier == NULL) {
            sDeathNotifier = new DeathNotifier();
        }
        binder->linkToDeath(sDeathNotifier);
        // interface_cast() was analyzed in Part 1; it converts the IBinder into an IMediaPlayerService
        sService = interface_cast<IMediaPlayerService>(binder);  
    }
    ALOGE_IF(sService == 0, "no MediaPlayerService!?");
    return sService;
}

<1> Here the client calls ServiceManager's getService() with "media.player", the same name that was passed to addService() earlier. Let's look at the implementation of getService():

// frameworks/native/libs/binder/IServiceManager.cpp
virtual sp<IBinder> getService(const String16& name) const
{
    unsigned n;
    for (n = 0; n < 5; n++){
        if (n > 0) {
            if (!strcmp(ProcessState::self()->getDriverName().c_str(), "/dev/vndbinder")) {
                ALOGI("Waiting for vendor service %s...", String8(name).string());
                CallStack stack(LOG_TAG);
            } else {
                ALOGI("Waiting for service %s...", String8(name).string());
            }
            sleep(1);
        }
        sp<IBinder> svc = checkService(name);    
        if (svc != NULL) return svc;
    }
    return NULL;
}

virtual sp<IBinder> checkService( const String16& name) const
{
    Parcel data, reply;
    data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
    data.writeString16(name);
    remote()->transact(CHECK_SERVICE_TRANSACTION, data, &reply);
    return reply.readStrongBinder();
}

getService() calls the internal checkService(), which packs "media.player" into a Parcel and then calls remote()->transact(), the same path we analyzed for addService(), only now with CHECK_SERVICE_TRANSACTION as the code. transact() talks to the Binder driver and stores the result in reply. Next, let's analyze readStrongBinder():

// frameworks/native/libs/binder/Parcel.cpp
status_t Parcel::readStrongBinder(sp<IBinder>* val) const
{
    status_t status = readNullableStrongBinder(val);
    if (status == OK && !val->get()) {
        status = UNEXPECTED_NULL;
    }
    return status;
}

status_t Parcel::readNullableStrongBinder(sp<IBinder>* val) const
{
    return unflatten_binder(ProcessState::self(), *this, val);
}

These are just pass-through calls; the real work is in unflatten_binder():

// frameworks/native/libs/binder/Parcel.cpp
status_t unflatten_binder(const sp<ProcessState>& proc,
    const Parcel& in, sp<IBinder>* out)
{
    const flat_binder_object* flat = in.readObject(false);
    if (flat) {
        switch (flat->type) {
            ...
            case BINDER_TYPE_HANDLE:
                *out = proc->getStrongProxyForHandle(flat->handle);
                return finish_unflatten_binder(
                    static_cast<BpBinder*>(out->get()), *flat, in);
        }
    }
    return BAD_TYPE;
}

Here the flat_binder_object structure appears again. It was filled in during addService(), and this time its handle field is used. Next, look at getStrongProxyForHandle():

// frameworks/native/libs/binder/ProcessState.cpp
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
    sp<IBinder> result;

    AutoMutex _l(mLock);

    handle_entry* e = lookupHandleLocked(handle);

    if (e != NULL) {
        IBinder* b = e->binder;
        if (b == NULL || !e->refs->attemptIncWeak(this)) {
            if (handle == 0) {
            ...
            }

            b = new BpBinder(handle);
            e->binder = b;
            if (b) e->refs = b->getWeakRefs();
            result = b;
        } else {
           ...
        }
    }
    return result;
}

A BpBinder is created here with the handle as its argument. Now look at BpBinder's constructor:

// frameworks/native/libs/binder/BpBinder.cpp
BpBinder::BpBinder(int32_t handle)
    : mHandle(handle)
{
    ...
}

BpBinder's constructor stores the handle in the member variable mHandle. When the client later accesses the remote service, it attaches mHandle, and the Binder driver uses it to look up the corresponding Service, wake it up to handle the client's request, and return the result.
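
Putting the retrieval flow together, the client side boils down to this conceptual sketch (names as used earlier in this article; BpMediaPlayerService is the proxy class interface_cast() produces):

// Conceptual sketch of the client-side retrieval flow, not literal source:
sp<IServiceManager> sm = defaultServiceManager();                // wraps BpBinder(0)
sp<IBinder> binder = sm->getService(String16("media.player"));   // wraps BpBinder(handle)
sp<IMediaPlayerService> service =
        interface_cast<IMediaPlayerService>(binder);             // BpMediaPlayerService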

1.3 The Service Usage Flow

Service registration is itself already a complete cross-process call; the server side just happens to be a special one, ServiceManager (whose handle is 0). In the getService() flow analyzed above, the client ends up with a handle value saved in mHandle. Now let's pick an arbitrary interface from IMediaPlayer and follow it:

// frameworks/av/media/libmedia/IMediaPlayer.cpp
status_t start()
{
    Parcel data, reply;
    data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
    remote()->transact(START, data, &reply);
    return reply.readInt32();
}

This calls into BpBinder's transact():

// frameworks/native/libs/binder/BpBinder.cpp
status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    // Once a binder has died, it will never come back to life.
    if (mAlive) {
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }
    return DEAD_OBJECT;
}

We already went through this function during addService(), except that mHandle was 0 there, whereas here mHandle is the value obtained via getService(). We won't trace the rest of the client-side flow any further, but the sketch below shows where the call ends up on the server side.
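
On the server side, the driver wakes a Binder thread in MediaServer and the call is dispatched through BnMediaPlayer::onTransact(), which unpacks the Parcel and invokes the real implementation (abridged):

// frameworks/av/media/libmedia/IMediaPlayer.cpp (abridged)
status_t BnMediaPlayer::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch (code) {
        case START: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            reply->writeInt32(start());  // call into the server-side implementation
            return NO_ERROR;
        } break;
        ...
        default:
            return BBinder::onTransact(code, data, reply, flags);
    }
}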

Related Binder links:
https://blog.csdn.net/codefly/article/details/17058607

Reposted from blog.csdn.net/caiyu_09/article/details/83178748