Android Binder: the ServiceManager client side

1. IServiceManager: the interface the server exposes

class IServiceManager : public IInterface
{
public:
    /*DECLARE_META_INTERFACE(ServiceManager)*/
    static const ::android::String16 descriptor;
    static ::android::sp<IServiceManager> asInterface(const ::android::sp<::android::IBinder>& obj);
    virtual const ::android::String16& getInterfaceDescriptor() const;

    IServiceManager();
    virtual ~IServiceManager();

    virtual sp<IBinder>         getService( const String16& name) const = 0;
    virtual sp<IBinder>         checkService( const String16& name) const = 0;
    virtual status_t            addService( const String16& name,
                                            const sp<IBinder>& service,
                                            bool allowIsolated = false) = 0;
    virtual Vector<String16>    listServices() = 0;
    enum {
        GET_SERVICE_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION,
        CHECK_SERVICE_TRANSACTION,
        ADD_SERVICE_TRANSACTION,
        LIST_SERVICES_TRANSACTION,
    };

};

Looking at IServiceManager's members, nothing connects it to IBinder directly. Looking next at IInterface, the asBinder/asInterface pair suggests a conversion back and forth between the two; a usage sketch follows the class below.

class IInterface : public virtual RefBase
{
public:
            IInterface();
            static sp<IBinder>  asBinder(const IInterface*);
            static sp<IBinder>  asBinder(const sp<IInterface>&);

protected:
    virtual                     ~IInterface();
    virtual IBinder*            onAsBinder() = 0;

};
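
As a quick illustration of the two directions of this conversion, here is a minimal usage sketch (hypothetical client code, assuming obj is a valid binder proxy):

// From IBinder to interface: wraps obj in a proxy (BpServiceManager, see 2.2)
sp<IServiceManager> sm = interface_cast<IServiceManager>(obj);
// From interface back to IBinder: returns the underlying binder object
sp<IBinder> binder = IInterface::asBinder(sm);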

2. Obtaining a remote proxy for ServiceManager

sp<IServiceManager> defaultServiceManager()
{
    /* simplified: the gDefaultServiceManager singleton check and the
     * retry loop around interface_cast are omitted here */
    gDefaultServiceManager = interface_cast<IServiceManager>(ProcessState::self()->getContextObject(NULL));
    return gDefaultServiceManager;
}

2.1 ProcessState::self()->getContextObject(NULL)

This ultimately boils down to new BpBinder(0):

class ProcessState : public virtual RefBase
{
public:
    static  sp<ProcessState>    self();
    static  sp<ProcessState>    initWithDriver(const char *driver);
};

sp<ProcessState> ProcessState::self()
{
    /* simplified: the check for an existing gProcess singleton is omitted */
    gProcess = new ProcessState("/dev/binder");
    return gProcess;
}

ProcessState::ProcessState(const char *driver)
    : mDriverName(String8(driver))
    , mDriverFD(open_driver(driver))
    , mVMStart(MAP_FAILED)
    , mThreadCountLock(PTHREAD_MUTEX_INITIALIZER)
    , mThreadCountDecrement(PTHREAD_COND_INITIALIZER)
    , mExecutingThreadsCount(0)
    , mMaxThreads(DEFAULT_MAX_BINDER_THREADS)
    , mStarvationStartTimeMs(0)
    , mManagesContexts(false)
    , mBinderContextCheckFunc(NULL)
    , mBinderContextUserData(NULL)
    , mThreadPoolStarted(false)
    , mThreadPoolSeq(1)
{
    if (mDriverFD >= 0) {
        // mmap the binder, providing a chunk of virtual address space to receive transactions.
        mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
    }
}

sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& /*caller*/)
{
    return getStrongProxyForHandle(0);
}

sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle) /* handle == 0 here */
{
    sp<IBinder> result;
    handle_entry* e = lookupHandleLocked(handle);
    IBinder* b = e->binder;
    b = new BpBinder(handle); /* build an IBinder proxy from the handle */
    e->binder = b;
    result = b;
    return result;
}

BpBinder::BpBinder(int32_t handle)
    : mHandle(handle)
    , mAlive(1)
    , mObitsSent(0)
    , mObituaries(NULL)
{
    ALOGV("Creating BpBinder %p handle %d\n", this, mHandle);
    extendObjectLifetime(OBJECT_LIFETIME_WEAK);
    IPCThreadState::self()->incWeakHandle(handle);
}
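
Putting 2.1 together, the call chain boils down to (illustrative summary):

ProcessState::self()->getContextObject(NULL)
    -> getStrongProxyForHandle(0)
        -> new BpBinder(0)   // mHandle = 0, the well-known ServiceManager handle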

2.2 interface_cast: creating the interface from an IBinder via the template parameter INTERFACE

template<typename INTERFACE>
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj)
{
    /* with INTERFACE = IServiceManager this is IServiceManager::asInterface(obj) */
    return INTERFACE::asInterface(obj);
}

From IBinder to IServiceManager (this is what the IMPLEMENT_META_INTERFACE macro expands to):
::android::sp<IServiceManager> IServiceManager::asInterface(
        const ::android::sp<::android::IBinder>& obj)
{
    ::android::sp<IServiceManager> intr;
    if (obj != NULL) {
        intr = static_cast<IServiceManager*>(
            obj->queryLocalInterface(IServiceManager::descriptor).get());
        if (intr == NULL) {
            intr = new BpServiceManager(obj);
        }
    }
    return intr;
}

For a BpBinder, queryLocalInterface falls through to the base-class implementation IBinder::queryLocalInterface, which returns NULL:
sp<IInterface>  IBinder::queryLocalInterface(const String16& /*descriptor*/)
{
    return NULL;
}
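
For contrast, the server-side BnInterface does override queryLocalInterface. A sketch of the generated implementation, abridged from IInterface.h:

template<typename INTERFACE>
inline sp<IInterface> BnInterface<INTERFACE>::queryLocalInterface(
        const String16& _descriptor)
{
    if (_descriptor == INTERFACE::descriptor) return this; /* a local object */
    return NULL;
}

So asInterface only constructs a Bp proxy when the IBinder is not a local Bn object.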

So for a remote proxy the above simplifies to: new BpServiceManager(new BpBinder(0));

class BpServiceManager : public BpInterface<IServiceManager>

BpInterface is a template class whose parameter is the Ixxx interface that the server exposes (IServiceManager here):
template<typename INTERFACE>
class BpInterface : public INTERFACE, public BpRefBase
{
public:
    explicit                    BpInterface(const sp<IBinder>& remote);

protected:
    virtual IBinder*            onAsBinder();
};
                      
BpRefBase holds an IBinder member, that is, it is associated with the binder in a "has-a" relationship:
class BpRefBase : public virtual RefBase
{
protected:
    explicit                BpRefBase(const sp<IBinder>& o);
    virtual                 ~BpRefBase();
    virtual void            onFirstRef();
    virtual void            onLastStrongRef(const void* id);
    virtual bool            onIncStrongAttempted(uint32_t flags, const void* id);

    inline  IBinder*        remote()                { return mRemote; }
    inline  IBinder*        remote() const          { return mRemote; }

private:
                            BpRefBase(const BpRefBase& o);
    BpRefBase&              operator=(const BpRefBase& o);

    IBinder* const          mRemote;
    RefBase::weakref_type*  mRefs;
    std::atomic<int32_t>    mState;

};

2.3 How mRemote gets assigned, seen from BpServiceManager's constructor

The constructor argument is passed down the chain until it lands in mRemote:
explicit BpServiceManager(const sp<IBinder>& impl)
        : BpInterface<IServiceManager>(impl)
{
}

template<typename INTERFACE>
inline BpInterface<INTERFACE>::BpInterface(const sp<IBinder>& remote)
    : BpRefBase(remote)
{
}

BpRefBase::BpRefBase(const sp<IBinder>& o)
    : mRemote(o.get()), mRefs(NULL), mState(0)
{
    /* o.get() is the sp<> accessor that returns the raw pointer it manages */
}

So mRemote is assigned from o.get(); the implementation of get() is simply:

frameworks/base/include/utils/RefBase.h
template <typename T>
class sp
{
    inline  T*      get() const         { return m_ptr; }
};

3. The client-side addService flow

virtual status_t addService(const String16& name, const sp<IBinder>& service,
        bool allowIsolated)
{
    Parcel data, reply;
    data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
    data.writeString16(name);
    data.writeStrongBinder(service);
    data.writeInt32(allowIsolated ? 1 : 0);
    status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
    return err == NO_ERROR ? reply.readExceptionCode() : err;
}
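
A typical registration from a server process then looks like this (a hypothetical sketch; MediaPlayerService is only an example name):

sp<IServiceManager> sm = defaultServiceManager();
/* writeStrongBinder() will flatten this BBinder-derived object into a
 * flat_binder_object of type BINDER_TYPE_BINDER, as shown in 3.1 */
sm->addService(String16("media.player"), new MediaPlayerService());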

3.1 The data a Parcel transmits

The special handling of IBinder objects written into a Parcel is the heart of the binder mechanism.

1. flat_binder_object

struct flat_binder_object {
    /* 8 bytes for large_flat_header. */
    __u32 type;
    __u32 flags;

    /* 8 bytes of data. */
    union {
        binder_uintptr_t binder; /* local object */
        __u32 handle;            /* remote object */
    };

    /* extra data associated with local object */
    binder_uintptr_t cookie;
};
Now look at writeStrongBinder:
status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
    return flatten_binder(ProcessState::self(), val, this);
}

status_t flatten_binder(const sp<ProcessState>& /*proc*/,
    const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;

    if (binder != NULL) {
        /* determine from the concrete IBinder implementation whether this is a
           BBinder (local) or a BpBinder (remote): binder vs. handle */
        IBinder *local = binder->localBinder();
        if (!local) {
            BpBinder *proxy = binder->remoteBinder();
            const int32_t handle = proxy ? proxy->handle() : 0;
            obj.type = BINDER_TYPE_HANDLE;
            obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
            obj.handle = handle;
            obj.cookie = 0;
        } else {
            obj.type = BINDER_TYPE_BINDER;
            obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
            obj.cookie = reinterpret_cast<uintptr_t>(local);
        }
    }
    return finish_flatten_binder(binder, obj, out);
}
Which branch is taken above is decided by the following overrides (BBinder is the server-side counterpart of BpBinder). The base class IBinder declares both localBinder() and remoteBinder() and implements each to return NULL; BBinder overrides localBinder() while BpBinder overrides remoteBinder():

class IBinder : public virtual RefBase
{
    virtual BBinder*        localBinder();
    virtual BpBinder*       remoteBinder();
};
BBinder* IBinder::localBinder()
{
    return NULL;
}
BpBinder* IBinder::remoteBinder()
{
    return NULL;
}
BBinder* BBinder::localBinder()
{
    return this;
}
BpBinder* BpBinder::remoteBinder()
{
    return this;
}

3.2 The transact flow

status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    // Once a binder has died, it will never come back to life.
    if (mAlive) {
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
        return status;
    }
    return DEAD_OBJECT;
}
mHandle is assigned in BpBinder's constructor; here it is 0, the ServiceManager handle.
status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    /* simplified: error checking and one-way flag handling are omitted */
    writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
    waitForResponse(NULL, NULL);
}

3.2.1 writeTransactionData: building the binder_transaction_data
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    binder_transaction_data tr;

    tr.target.ptr = 0; /* Don't pass uninitialized stack data to a remote process */
    tr.target.handle = handle; /* who the command/transaction is addressed to */
    tr.code = code;
    tr.flags = binderFlags;
    tr.cookie = 0;
    tr.sender_pid = 0;
    tr.sender_euid = 0;

    const status_t err = data.errorCheck();
    if (err == NO_ERROR) {
        tr.data_size = data.ipcDataSize();
        tr.data.ptr.buffer = data.ipcData();
        /* offsets_size: byte size of the offsets array, i.e. the number of
         * flat_binder_object entries times sizeof(binder_size_t) */
        tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
        /* data.ptr.offsets: offset of each flat_binder_object within the
         * buffer; this is how the driver later locates the binder objects */
        tr.data.ptr.offsets = data.ipcObjects();
    }

    mOut.writeInt32(cmd);
    mOut.write(&tr, sizeof(tr));
    return NO_ERROR;
}
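
The resulting layout of mOut can be pictured as follows (illustrative only; note the Parcel payload is referenced by pointer rather than copied into mOut):

mOut:  | BC_TRANSACTION | binder_transaction_data tr |
           tr.target.handle    = 0  (ServiceManager)
           tr.code             = ADD_SERVICE_TRANSACTION
           tr.data.ptr.buffer  --> interface token | name | flat_binder_object | allowIsolated
           tr.data.ptr.offsets --> [ offset of the flat_binder_object within the buffer ]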

3.2.2 waitForResponse(NULL, NULL)

status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    uint32_t cmd;
    int32_t err;
    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break;
        err = mIn.errorCheck();
        if (err < NO_ERROR) break;
        if (mIn.dataAvail() == 0) continue;
        cmd = (uint32_t)mIn.readInt32();
        switch (cmd) {
        case BR_TRANSACTION_COMPLETE:
            if (!reply && !acquireResult) goto finish;
            break;
        ......
        }
    }
}
Focus on talkWithDriver():
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    binder_write_read bwr;
    // Is the read buffer empty?
    const bool needRead = mIn.dataPosition() >= mIn.dataSize();

    // We don't want to write anything if we are still reading
    // from data left in the input buffer and the caller
    // has requested to read the next data.
    const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;

    bwr.write_size = outAvail;
    bwr.write_buffer = (uintptr_t)mOut.data();

    // This is what we'll read.
    if (doReceive && needRead) {
        bwr.read_size = mIn.dataCapacity();
        bwr.read_buffer = (uintptr_t)mIn.data();
    } else {
        bwr.read_size = 0;
        bwr.read_buffer = 0;
    }
    // Return immediately if there is nothing to do.
    if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
    bwr.write_consumed = 0;
    bwr.read_consumed = 0;
    status_t err;
    do {
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
            err = NO_ERROR;
        else
            err = -errno;
    } while (err == -EINTR);
    if (err >= NO_ERROR) {
        if (bwr.write_consumed > 0) {
            if (bwr.write_consumed < mOut.dataSize())
                mOut.remove(0, bwr.write_consumed);
            else
                mOut.setDataSize(0);
        }
        if (bwr.read_consumed > 0) {
            mIn.setDataSize(bwr.read_consumed);
            mIn.setDataPosition(0);
        }
        return NO_ERROR;
    }
    return err;
}
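
For reference, this is the binder_write_read structure exchanged with the driver (from the binder UAPI header):

struct binder_write_read {
    binder_size_t       write_size;     /* bytes to write */
    binder_size_t       write_consumed; /* bytes consumed by driver */
    binder_uintptr_t    write_buffer;
    binder_size_t       read_size;      /* bytes to read */
    binder_size_t       read_consumed;  /* bytes consumed by driver */
    binder_uintptr_t    read_buffer;
};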

4. Binder in the kernel

4.1 Finding the target process to wake up

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    int ret;
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;

    ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    thread = binder_get_thread(proc);
    switch (cmd) {
    case BINDER_WRITE_READ:
        ret = binder_ioctl_write_read(filp, cmd, arg, thread);
        if (ret)
            goto err;
        break;
    }
}

static int binder_ioctl_write_read(struct file *filp,
                unsigned int cmd, unsigned long arg,
                struct binder_thread *thread)
{
    int ret = 0;
    struct binder_proc *proc = filp->private_data;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;
    struct binder_write_read bwr;

    if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
        ret = -EFAULT;
        goto out;
    }
    if (bwr.write_size > 0) {
        ret = binder_thread_write(proc, thread,
                                  bwr.write_buffer,
                                  bwr.write_size,
                                  &bwr.write_consumed);
    }
out:
    return ret;
}

static int binder_thread_write(struct binder_proc *proc,
            struct binder_thread *thread,
            binder_uintptr_t binder_buffer, size_t size,
            binder_size_t *consumed)
{
    uint32_t cmd;
    struct binder_context *context = proc->context;
    void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    while (ptr < end && thread->return_error.cmd == BR_OK) {
        if (get_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        switch (cmd) {
        case BC_TRANSACTION:
        case BC_REPLY: {
            struct binder_transaction_data tr;

            if (copy_from_user(&tr, ptr, sizeof(tr)))
                return -EFAULT;
            ptr += sizeof(tr);
            binder_transaction(proc, thread, &tr,
                               cmd == BC_REPLY, 0);
            break;
        }
        }
    }
    return 0;
}

static void
binder_transaction(struct binder_proc *proc, struct binder_thread *thread,
                   struct binder_transaction_data *tr, int reply)
{
    struct binder_transaction *t;
    struct binder_work *tcomplete;
    size_t *offp, *off_end;
    struct binder_proc *target_proc;
    struct binder_thread *target_thread = NULL;
    struct binder_node *target_node = NULL;
    struct list_head *target_list;
    wait_queue_head_t *target_wait;
    struct binder_transaction *in_reply_to = NULL;
    struct binder_transaction_log_entry *e;
    uint32_t return_error;
    ......
    if (reply) {
        ......
    } else {
        if (tr->target.handle) { /* the target handle is known (non-zero) */
            ......
        } else { /* target handle 0: the context manager, i.e. Service Manager */
            target_node = binder_context_mgr_node;
        }
        /* resolve target handle -> target node -> target proc */
        ......
        target_proc = target_node->proc;
        ......
    }
    if (target_thread) {
        ......
    } else {
        target_list = &target_proc->todo;
        target_wait = &target_proc->wait;
    }
    ......
    /* allocate a pending transaction t and a completion work item tcomplete,
     * and initialize them */
    t = kzalloc(sizeof(*t), GFP_KERNEL);
    ......
    tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
    ......
    if (!reply && !(tr->flags & TF_ONE_WAY))
        t->from = thread;
    else
        t->from = NULL;
    t->sender_euid = proc->tsk->cred->euid;
    t->to_proc = target_proc;
    t->to_thread = target_thread;
    t->code = tr->code;
    t->flags = tr->flags;
    t->priority = task_nice(current);
    /* transaction t is to be handled by target_proc, which in this scenario
     * is Service Manager, so a buffer is allocated in Service Manager's
     * address space to hold the arguments passed in from user space */
    t->buffer = binder_alloc_buf(target_proc, tr->data_size,
        tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
    t->buffer->allow_user_free = 0;
    t->buffer->debug_id = t->debug_id;
    t->buffer->transaction = t;
    t->buffer->target_node = target_node;
    if (target_node)
        binder_inc_node(target_node, 1, 0, NULL);

    offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));

    if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
        ......
        return_error = BR_FAILED_REPLY;
        goto err_copy_data_failed;
    }
    if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
        ......
        return_error = BR_FAILED_REPLY;
        goto err_copy_data_failed;
    }
    ......
    /* this for loop processes the binder objects embedded in the payload */
    off_end = (void *)offp + tr->offsets_size;
    for (; offp < off_end; offp++) {
        struct flat_binder_object *fp;
        ......
        fp = (struct flat_binder_object *)(t->buffer->data + *offp);
        switch (fp->type) {
        case BINDER_TYPE_BINDER:
        case BINDER_TYPE_WEAK_BINDER: {
            struct binder_ref *ref;
            struct binder_node *node = binder_get_node(proc, fp->binder);
            if (node == NULL) {
                node = binder_new_node(proc, fp->binder, fp->cookie);
                if (node == NULL) {
                    return_error = BR_FAILED_REPLY;
                    goto err_binder_new_node_failed;
                }
                node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
                node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
            }
            if (fp->cookie != node->cookie) {
                ......
                goto err_binder_get_ref_for_node_failed;
            }
            ref = binder_get_ref_for_node(target_proc, node);
            /* rewrite the transferred content: binder type and handle value */
            if (fp->type == BINDER_TYPE_BINDER)
                fp->type = BINDER_TYPE_HANDLE;
            else
                fp->type = BINDER_TYPE_WEAK_HANDLE;
            fp->handle = ref->desc;
            binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, &thread->todo);
            ......
        } break;
        ......
        }
    }

    if (reply) {
        ......
    } else if (!(t->flags & TF_ONE_WAY)) {
        BUG_ON(t->buffer->async_transaction != 0);
        t->need_reply = 1;
        t->from_parent = thread->transaction_stack;
        thread->transaction_stack = t;
    } else {
        ......
    }
    t->work.type = BINDER_WORK_TRANSACTION;
    /* enqueue the pending transaction on target_list */
    list_add_tail(&t->work.entry, target_list);
    tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
    /* enqueue the completion work item on this thread's own todo list */
    list_add_tail(&tcomplete->entry, &thread->todo);
    /* the target process now has work to do, so wake it up */
    if (target_wait)
        wake_up_interruptible(target_wait);
    return;
}

4.2 Waking up the target

4.2.1 The kernel-side wake-up

Service Manager is asleep inside binder_thread_read, blocked in wait_event_interruptible_exclusive. Once woken by the process that is registering MediaPlayerService, it resumes executing binder_thread_read:

static int
binder_thread_read(struct binder_proc *proc, struct binder_thread *thread,
                   void __user *buffer, int size, signed long *consumed, int non_block)
{
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    int ret = 0;
    int wait_for_proc_work;

    if (*consumed == 0) {
        if (put_user(BR_NOOP, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
    }

retry:
    wait_for_proc_work = thread->transaction_stack == NULL && list_empty(&thread->todo);
    ......

    if (wait_for_proc_work) {
        ......
        if (non_block) {
            if (!binder_has_proc_work(proc, thread))
                ret = -EAGAIN;
        } else
            ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
    } else {
        ......
    }

    ......

    while (1) {
        uint32_t cmd;
        struct binder_transaction_data tr;
        struct binder_work *w;
        struct binder_transaction *t = NULL;

        if (!list_empty(&thread->todo))
            w = list_first_entry(&thread->todo, struct binder_work, entry);
        else if (!list_empty(&proc->todo) && wait_for_proc_work)
            w = list_first_entry(&proc->todo, struct binder_work, entry);
        else {
            if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
                goto retry;
            break;
        }

        if (end - ptr < sizeof(tr) + 4)
            break;

        switch (w->type) {
        case BINDER_WORK_TRANSACTION: {
            /* the work item type is BINDER_WORK_TRANSACTION, so recover the
             * enclosing transaction from it */
            t = container_of(w, struct binder_transaction, work);
        } break;
        ......
        }

        if (!t)
            continue;

        /* copy the data of transaction t into the local variable
         * struct binder_transaction_data tr */
        if (t->buffer->target_node) {
            struct binder_node *target_node = t->buffer->target_node;
            tr.target.ptr = target_node->ptr;
            tr.cookie = target_node->cookie;
            ......
            cmd = BR_TRANSACTION;
        } else {
            ......
        }
        tr.code = t->code;
        tr.flags = t->flags;
        tr.sender_euid = t->sender_euid;

        if (t->from) {
            struct task_struct *sender = t->from->proc->tsk;
            tr.sender_pid = task_tgid_nr_ns(sender, current->nsproxy->pid_ns);
        } else {
            tr.sender_pid = 0;
        }

        tr.data_size = t->buffer->data_size;
        tr.offsets_size = t->buffer->offsets_size;
        /* t->buffer->data points into kernel space, but the data has to be
         * handed back to Service Manager's user space, which cannot access
         * kernel addresses. The user-space mapping and the kernel buffer
         * share the same physical pages, so adding the fixed offset
         * proc->user_buffer_offset converts the kernel virtual address into
         * the corresponding user-space virtual address. */
        tr.data.ptr.buffer = (void *)t->buffer->data + proc->user_buffer_offset;
        tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *));
        /* copy tr into the caller-supplied buffer; ptr points into that buffer */
        if (put_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        if (copy_to_user(ptr, &tr, sizeof(tr)))
            return -EFAULT;
        ptr += sizeof(tr);
        ......
        /* the work item has been handled, so remove it from the todo list */
        list_del(&t->work.entry);
        t->buffer->allow_user_free = 1;
        if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
            t->to_parent = thread->transaction_stack;
            t->to_thread = thread;
            thread->transaction_stack = t;
        } else {
            t->buffer->transaction = NULL;
            kfree(t);
            binder_stats.obj_deleted[BINDER_STAT_TRANSACTION]++;
        }
        break;
    }

done:
    ......
    return 0;
}

4.2.2 Running the user-space handler

Control returns from the kernel to the binder_loop function in frameworks/base/cmds/servicemanager/binder.c. The data read back is placed in readbuf and then handed to binder_parse for parsing.
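
For reference, an abridged sketch of binder_loop itself (error handling trimmed):

void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER; /* tell the driver this thread enters the loop */
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        /* blocks in the kernel's binder_thread_read until work arrives */
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        if (res < 0)
            break;

        res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func);
        if (res <= 0)
            break;
    }
}

binder_parse then dispatches each command returned by the driver: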
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uint32_t *ptr, uint32_t size, binder_handler func)
{
    int r = 1;
    uint32_t *end = ptr + (size / 4);

    while (ptr < end) {
        uint32_t cmd = *ptr++;
        switch (cmd) {
        ......
        case BR_TRANSACTION: {
            /* interpret the data read from the binder driver as a struct binder_txn */
            struct binder_txn *txn = (void *) ptr;
            if ((end - ptr) * sizeof(uint32_t) < sizeof(struct binder_txn)) {
                LOGE("parse: txn too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (func) {
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;
                int res;

                /* initialize the reply variable with bio_init */
                bio_init(&reply, rdata, sizeof(rdata), 4);
                /* initialize the msg variable from the transaction */
                bio_init_from_txn(&msg, txn);
                res = func(bs, txn, &msg, &reply);
                binder_send_reply(bs, &reply, txn->data, res);
            }
            ptr += sizeof(*txn) / sizeof(uint32_t);
            break;
        }
        ......
        default:
            LOGE("parse: OOPS %d\n", cmd);
            return -1;
        }
    }

    return r;
}

int svcmgr_handler(struct binder_state *bs,
                   struct binder_txn *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *s;
    unsigned len;
    void *ptr;
    uint32_t strict_policy;

    if (txn->target != svcmgr_handle)
        return -1;

    /* matches the sender's writeInt32(IPCThreadState::self()->getStrictModePolicy()) */
    strict_policy = bio_get_uint32(msg);
    /* matches the sender's writeString16("android.os.IServiceManager") */
    s = bio_get_string16(msg, &len);
    if ((len != (sizeof(svcmgr_id) / 2)) ||
        memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
        fprintf(stderr,"invalid id %s\n", str8(s));
        return -1;
    }

    switch(txn->code) {
    ......
    case SVC_MGR_ADD_SERVICE:
        /* the service name: matches writeString16("xxxx") */
        s = bio_get_string16(msg, &len);
        /* matches writeStrongBinder(new xxx()) */
        ptr = bio_get_ref(msg);
        if (do_add_service(bs, s, len, ptr, txn->sender_euid))
            return -1;
        break;
    ......
    }

    bio_put_uint32(reply, 0);
    return 0;
}
/* now look at how the transferred binder object is read back: */
void *bio_get_ref(struct binder_io *bio)
{
    struct binder_object *obj;
 
    obj = _bio_get_obj(bio);
    if (!obj)
        return 0;
    /* _bio_get_obj (not expanded here) fetches the first binder_object not yet
     * consumed from the binder_io.
     * The original flat_binder_object had type BINDER_TYPE_BINDER,
     * but the driver rewrote it to BINDER_TYPE_HANDLE; after that rewrite
     * the handle value is what obj->pointer now holds.
     */

    if (obj->type == BINDER_TYPE_HANDLE)
        return obj->pointer;
 
    return 0;
}

/* do_add_service is straightforward: it records the reference to the
MediaPlayerService binder entity in a struct svcinfo (chiefly its name and
handle value) and inserts it at the head of the svclist. When a client later
asks Service Manager for a service interface, Service Manager can return the
matching handle value given just the service name. */

int do_add_service(struct binder_state *bs,
                   uint16_t *s, unsigned len,
                   void *ptr, unsigned uid)
{
    struct svcinfo *si;
//    LOGI("add_service('%s',%p) uid=%d\n", str8(s), ptr, uid);
 
    if (!ptr || (len == 0) || (len > 127))
        return -1;
 
    if (!svc_can_register(uid, s)) {
        LOGE("add_service('%s',%p) uid=%d - PERMISSION DENIED\n",
             str8(s), ptr, uid);
        return -1;
    }
 
    si = find_svc(s, len);
    if (si) {
        if (si->ptr) {
            LOGE("add_service('%s',%p) uid=%d - ALREADY REGISTERED\n",
                 str8(s), ptr, uid);
            return -1;
        }
        si->ptr = ptr;
    } else {
        si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
        if (!si) {
            LOGE("add_service('%s',%p) uid=%d - OUT OF MEMORY\n",
                 str8(s), ptr, uid);
            return -1;
        }
        si->ptr = ptr;
        si->len = len;
        memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
        si->name[len] = '\0';
        si->death.func = svcinfo_death;
        si->death.ptr = si;
        si->next = svclist;
        svclist = si;
    }
 
    binder_acquire(bs, ptr);
    binder_link_to_death(bs, ptr, &si->death);
    return 0;

}

5. Obtaining a service proxy through ServiceManager

5.1 Returning an IBinder object for a service name

Taking "media.player" as an example, here is how the service's proxy is obtained.
Service Manager looks up the svcinfo matching the requested name in the svclist, then returns to do_find_service; there si->ptr is the handle value of the MediaPlayerService binder entity within the Service Manager process.
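
An abridged sketch of that lookup, find_svc from service_manager.c (the wrapping do_find_service checks are omitted):

struct svcinfo *find_svc(uint16_t *s16, unsigned len)
{
    struct svcinfo *si;

    /* walk the svclist comparing the UTF-16 service name */
    for (si = svclist; si; si = si->next) {
        if ((len == si->len) &&
            !memcmp(s16, si->name, len * sizeof(uint16_t)))
            return si;
    }
    return 0;
}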

/* write a binder_object of type BINDER_TYPE_HANDLE into the reply buffer */
void bio_put_ref(struct binder_io *bio, void *ptr)
{
    struct binder_object *obj;
 
    if (ptr)
        obj = bio_alloc_obj(bio);
    else
        obj = bio_alloc(bio, sizeof(*obj));
 
    if (!obj)
        return;
 
    obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    obj->type = BINDER_TYPE_HANDLE;
    obj->pointer = ptr;
    obj->cookie = 0;
}

/* back in binder_parse, binder_send_reply feeds the result back to the binder driver */
void binder_send_reply(struct binder_state *bs,
                       struct binder_io *reply,
                       void *buffer_to_free,
                       int status)
{
    struct {
        uint32_t cmd_free;
        void *buffer;
        uint32_t cmd_reply;
        struct binder_txn txn;
    } __attribute__((packed)) data;
    /* free the buffer previously allocated in binder_transaction, at address
     * buffer_to_free. The binder driver converted its own kernel-space address
     * into a user-space address before handing it to Service Manager, so when
     * the driver receives this address back it knows how to free the buffer. */

    data.cmd_free = BC_FREE_BUFFER;
    data.buffer = buffer_to_free;
    /* tell the binder driver that the SVC_MGR_CHECK_SERVICE operation has
     * completed; the handle of the queried service is carried in data.txn.data */
    data.cmd_reply = BC_REPLY;
    data.txn.target = 0;
    data.txn.cookie = 0;
    data.txn.code = 0;
    {
        data.txn.flags = 0;
        data.txn.data_size = reply->data - reply->data0;
        data.txn.offs_size = ((char*) reply->offs) - ((char*) reply->offs0);
        data.txn.data = reply->data0;
        data.txn.offs = reply->offs0;
    }
    binder_write(bs, &data, sizeof(data));
}
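
binder_write wraps the data in a write-only BINDER_WRITE_READ ioctl; an abridged sketch:

int binder_write(struct binder_state *bs, void *data, unsigned len)
{
    struct binder_write_read bwr;
    int res;

    bwr.write_size = len;
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) data;
    bwr.read_size = 0; /* write only: do not block waiting for input */
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0)
        fprintf(stderr, "binder_write: ioctl failed (%s)\n", strerror(errno));
    return res;
}

The ioctl lands back in the kernel's binder_thread_write: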

int
binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
                    void __user *buffer, int size, signed long *consumed)
{
    uint32_t cmd;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    while (ptr < end && thread->return_error == BR_OK) {
        if (get_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
            binder_stats.bc[_IOC_NR(cmd)]++;
            proc->stats.bc[_IOC_NR(cmd)]++;
            thread->stats.bc[_IOC_NR(cmd)]++;
        }
        switch (cmd) {
        ......
        case BC_TRANSACTION:
        case BC_REPLY: {
            struct binder_transaction_data tr;

            if (copy_from_user(&tr, ptr, sizeof(tr)))
                return -EFAULT;
            ptr += sizeof(tr);
            binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
            break;
        }
        ......
        }
        *consumed = ptr - buffer;
    }
    return 0;
}

binder_transaction is entered here for a second time.

1. How does the reply find target_proc and target_thread?
This entry into binder_transaction looks much like the entry triggered by getService, except that proc/thread and target_proc/target_thread have swapped roles: proc and thread now refer to the Service Manager process, while target_proc and target_thread refer to the process that issued the SVC_MGR_CHECK_SERVICE request.
First, note that reply equals 1 here. Second, as described above, when the binder driver woke Service Manager and handed it transaction t, t was removed from Service Manager's todo queue but remained on its transaction_stack. The pending transaction t can therefore be recovered from thread->transaction_stack, and through it target_proc and target_thread are found, as the sketch below shows.
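
An abridged sketch of that reply branch inside binder_transaction:

if (reply) {
    /* recover the transaction being replied to from this thread's stack */
    in_reply_to = thread->transaction_stack;
    ......
    thread->transaction_stack = in_reply_to->to_parent;
    target_thread = in_reply_to->from; /* the thread that sent the request */
    ......
    target_proc = target_thread->proc; /* the requesting process */
}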

2. Converting the binder data structures

Because Service Manager is returning a binder reference, it needs extra processing here. The reference is of type BINDER_TYPE_HANDLE, which was set earlier. First the content of t->buffer->data is interpreted as a struct flat_binder_object fp; fp->handle is the reference value this service has inside the Service Manager process. Then binder_get_ref is called to obtain the corresponding struct binder_ref object:
struct binder_ref *ref = binder_get_ref(proc, fp->handle);

Here ref->node->proc is not equal to target_proc, because the binder entity belongs to the process that created MediaPlayerService, not to the process requesting the remote interface. So binder_get_ref_for_node is called to create a reference to this binder entity inside target_proc:
struct binder_ref *new_ref;
new_ref = binder_get_ref_for_node(target_proc, ref->node);

With that, the binder object in the returned data has been processed. Note that fp->handle is rewritten to the reference value valid inside target_proc:
fp->handle = new_ref->desc;

This amounts to rewriting the handle value of the binder object held in t->buffer->data. Since the reference now lives in a different process, it naturally needs a new handle value, and this value is eventually copied back to target_proc's user space.
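
Putting those pieces together, an abridged sketch of the BINDER_TYPE_HANDLE case in binder_transaction's object loop:

case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
    struct binder_ref *ref = binder_get_ref(proc, fp->handle);
    ......
    if (ref->node->proc == target_proc) {
        /* the entity lives in the receiver itself: hand back a local binder */
        if (fp->type == BINDER_TYPE_HANDLE)
            fp->type = BINDER_TYPE_BINDER;
        fp->binder = ref->node->ptr;
        fp->cookie = ref->node->cookie;
        ......
    } else {
        /* create (or reuse) a reference in the receiving process and
         * rewrite the handle to the receiver-local value */
        struct binder_ref *new_ref;
        new_ref = binder_get_ref_for_node(target_proc, ref->node);
        fp->handle = new_ref->desc;
        ......
    }
} break;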

3. Handling the return value of the getService request

With that, Service Manager has finished replying to the SVC_MGR_CHECK_SERVICE request and goes back to the binder_loop function in frameworks/base/cmds/servicemanager/binder.c to wait for the next client request. The wake-up path is not described again here.
Control returns to user space in IPCThreadState::talkWithDriver, then to IPCThreadState::waitForResponse, finally reaching the following statements:
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    int32_t cmd;
    int32_t err;

    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break;

        ......

        cmd = mIn.readInt32();

        ......

        switch (cmd) {
        ......
        case BR_REPLY:
            {
                binder_transaction_data tr;
                err = mIn.read(&tr, sizeof(tr));
                LOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
                if (err != NO_ERROR) goto finish;

                if (reply) {
                    if ((tr.flags & TF_STATUS_CODE) == 0) {
                        reply->ipcSetDataReference(
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(size_t),
                            freeBuffer, this);
                    } else {
                        ......
                    }
                } else {
                    ......
                }
            }
            goto finish;

        ......
        }
    }

finish:
    ......
    return err;
}

/* BpServiceManager::checkService, declared virtual in the class definition */
sp<IBinder> BpServiceManager::checkService( const String16& name) const
{
    Parcel data, reply;
    data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
    data.writeString16(name);
    remote()->transact(CHECK_SERVICE_TRANSACTION, data, &reply);
    return reply.readStrongBinder();
}

5.2 Generating a Bpxxx proxy from the handle inside the IBinder object

sp<IBinder> Parcel::readStrongBinder() const
{
    sp<IBinder> val;
    unflatten_binder(ProcessState::self(), *this, &val);
    return val;
}

status_t unflatten_binder(const sp<ProcessState>& proc,
    const Parcel& in, sp<IBinder>* out)
{
    const flat_binder_object* flat = in.readObject(false);
    
    if (flat) {
        switch (flat->type) {
            case BINDER_TYPE_BINDER:
                *out = static_cast<IBinder*>(flat->cookie);
                return finish_unflatten_binder(NULL, *flat, in);
            case BINDER_TYPE_HANDLE:
                *out = proc->getStrongProxyForHandle(flat->handle);
                return finish_unflatten_binder( static_cast<BpBinder*>(out->get()), *flat, in);
        }        
    }
    return BAD_TYPE;
}

sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
    sp<IBinder> result;
 
    AutoMutex _l(mLock);
 
    handle_entry* e = lookupHandleLocked(handle);
 
    if (e != NULL) {
        // We need to create a new BpBinder if there isn't currently one, OR we
        // are unable to acquire a weak reference on this current one.  See comment
        // in getWeakProxyForHandle() for more info about this.
        IBinder* b = e->binder;
        if (b == NULL || !e->refs->attemptIncWeak(this)) {
            b = new BpBinder(handle); 
            e->binder = b;
            if (b) e->refs = b->getWeakRefs();
            result = b;
        } else {
            // This little bit of nastyness is to allow us to add a primary
            // reference to the remote proxy when this team doesn't have one
            // but another team is sending the handle to us.
            result.force_set(b);
            e->refs->decWeak(this);
        }
    }
 
    return result;
}


binder = sm->getService(String16("media.player"));
/* inside getService this amounts to: */
binder = new BpBinder(handle);

sMediaPlayerService = interface_cast<IMediaPlayerService>(binder);

android::sp<IMediaPlayerService> IMediaPlayerService::asInterface(const android::sp<android::IBinder>& obj)
{
    android::sp<IMediaPlayerService> intr;
    if (obj != NULL) {
        intr = static_cast<IMediaPlayerService*>(
            obj->queryLocalInterface(IMediaPlayerService::descriptor).get());
        if (intr == NULL) {
            intr = new BpMediaPlayerService(obj);
        }
    }
    return intr;
}

/* which simplifies to: */
intr = new BpMediaPlayerService(new BpBinder(handle));
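
Tying the whole flow together, a minimal hypothetical client looks like this (error handling omitted; IMediaPlayerService is used only as the example interface):

sp<IServiceManager> sm = defaultServiceManager();              // BpServiceManager(new BpBinder(0))
sp<IBinder> binder = sm->getService(String16("media.player")); // BpBinder(handle) returned by ServiceManager
sp<IMediaPlayerService> player = interface_cast<IMediaPlayerService>(binder); // BpMediaPlayerService
/* every call on player now goes through BpBinder::transact and the binder driver */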



Reprinted from blog.csdn.net/u011279649/article/details/80907584