Category: LINUX

2008-12-11 16:57:35

A brief analysis of parts of the native-layer binder implementation on Android

In the defaultServiceManager() method: [luther.gliethttp]
==>defaultServiceManager
==>
the m_ptr of the global sp<IServiceManager> gDefaultServiceManager ends up pointing at new BpServiceManager(obj), because the = operator is overloaded by the sp<> smart-pointer template (a minimal sketch of that overload appears after the constructor chain below).
Here obj is the sp<IBinder> returned by ProcessState::self()->getContextObject(NULL): ProcessState::self() returns the global sp<ProcessState> gProcess, and getContextObject() calls getStrongProxyForHandle(0), so obj's m_ptr points at new BpBinder(handle) with handle == 0 (again via the overloaded sp<> operator=).
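For reference, a condensed version of the code behind this chain (based on the 2008-era IServiceManager.cpp; locking details may differ slightly between releases):

sp<IServiceManager> defaultServiceManager()
{
    if (gDefaultServiceManager != NULL) return gDefaultServiceManager;

    {
        AutoMutex _l(gDefaultServiceManagerLock);
        if (gDefaultServiceManager == NULL) {
            // getContextObject(NULL) hands back an sp<IBinder> wrapping new BpBinder(0);
            // interface_cast<IServiceManager> (generated by IMPLEMENT_META_INTERFACE)
            // then wraps that IBinder in a new BpServiceManager.
            gDefaultServiceManager = interface_cast<IServiceManager>(
                ProcessState::self()->getContextObject(NULL));
        }
    }

    return gDefaultServiceManager;
}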
Now look at the BpServiceManager constructor:
    BpServiceManager(const sp<IBinder>& impl)
        : BpInterface<IServiceManager>(impl)
    {
    }

    template<typename INTERFACE>
    inline BpInterface<INTERFACE>::BpInterface(const sp<IBinder>& remote)
        : BpRefBase(remote)
    {
    }

    BpRefBase::BpRefBase(const sp<IBinder>& o)
        : mRemote(o.get()), mRefs(NULL), mState(0)//o.get() returns the raw m_ptr held by the sp, i.e. the new BpBinder(handle) created in getStrongProxyForHandle()
    {//mRemote is declared in class BpRefBase as: IBinder* const mRemote;[luther.gliethttp]
        extendObjectLifetime(OBJECT_LIFETIME_WEAK);

        if (mRemote) {
            mRemote->incStrong(this); // Removed on first IncStrong().
            mRefs = mRemote->createWeak(this); // Held for our entire lifetime.
        }
    }
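Incidentally, the "= operator overloaded by the sp<> template" mentioned above boils down to roughly the following (a minimal sketch of the assignment operator in RefBase.h, not the complete template):

template<typename T>
sp<T>& sp<T>::operator = (T* other)
{
    if (other) other->incStrong(this); // take a strong reference on the new object first
    if (m_ptr) m_ptr->decStrong(this); // then drop the reference held on the old one
    m_ptr = other;                     // m_ptr now points at the new object
    return *this;
}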

remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
Here remote() is the inline accessor: inline IBinder* remote() { return mRemote; }
Since mRemote is a plain IBinder* pointer and the -> operator is not overloaded, the call dispatches (virtually) to BpBinder::transact [luther.gliethttp]:
status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    // Once a binder has died, it will never come back to life.
    if (mAlive) {
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }

    return DEAD_OBJECT;
}
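For context, the ADD_SERVICE_TRANSACTION above is issued by BpServiceManager::addService, which first packs its arguments into a Parcel; it is writeStrongBinder() that leads into flatten_binder() below. A condensed sketch, per the era's IServiceManager.cpp:

    virtual status_t addService(const String16& name, const sp<IBinder>& service)
    {
        Parcel data, reply;
        data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
        data.writeString16(name);
        data.writeStrongBinder(service); // Parcel::writeStrongBinder() calls flatten_binder()
        status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
        return err == NO_ERROR ? reply.readInt32() : err;
    }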


status_t flatten_binder(const sp<ProcessState>& proc,
    const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;
    
    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    if (binder != NULL) {
        IBinder *local = binder->localBinder();
//For BBinder the virtual is overridden:
//BBinder* BBinder::localBinder()
//{
// return this;
//}
//while the IBinder base class returns NULL:
//BBinder* IBinder::localBinder()
//{
// return NULL;
//}
        if (!local) {
            BpBinder *proxy = binder->remoteBinder();
            if (proxy == NULL) {
                LOGE("null proxy");
            }
            const int32_t handle = proxy ? proxy->handle() : 0;
            obj.type = BINDER_TYPE_HANDLE;
            obj.handle = handle;
            obj.cookie = NULL;
        } else {//IServiceManager::addService passes a local BBinder, so execution takes this branch
            obj.type = BINDER_TYPE_BINDER;
            obj.binder = local->getWeakRefs();//serves as the unique key in the driver's red-black node tree
            obj.cookie = local;
        }
    } else {
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = NULL;
        obj.cookie = NULL;
    }
    
    return finish_flatten_binder(binder, obj, out);
}
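For completeness, finish_flatten_binder() is a one-liner that appends the flat_binder_object to the Parcel (sketch, per the same era's Parcel.cpp; writeObject() also records the object's offset so the driver can find it):

inline static status_t finish_flatten_binder(
    const sp<IBinder>& binder, const flat_binder_object& flat, Parcel* out)
{
    return out->writeObject(flat, false);
}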

commands/binder/service_manager.c
=>svcmgr_handler
=>do_add_service
commands/binder/service_manager.c
==>main
==>bs = binder_open(128*1024);
//proc->tsk = current;
//filp->private_data = proc;
==>binder_become_context_manager(bs);//make bs the binder context manager by sending ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
//after this, the driver's binder_context_mgr_node refers to this bs
==>binder_loop(bs, svcmgr_handler);//svcmgr_handler is the callback used to process incoming messages; a condensed main() follows
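Putting those steps together, main() in service_manager.c is essentially (condensed sketch; error handling and the svcmgr_handle setup are trimmed):

int main(int argc, char **argv)
{
    struct binder_state *bs;

    bs = binder_open(128*1024);              // open /dev/binder and mmap a 128 KB read buffer
    if (binder_become_context_manager(bs)) { // ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0)
        LOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }
    binder_loop(bs, svcmgr_handler);         // never returns; dispatches requests to svcmgr_handler
    return 0;
}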
void binder_loop(struct binder_state *bs, binder_handler func)
{
    ...
    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(unsigned));
//thread->looper |= BINDER_LOOPER_STATE_ENTERED;
//struct binder_thread *thread = binder_get_thread() checks whether this proc already has a binder_thread for the calling task;
//rb_link_node(&thread->rb_node, parent, p);
//rb_insert_color(&thread->rb_node, &proc->threads); binds current [i.e. the task that opened the proc] to a binder_thread and inserts it into the proc's red-black tree of threads.
//that tree is keyed by pid: thread->pid = current->pid;
/*
 * struct binder_stats {
 *     int br[_IOC_NR(BR_FAILED_REPLY) + 1];
 *     int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
 *     int obj_created[BINDER_STAT_COUNT];
 *     int obj_deleted[BINDER_STAT_COUNT];
 * };
 *
 * static struct binder_stats binder_stats;
 */

    for (;;) {
        ...
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (unsigned) readbuf;
//wait_for_proc_work = thread->transaction_stack == NULL && list_empty(&thread->todo);
//i.e. there is nothing pending on the todo list
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
//the driver first pushes a 4-byte BR_NOOP control header into the read buffer
        ...
        res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func);
        ...
    }
    ...
}
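binder_parse() walks the BR_* commands in the buffer that was just read. The interesting case is BR_TRANSACTION: the request is wrapped in a binder_io and handed to the func callback (svcmgr_handler here), and the reply is pushed back with binder_send_reply(). A condensed sketch of that case, per the era's servicemanager binder.c:

    case BR_TRANSACTION: {
        struct binder_txn *txn = (struct binder_txn *) ptr;
        if (func) {
            unsigned rdata[256/4];
            struct binder_io msg;
            struct binder_io reply;
            int res;

            bio_init(&reply, rdata, sizeof(rdata), 4);
            bio_init_from_txn(&msg, txn);      // wrap the incoming transaction buffer
            res = func(bs, txn, &msg, &reply); // svcmgr_handler => do_add_service etc.
            binder_send_reply(bs, &reply, txn->data, res);
        }
        ptr += sizeof(*txn) / sizeof(uint32_t);
        break;
    }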


In the driver:
device_initcall(binder_init);
=>binder_init
=>creates the /proc/binder root directory
=>creates the /proc/binder/proc directory
=>misc_register(&binder_miscdev); registers the /dev/binder character device
=>the /dev/binder node itself is created under /dev/ by the init process, after uevent-netlink handling in handle_device_fd(device_fd);==>handle_device_event(&uevent);
=>creates the read-only files "state", "stats", "transactions", "transaction_log" and "failed_transaction_log",
  binding read handlers such as binder_read_proc_stats
With that, the binder driver's registration with the kernel is complete; all that remains is for applications to use this binder for IPC. (A condensed binder_init follows.)
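In code form the registration is roughly the following (condensed from the era's binder.c; only two of the five read-only proc files are shown):

static int __init binder_init(void)
{
    int ret;

    binder_proc_dir_entry_root = proc_mkdir("binder", NULL);  // /proc/binder
    if (binder_proc_dir_entry_root)
        binder_proc_dir_entry_proc = proc_mkdir("proc", binder_proc_dir_entry_root);
    ret = misc_register(&binder_miscdev);                     // /dev/binder
    if (binder_proc_dir_entry_root) {
        create_proc_read_entry("state", S_IRUGO, binder_proc_dir_entry_root,
                               binder_read_proc_state, NULL);
        create_proc_read_entry("stats", S_IRUGO, binder_proc_dir_entry_root,
                               binder_read_proc_stats, NULL);
        ...
    }
    return ret;
}
device_initcall(binder_init);

The next fragment is from binder_transaction() in the same driver; it resolves the target of an incoming transaction: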
        if (tr->target.handle) {//destination handle
            struct binder_ref *ref;
            ref = binder_get_ref(proc, tr->target.handle);//look up the ref for the destination handle; the handle itself is generated by binder_get_ref_for_node(), shown below
            if (ref == NULL) {
                binder_user_error("binder: %d:%d got "
                    "transaction to invalid handle\n",
                    proc->pid, thread->pid);
                return_error = BR_FAILED_REPLY;
                goto err_invalid_target_handle;
            }
            target_node = ref->node;
        } else {
            target_node = binder_context_mgr_node;
            if (target_node == NULL) {
                return_error = BR_DEAD_REPLY;
                goto err_no_context_mgr_node;
            }
        }

static struct binder_ref *
binder_get_ref(struct binder_proc *proc, uint32_t desc)
{
    struct rb_node *n = proc->refs_by_desc.rb_node;//does this proc already hold a ref for this desc?
    struct binder_ref *ref;

    while (n) {
        ref = rb_entry(n, struct binder_ref, rb_node_desc);

        if (desc < ref->desc)
            n = n->rb_left;
        else if (desc > ref->desc)
            n = n->rb_right;
        else
            return ref;
    }
    return NULL;
}

struct flat_binder_object {
    /* 8 bytes for large_flat_header. */
    unsigned long type;
    unsigned long flags;

    /* 8 bytes of data. */
    union {
        void *binder; /* local object */
        signed long handle; /* remote object */
    };

    /* extra data associated with local object */
    void *cookie;
};
==>binder_transaction
==> struct flat_binder_object *fp;
        ...
        switch (fp->type) {
        case BINDER_TYPE_BINDER:
        case BINDER_TYPE_WEAK_BINDER: {
            struct binder_ref *ref;
//obj.binder = local->getWeakRefs() above serves as the unique key in the red-black node tree, so fp->binder is looked up there [luther.gliethttp]
            struct binder_node *node = binder_get_node(proc, fp->binder);//look it up in this proc's node tree
            if (node == NULL) {
                node = binder_new_node(proc, fp->binder, fp->cookie);//not found: create a new node
                if (node == NULL) {
                    return_error = BR_FAILED_REPLY;
                    goto err_binder_new_node_failed;
                }
                node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
                node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
            }
            if (fp->cookie != node->cookie) {
                binder_user_error("binder: %d:%d sending u%p "
                    "node %d, cookie mismatch %p != %p\n",
                    proc->pid, thread->pid,
                    fp->binder, node->debug_id,
                    fp->cookie, node->cookie);
                goto err_binder_get_ref_for_node_failed;
            }
            ref = binder_get_ref_for_node(target_proc, node);//find or create target_proc's ref to this node and link it into target_proc's red-black trees
            if (ref == NULL) {
                return_error = BR_FAILED_REPLY;
                goto err_binder_get_ref_for_node_failed;
            }
            if (fp->type == BINDER_TYPE_BINDER)
                fp->type = BINDER_TYPE_HANDLE;
            else
                fp->type = BINDER_TYPE_WEAK_HANDLE;
            fp->handle = ref->desc;//the ref's desc becomes the unique handle that distinguishes the services
            binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, &thread->todo);
            if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
                printk(KERN_INFO " node %d u%p -> ref %d desc %d\n",
                 node->debug_id, node->ptr, ref->debug_id, ref->desc);
        } break;
        case BINDER_TYPE_HANDLE:
        case BINDER_TYPE_WEAK_HANDLE: {
            ...
            new_ref = binder_get_ref_for_node(target_proc, ref->node);
            ...
==>binder_thread_write
==> if (target == 0 && binder_context_mgr_node &&
             (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
                ref = binder_get_ref_for_node(proc,
                     binder_context_mgr_node);

static struct binder_ref *
binder_get_ref_for_node(struct binder_proc *proc, struct binder_node *node)
{
    ...
    new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
    for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
        ref = rb_entry(n, struct binder_ref, rb_node_desc);
        if (ref->desc > new_ref->desc)
            break;
        new_ref->desc = ref->desc + 1;//compute the unique handle number for each service [luther.gliethttp]
    }
    ...
    rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
    ...
}
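The loop above is simply a smallest-unused-integer scan over the ascending refs_by_desc tree. A hypothetical standalone demo of the same logic (std::set stands in for the red-black tree; alloc_desc is an illustrative name, not from the driver):

#include <cstdint>
#include <cstdio>
#include <set>

static uint32_t alloc_desc(const std::set<uint32_t>& used, bool is_context_mgr)
{
    uint32_t desc = is_context_mgr ? 0 : 1; // handle 0 is reserved for the context manager
    for (uint32_t d : used) {               // std::set iterates in ascending order,
        if (d > desc)                       // like rb_first()/rb_next() over refs_by_desc
            break;                          // gap found: desc is unused
        desc = d + 1;
    }
    return desc;
}

int main()
{
    std::set<uint32_t> used = {0, 1, 2, 5};
    printf("next desc = %u\n", alloc_desc(used, false)); // prints "next desc = 3"
    return 0;
}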


chinaunix user 2009-09-02 15:24:04

How is the dispatch of BR_TRANSACTION actually handled? For service_manager the transaction is delivered to that process itself, while for the others, such as the camera service, it is delivered to the onTransact function. Is there some specific control inside that decides this? Many thanks!

chinaunix user 2009-09-02 09:53:42

If this executes in the Service_Manager process (which it presumably should), then it would also have to call IPCThreadState::waitForResponse and end up in the BR_TRANSACTION case of executeCommand. Is it that the Service_Manager process grabs the read thread's data first, so nothing can be read here, or what is going on?

chinaunix user 2009-09-01 17:08:37

A question, please, and thanks! BServiceManager is the native implementation class of ServiceManager; it has been new'ed, but the ProcessState code never retrieves it. In IPCThreadState::executeCommand: if (tr.target.ptr) { sp<BBinder> b((BBinder*)tr.cookie); const status_t error = b->transact(tr.code, buffer, &reply, 0); if (error < NO_ERROR) reply.setError(error); } else { const status_t error = the_context_object->transact(tr.code, buffer, &reply, 0);