
Category: LINUX

2009-05-17 11:24:44

A Brief Look at the Internal Flow of Android's Binder IPC Mechanism
"A Brief Look at How getStrongProxyForHandle() Dynamically Grows the handle-Indexed Vector in Android"
"A Brief Look at Parts of the Native-Layer Binder Implementation on Android"
"Three Brief Looks Inside Binder"

frameworks/base/cmds/servicemanager/service_manager.c
==>svcmgr_handler
int main(int argc, char **argv)
{
    struct binder_state *bs;
    void *svcmgr = BINDER_SERVICE_MANAGER;
// #define BINDER_SERVICE_MANAGER ((void*) 0)

    bs = binder_open(128*1024);

    if (binder_become_context_manager(bs)) {
        LOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }

    svcmgr_handle = svcmgr; // i.e. 0
    binder_loop(bs, svcmgr_handler); // svcmgr_handler is the callback that handles incoming requests
    return 0;
}
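binder_open() is not quoted in this post. As a rough sketch of what it does (field names follow the servicemanager binder.c of this era, but treat the exact fields and error handling as assumptions): it opens /dev/binder and mmaps a read-only window of the requested size, the same mapping that binder_mmap() sets up on the kernel side further below.

#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

struct binder_state
{
    int fd;            /* /dev/binder file descriptor */
    void *mapped;      /* start of the mmap'ed transaction buffer */
    unsigned mapsize;  /* 128*1024 in main() above */
};

struct binder_state *binder_open(unsigned mapsize)
{
    struct binder_state *bs = malloc(sizeof(*bs));
    if (!bs)
        return NULL;

    bs->fd = open("/dev/binder", O_RDWR);
    if (bs->fd < 0)
        goto fail_open;

    bs->mapsize = mapsize;
    /* backed by the binder_buffer area managed by binder_mmap() in the driver */
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    if (bs->mapped == MAP_FAILED)
        goto fail_map;

    return bs;

fail_map:
    close(bs->fd);
fail_open:
    free(bs);
    return NULL;
}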

void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    unsigned readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;
    
    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(unsigned)); // write only

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (unsigned) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        // read only: blocks until data arrives, reading at most 32*4 bytes; this drives binder_thread_read() in the kernel

        if (res < 0) {
            LOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }

        res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func);
        if (res == 0) {
            LOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            LOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}

#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
int binder_write(struct binder_state *bs, void *data, unsigned len)
{
    struct binder_write_read bwr;
    int res;
    bwr.write_size = len;
    bwr.write_consumed = 0;
    bwr.write_buffer = (unsigned) data;
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr); // the BINDER_WRITE_READ command carries its payload size via struct binder_write_read
    if (res < 0) {
        fprintf(stderr,"binder_write: ioctl failed (%s)\n",
                strerror(errno));
    }
    return res;
}

struct binder_txn
{
    void *target;
    void *cookie;
    uint32_t code;
    uint32_t flags;

    uint32_t sender_pid;
    uint32_t sender_euid;

    uint32_t data_size;
    uint32_t offs_size;
    void *data;
    void *offs;
};
// The kernel-space definition is as follows and matches the user-space struct above:
struct binder_transaction_data {
    /* The first two are only used for bcTRANSACTION and brTRANSACTION,
     * identifying the target and contents of the transaction.
     */
    union {
        size_t    handle;    /* target descriptor of command transaction */
        void    *ptr;    /* target descriptor of return transaction */
    } target;
    void        *cookie;    /* target object cookie */
    unsigned int    code;        /* transaction command */

    /* General information about the transaction. */
    unsigned int    flags;
    pid_t        sender_pid;
    uid_t        sender_euid;
    size_t        data_size;    /* number of bytes of data */
    size_t        offsets_size;    /* number of bytes of offsets */

    /* If this transaction is inline, the data immediately
     * follows here; otherwise, it ends with a pointer to
     * the data buffer.
     */
    union {
        struct {
            /* transaction data */
            const void    *buffer;
            /* offsets from buffer to flat_binder_object structs */
            const void    *offsets;
        } ptr;
        uint8_t    buf[8];
    } data;
};
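To make the data/offsets layout concrete, here is a small illustrative helper (not code from the tree) showing how a receiver walks data.ptr.offsets: each entry is a byte offset into data.ptr.buffer at which a flat_binder_object starts (its assumed layout is sketched after bio_put_ref() further below); here only the leading type word is peeked at.

#include <stddef.h>

static void walk_objects(const struct binder_transaction_data *tr)
{
    const char *data = tr->data.ptr.buffer;
    const size_t *offs = tr->data.ptr.offsets;
    size_t n = tr->offsets_size / sizeof(size_t);
    size_t i;

    for (i = 0; i < n; i++) {
        /* first field of the embedded flat_binder_object:
         * BINDER_TYPE_BINDER / BINDER_TYPE_HANDLE / ... */
        const unsigned long *type = (const unsigned long *)(data + offs[i]);
        (void)*type;
    }
}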

// Wire format of the read buffer:
// cmd + data
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uint32_t *ptr, uint32_t size, binder_handler func)
{
    int r = 1;
    uint32_t *end = ptr + (size / 4);

    while (ptr < end) {
        uint32_t cmd = *ptr++; // fetch cmd; ptr now points at the argument area of that command.
#if TRACE
        fprintf(stderr,"%s:\n", cmd_name(cmd));
#endif
        switch(cmd) {
        case BR_NOOP: // ignored
            break;
        case BR_TRANSACTION_COMPLETE:
            break;
        case BR_INCREFS:
        case BR_ACQUIRE:
        case BR_RELEASE:
        case BR_DECREFS:
#if TRACE
            fprintf(stderr,"  %08x %08x\n", ptr[0], ptr[1]);
#endif
            ptr += 2;
            break;
        case BR_TRANSACTION: {
            struct binder_txn *txn = (void *) ptr;
            if ((end - ptr) * sizeof(uint32_t) < sizeof(struct binder_txn)) {
                LOGE("parse: txn too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (func) {
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;
                int res;

                bio_init(&reply, rdata, sizeof(rdata), 4);
                // this sets up:
                // reply.data = rdata + 4*4;
                // reply.offs = rdata;
                bio_init_from_txn(&msg, txn);
                res = func(bs, txn, &msg, &reply); // invoke svcmgr_handler
                // txn->data is the start address of the buffer the kernel driver must free afterwards
                binder_send_reply(bs, &reply, txn->data, res); // send the reply back
            }
            ptr += sizeof(*txn) / sizeof(uint32_t);
            break;
        }
        case BR_REPLY: {
            struct binder_txn *txn = (void*) ptr;
            if ((end - ptr) * sizeof(uint32_t) < sizeof(struct binder_txn)) {
                LOGE("parse: reply too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (bio) {
                bio_init_from_txn(bio, txn);
                bio = 0;
            } else {
                    /* todo FREE BUFFER */
            }
            ptr += (sizeof(*txn) / sizeof(uint32_t));
            r = 0;
            break;
        }
        case BR_DEAD_BINDER: {
            struct binder_death *death = (void*) *ptr++;
            death->func(bs, death->ptr);
            break;
        }
        case BR_FAILED_REPLY:
            r = -1;
            break;
        case BR_DEAD_REPLY:
            r = -1;
            break;
        default:
            LOGE("parse: OOPS %d\n", cmd);
            return -1;
        }
    }

    return r;
}

void binder_send_reply(struct binder_state *bs,
                       struct binder_io *reply,
                       void *buffer_to_free,
                       int status)
{
    struct {
        uint32_t cmd_free;
        void *buffer;
        uint32_t cmd_reply;
        struct binder_txn txn;
    } __attribute__((packed)) data;

    data.cmd_free = BC_FREE_BUFFER; // first command: BC_FREE_BUFFER, release the buffer
    data.buffer = buffer_to_free; // the buffer to free starts at buffer_to_free
    data.cmd_reply = BC_REPLY; // second command: BC_REPLY
    data.txn.target = 0; // the payload of BC_REPLY is a struct binder_txn
    data.txn.cookie = 0;
    data.txn.code = 0;
    if (status) {
        data.txn.flags = TF_STATUS_CODE;
        data.txn.data_size = sizeof(int);
        data.txn.offs_size = 0;
        data.txn.data = &status;
        data.txn.offs = 0;
    } else {
        data.txn.flags = 0;
        data.txn.data_size = reply->data - reply->data0; // data0 is the start pointer, data the pointer after the payload was pushed
        data.txn.offs_size = ((char*) reply->offs) - ((char*) reply->offs0); // offs0 is the start pointer, offs the pointer after the offsets were written
        data.txn.data = reply->data0;
        data.txn.offs = reply->offs0;
    }
    binder_write(bs, &data, sizeof(data));
}

int svcmgr_handler(struct binder_state *bs,
                   struct binder_txn *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *s;
    unsigned len;
    void *ptr;

//    LOGI("target=%p code=%d pid=%d uid=%d\n",
//         txn->target, txn->code, txn->sender_pid, txn->sender_euid);

    if (txn->target != svcmgr_handle)
        return -1;

    s = bio_get_string16(msg, &len);
    // Layout of the binder_txn.data area: len + data,
    // i.e. a 4-byte length followed by len 16-bit characters.
    // s points at the start of that 16-bit character data;
    // after bio_get_string16(), msg->data points past those characters,
    // i.e. at the 4-byte length field of the next item.
uint16_t svcmgr_id[] = {
    'a','n','d','r','o','i','d','.','o','s','.',
    'I','S','e','r','v','i','c','e','M','a','n','a','g','e','r'
};
// So when does an application write the svcmgr_id string? Take addService as an example:
// void AudioFlinger::instantiate() {
//    defaultServiceManager()->addService(
//            String16("media.audio_flinger"), new AudioFlinger());
// }
// virtual status_t addService(const String16& name, const sp<IBinder>& service)
// {
//     Parcel data, reply;
//     data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
//     // writes the interface descriptor string, here the UTF-16 string "android.os.IServiceManager"
//     data.writeString16(name); // immediately followed by the UTF-16 string "media.audio_flinger"
//     data.writeStrongBinder(service); // fills in a struct flat_binder_object
//     status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
//     // sends data to the kernel as the payload of the ADD_SERVICE_TRANSACTION command
//     return err == NO_ERROR ? reply.readInt32() : err;
// }
// The interface descriptor string is created when the interface itself is defined:
// IMPLEMENT_META_INTERFACE(ServiceManager, "android.os.IServiceManager");
    if ((len != (sizeof(svcmgr_id) / 2)) ||
        memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
        fprintf(stderr,"invalid id %s\n", str8(s));
        return -1;
    }

    switch(txn->code) {
    case SVC_MGR_GET_SERVICE:
    case SVC_MGR_CHECK_SERVICE:
// Now look at the data format sent by getService() on the framework (JNI) side:
// virtual sp<IBinder> getService(const String16& name) const
// {
//     unsigned n;
//     for (n = 0; n < 5; n++){
//         sp<IBinder> svc = checkService(name);
//         if (svc != NULL) return svc;
//         LOGI("Waiting for service %s...\n", String8(name).string());
//         sleep(1);
//     }
//     return NULL;
// }
//
// virtual sp<IBinder> checkService( const String16& name) const
// {
//     Parcel data, reply;
//     data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor()); // writes the UTF-16 string "android.os.IServiceManager"
//     data.writeString16(name); // writes the name of the service being looked up, e.g. "media.audio_flinger"
//     remote()->transact(CHECK_SERVICE_TRANSACTION, data, &reply);
//     // sends data to the kernel as the payload of the CHECK_SERVICE_TRANSACTION command
//     return reply.readStrongBinder();
// }
        s = bio_get_string16(msg, &len); // the UTF-16 name of the service being queried, e.g. "media.audio_flinger"
        ptr = do_find_service(bs, s, len); // look up the desc that "media.audio_flinger" has in servicemanager's proc
        if (!ptr)
            break;
        bio_put_ref(reply, ptr); // push that desc into the reply as the return value
        return 0;

    case SVC_MGR_ADD_SERVICE:
        s = bio_get_string16(msg, &len); // the UTF-16 service name, e.g. "media.audio_flinger"
        ptr = bio_get_ref(msg); // the pointer/handle field of the struct flat_binder_object
        // Note: by now ptr is the value the driver rewrote with fp->handle = ref->desc;
        // i.e. after the node created in the proc of "media.audio_flinger" was added to
        // servicemanager's proc, this is the desc index servicemanager uses to reach that node.
        if (do_add_service(bs, s, len, ptr, txn->sender_euid))
            return -1;
        break;

    case SVC_MGR_LIST_SERVICES: {
        unsigned n = bio_get_uint32(msg);

        si = svclist;
        while ((n-- > 0) && si)
            si = si->next;
        if (si) {
            bio_put_string16(reply, si->name);
            return 0;
        }
        return -1;
    }
    default:
        LOGE("unknown code %d\n", txn->code);
        return -1;
    }

    bio_put_uint32(reply, 0);
    return 0;
}
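do_find_service() and the svclist it searches are not quoted above. As a rough sketch (names follow the service_manager.c of this era; the binder_death bookkeeping is omitted), the registry is a singly linked list keyed by the UTF-16 service name, and the ptr stored by do_add_service() is exactly the handle that bio_put_ref() packs into the reply:

#include <stdint.h>
#include <string.h>

struct binder_state;   /* defined in binder.c; unused here */

struct svcinfo
{
    struct svcinfo *next;
    void *ptr;          /* the handle (desc) saved by do_add_service() */
    unsigned len;
    uint16_t name[0];   /* UTF-16 name, e.g. "media.audio_flinger" */
};

struct svcinfo *svclist;

static struct svcinfo *find_svc(uint16_t *s16, unsigned len)
{
    struct svcinfo *si;

    for (si = svclist; si; si = si->next) {
        if ((len == si->len) &&
            !memcmp(s16, si->name, len * sizeof(uint16_t)))
            return si;
    }
    return 0;
}

void *do_find_service(struct binder_state *bs, uint16_t *s, unsigned len)
{
    struct svcinfo *si = find_svc(s, len);
    return (si && si->ptr) ? si->ptr : 0;
}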

void bio_put_ref(struct binder_io *bio, void *ptr)
{
    struct binder_object *obj;

    if (ptr)
        obj = bio_alloc_obj(bio);
    else
        obj = bio_alloc(bio, sizeof(*obj));

    if (!obj)
        return;

    obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    obj->type = BINDER_TYPE_HANDLE; // the reply carries an object of type BINDER_TYPE_HANDLE
    obj->pointer = ptr;
// ptr is the desc index that the node of "media.audio_flinger" has in servicemanager's proc
    obj->cookie = 0;
}
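For reference, these are the (assumed, 32-bit era) layouts involved here: the user-space struct binder_object that bio_put_ref() fills, and the kernel's struct flat_binder_object that the driver later reinterprets it as. Check the respective binder.h headers for the authoritative definitions; this is orientation only.

#include <stdint.h>

struct binder_object            /* user space, servicemanager's binder.h */
{
    uint32_t type;              /* BINDER_TYPE_HANDLE in bio_put_ref() */
    uint32_t flags;
    void *pointer;              /* the desc returned by do_find_service() */
    void *cookie;
};

struct flat_binder_object       /* kernel driver's binder.h */
{
    unsigned long type;
    unsigned long flags;
    union {
        void *binder;           /* BINDER_TYPE_(WEAK_)BINDER: local object */
        signed long handle;     /* BINDER_TYPE_(WEAK_)HANDLE: ref->desc    */
    };
    void *cookie;
};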

// Binder kernel driver side
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    unsigned int size = _IOC_SIZE(cmd); // payload size encoded in the ioctl cmd
    ......
    switch (cmd) {
    case BINDER_WRITE_READ: {
        struct binder_write_read bwr;
        if (size != sizeof(struct binder_write_read)) {
            ret = -EINVAL;
            goto err;
        }
        if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
            ret = -EFAULT;
            goto err;
        }
        if (bwr.write_size > 0) { // the caller wants to write
            ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
            if (ret < 0) {
                bwr.read_consumed = 0;
                if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                    ret = -EFAULT;
                goto err;
            }
        }
        if (bwr.read_size > 0) { // the caller wants to read
            ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
            if (!list_empty(&proc->todo))
                wake_up_interruptible(&proc->wait);
            if (ret < 0) {
                if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                    ret = -EFAULT;
                goto err;
            }
        }

    ......
}

int
binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
            void __user *buffer, int size, signed long *consumed)
{
    uint32_t cmd;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    while (ptr < end && thread->return_error == BR_OK) {
        if (get_user(cmd, (uint32_t __user *)ptr)) // read the command word first
            return -EFAULT;
        ptr += sizeof(uint32_t);
        if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
            binder_stats.bc[_IOC_NR(cmd)]++;
            proc->stats.bc[_IOC_NR(cmd)]++;
            thread->stats.bc[_IOC_NR(cmd)]++;
        }
        switch (cmd) {
        case BC_INCREFS:
        case BC_ACQUIRE:
        case BC_RELEASE:
        case BC_DECREFS: {
            uint32_t target;
            struct binder_ref *ref;
            const char *debug_string;

            if (get_user(target, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);
            if (target == 0 && binder_context_mgr_node &&
                (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
            // Process B wants to reference binder_context_mgr_node,
            // so it sends BC_INCREFS (weak) or BC_ACQUIRE (strong) to bump
            // the counters (ref->weak++ or ref->strong++) on the ref that
            // B's own proc holds for binder_context_mgr_node.
// The corresponding framework-side code path is:
// getStrongProxyForHandle(0)
// ==> b = new BpBinder(0)
// ==> IPCThreadState::self()->incWeakHandle(0);
// void IPCThreadState::incWeakHandle(int32_t handle) // queues the command into the out buffer
// {
//      LOG_REMOTEREFS("IPCThreadState::incWeakHandle(%d)\n", handle);
//      mOut.writeInt32(BC_INCREFS); // put the BC_INCREFS command into the flat buffer
//      mOut.writeInt32(handle); // followed by its argument
// }
// So when are the many commands buffered in mOut actually sent down to the kernel?
// In IPCThreadState::talkWithDriver(), which is reached from three places:
// .IPCThreadState::joinThreadPool()
// .IPCThreadState::waitForResponse()
// .IPCThreadState::flushCommands()

                ref = binder_get_ref_for_node(proc,
                           binder_context_mgr_node); // add a reference to binder_context_mgr_node in process B's proc
             // From now on B can use ref->desc; for binder_context_mgr_node this value
             // is 0 in every proc, i.e. ref->desc == target == 0.
                if (ref->desc != target) {
                    binder_user_error("binder: %d:"
                        "%d tried to acquire "
                        "reference to desc 0, "
                        "got %d instead\n",
                        proc->pid, thread->pid,
                        ref->desc);
                }
            } else
                ref = binder_get_ref(proc, target);
            if (ref == NULL) {
                binder_user_error("binder: %d:%d refcou"
                    "nt change on invalid ref %d\n",
                    proc->pid, thread->pid, target);
                break;
            }
            switch (cmd) {
            case BC_INCREFS:
                debug_string = "IncRefs";
                binder_inc_ref(ref, 0, NULL);
                break;
            case BC_ACQUIRE:
                debug_string = "Acquire";
                binder_inc_ref(ref, 1, NULL);
                break;
            case BC_RELEASE:
                debug_string = "Release";
                binder_dec_ref(ref, 1);
                break;
            case BC_DECREFS:
            default:
                debug_string = "DecRefs";
                binder_dec_ref(ref, 0);
                break;
            }
            if (binder_debug_mask & BINDER_DEBUG_USER_REFS)
                printk(KERN_INFO "binder: %d:%d %s ref %d desc %d s %d w %d for node %d\n",
                       proc->pid, thread->pid, debug_string, ref->debug_id, ref->desc, ref->strong, ref->weak, ref->node->debug_id);
            break;
        }
            ......
        case BC_FREE_BUFFER: {
            void __user *data_ptr;
            struct binder_buffer *buffer;

            if (get_user(data_ptr, (void * __user *)ptr))
                return -EFAULT;
            ptr += sizeof(void *);

            buffer = binder_buffer_lookup(proc, data_ptr); // find the buffer that starts at address data_ptr
            if (buffer == NULL) {
                binder_user_error("binder: %d:%d "
                    "BC_FREE_BUFFER u%p no match\n",
                    proc->pid, thread->pid, data_ptr);
                break;
            }
            if (!buffer->allow_user_free) { // is user space allowed to free this buffer with BC_FREE_BUFFER?
                binder_user_error("binder: %d:%d "
                    "BC_FREE_BUFFER u%p matched "
                    "unreturned buffer\n",
                    proc->pid, thread->pid, data_ptr);
                break;
            }
            if (binder_debug_mask & BINDER_DEBUG_FREE_BUFFER)
                printk(KERN_INFO "binder: %d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n",
                       proc->pid, thread->pid, data_ptr, buffer->debug_id,
                       buffer->transaction ? "active" : "finished");

            if (buffer->transaction) {
                buffer->transaction->buffer = NULL;
                buffer->transaction = NULL;
            }
            if (buffer->async_transaction && buffer->target_node) {
                BUG_ON(!buffer->target_node->has_async_transaction);
                if (list_empty(&buffer->target_node->async_todo))
                    buffer->target_node->has_async_transaction = 0;
                else
                    list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
            }
            binder_transaction_buffer_release(proc, buffer, NULL);
            // binder_transaction_buffer_release drops the reference counts taken
            // for this buffer, for example:
            // .binder_dec_node(buffer->target_node, 1, 0);
            // .binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
            // .binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
            // .task_close_fd(proc->tsk, fp->handle);
            binder_free_buf(proc, buffer); // release the VMA pages this buffer obtained, unlink it,
            // and hang it back on the proc->free_buffers.rb_node red-black tree.
            break;
        }
        case BC_TRANSACTION:
        case BC_REPLY: {
            struct binder_transaction_data tr;

            if (copy_from_user(&tr, ptr, sizeof(tr)))
                return -EFAULT;
            ptr += sizeof(tr);
            binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
            break;
        }
            ......
        case BC_ENTER_LOOPER:
            if (binder_debug_mask & BINDER_DEBUG_THREADS)
                printk(KERN_INFO "binder: %d:%d BC_ENTER_LOOPER\n",
                       proc->pid, thread->pid);
            if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
                thread->looper |= BINDER_LOOPER_STATE_INVALID;
                binder_user_error("binder: %d:%d ERROR:"
                    " BC_ENTER_LOOPER called after "
                    "BC_REGISTER_LOOPER\n",
                    proc->pid, thread->pid);
            }
            thread->looper |= BINDER_LOOPER_STATE_ENTERED;
            break;
            ......
        }
        *consumed = ptr - buffer;
    }
    return 0;
}
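For comparison with the packed BC_FREE_BUFFER + BC_REPLY block built in binder_send_reply() above, this is roughly the write_buffer layout that binder_thread_write() consumes for one BC_TRANSACTION. The Parcel accessors named in the trailing comment are how IPCThreadState::writeTransactionData() fills the struct as I recall it; treat those names as an approximation rather than quoted source.

#include <stdint.h>

/* one command word followed immediately by its payload, as the
 * get_user()/copy_from_user() pair in the BC_TRANSACTION case expects */
struct one_transaction {
    uint32_t cmd;                       /* BC_TRANSACTION */
    struct binder_transaction_data tr;  /* defined earlier in this post */
} __attribute__((packed));

/* Roughly how the sender fills it before pointing bwr.write_buffer at it:
 *   struct one_transaction w;
 *   w.cmd = BC_TRANSACTION;
 *   w.tr.target.handle = 0;                      // 0 == service manager
 *   w.tr.code = ADD_SERVICE_TRANSACTION;
 *   w.tr.flags = 0;
 *   w.tr.data_size = data.ipcDataSize();         // Parcel payload
 *   w.tr.data.ptr.buffer = data.ipcData();
 *   w.tr.offsets_size = data.ipcObjectsCount() * sizeof(size_t);
 *   w.tr.data.ptr.offsets = data.ipcObjects();   // flat_binder_object offsets
 */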

static void binder_free_buf(
    struct binder_proc *proc, struct binder_buffer *buffer)
{
    size_t size, buffer_size;

    buffer_size = binder_buffer_size(proc, buffer); // compute the buffer's size from its address.

    size = ALIGN(buffer->data_size, sizeof(void *)) +
        ALIGN(buffer->offsets_size, sizeof(void *));
    if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
        printk(KERN_INFO "binder: %d: binder_free_buf %p size %d buffer"
               "_size %d\n", proc->pid, buffer, size, buffer_size);

    BUG_ON(buffer->free);
    BUG_ON(size > buffer_size);
    BUG_ON(buffer->transaction != NULL);
    BUG_ON((void *)buffer < proc->buffer);
    BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);

    if (buffer->async_transaction) {
        proc->free_async_space += size + sizeof(struct binder_buffer);
        if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC_ASYNC)
            printk(KERN_INFO "binder: %d: binder_free_buf size %d "
                   "async free %d\n", proc->pid, size,
                   proc->free_async_space);
    }

    binder_update_page_range(proc, 0, // release the VMA pages backing this buffer
        (void *)PAGE_ALIGN((size_t)buffer->data),
        (void *)(((size_t)buffer->data + buffer_size) & PAGE_MASK),
        NULL);
    rb_erase(&buffer->rb_node, &proc->allocated_buffers); // remove the buffer from the allocated_buffers rbtree.
    buffer->free = 1; // mark the buffer free
    if (!list_is_last(&buffer->entry, &proc->buffers)) {
        // If this buffer is not the last element, its next neighbour may
        // already be free; if so, merge next's memory into this buffer.
        struct binder_buffer *next = list_entry(buffer->entry.next,
                        struct binder_buffer, entry); // the buffer after us
        if (next->free) {
        // next is free too, so buffer and next can be merged; removing next from the list is all the merging needed.
            rb_erase(&next->rb_node, &proc->free_buffers); // drop next from the free_buffers rbtree.
            binder_delete_free_buffer(proc, next);
        // This does list_del(&next->entry); since binder_buffer_size() derives sizes from addresses,
        // next's space is absorbed automatically and next simply disappears, much like small blocks
        // being coalesced back into one large contiguous block in a buddy allocator.
        }
    }
    if (proc->buffers.next != &buffer->entry) {
        // prev may already be free as well; if so, merge this buffer into prev
        struct binder_buffer *prev = list_entry(buffer->entry.prev,
                        struct binder_buffer, entry);
        if (prev->free) {
            binder_delete_free_buffer(proc, buffer);
            // Because prev was freed earlier, buffer removes itself from the list; prev becomes
            // the buffer that actually goes back onto the proc->free_buffers rbtree.
            rb_erase(&prev->rb_node, &proc->free_buffers);
            // prev is taken off proc->free_buffers because, having absorbed buffer, its size has changed.
            buffer = prev;
        }
    }
    binder_insert_free_buffer(proc, buffer);
    // With next unlinked from the list, this buffer's size can again be derived by
    // binder_buffer_size() from the addresses of its neighbours on the list.
}

static void
binder_transaction(struct binder_proc *proc, struct binder_thread *thread,
    struct binder_transaction_data *tr, int reply)
{
    struct binder_transaction *t;
    struct binder_work *tcomplete;
    size_t *offp, *off_end;
    struct binder_proc *target_proc;
    struct binder_thread *target_thread = NULL;
    struct binder_node *target_node = NULL;
    struct list_head *target_list;
    wait_queue_head_t *target_wait;
    struct binder_transaction *in_reply_to = NULL;
    struct binder_transaction_log_entry *e;
    uint32_t return_error;

    e = binder_transaction_log_add(&binder_transaction_log);
    e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
    e->from_proc = proc->pid;
    e->from_thread = thread->pid;
    e->target_handle = tr->target.handle;
    e->data_size = tr->data_size;
    e->offsets_size = tr->offsets_size;

    if (reply) {
        // thread->transaction_stack was set by binder_thread_read() just before it returned to user space
        in_reply_to = thread->transaction_stack;
        if (in_reply_to == NULL) {
            binder_user_error("binder: %d:%d got reply transaction "
                      "with no transaction stack\n",
                      proc->pid, thread->pid);
            return_error = BR_FAILED_REPLY;
            goto err_empty_call_stack;
        }
        binder_set_nice(in_reply_to->saved_priority);
        if (in_reply_to->to_thread != thread) {
            binder_user_error("binder: %d:%d got reply transaction "
                "with bad transaction stack,"
                " transaction %d has target %d:%d\n",
                proc->pid, thread->pid, in_reply_to->debug_id,
                in_reply_to->to_proc ?
                in_reply_to->to_proc->pid : 0,
                in_reply_to->to_thread ?
                in_reply_to->to_thread->pid : 0);
            return_error = BR_FAILED_REPLY;
            in_reply_to = NULL;
            goto err_bad_call_stack;
        }
        thread->transaction_stack = in_reply_to->to_parent; // pop the transaction this thread was servicing.
        target_thread = in_reply_to->from;
        if (target_thread == NULL) {
            return_error = BR_DEAD_REPLY;
            goto err_dead_binder;
        }
        if (target_thread->transaction_stack != in_reply_to) {
            binder_user_error("binder: %d:%d got reply transaction "
                "with bad target transaction stack %d, "
                "expected %d\n",
                proc->pid, thread->pid,
                target_thread->transaction_stack ?
                target_thread->transaction_stack->debug_id : 0,
                in_reply_to->debug_id);
            return_error = BR_FAILED_REPLY;
            in_reply_to = NULL;
            target_thread = NULL;
            goto err_dead_binder;
        }
        target_proc = target_thread->proc;
    } else {
        if (tr->target.handle) {
            struct binder_ref *ref;
            ref = binder_get_ref(proc, tr->target.handle);
            if (ref == NULL) {
                binder_user_error("binder: %d:%d got "
                    "transaction to invalid handle\n",
                    proc->pid, thread->pid);
                return_error = BR_FAILED_REPLY;
                goto err_invalid_target_handle;
            }
            target_node = ref->node;
        } else {
            target_node = binder_context_mgr_node;
            if (target_node == NULL) {
                return_error = BR_DEAD_REPLY;
                goto err_no_context_mgr_node;
            }
        }
        e->to_node = target_node->debug_id;
        target_proc = target_node->proc;
        if (target_proc == NULL) {
            return_error = BR_DEAD_REPLY;
            goto err_dead_binder;
        }
        if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { // only async (one-way) operations carry no reply data
            struct binder_transaction *tmp;
            tmp = thread->transaction_stack;
            // The current thread of process A may have several requests in flight
            // at once, possibly all to B, or to B, C, D, ... at the same time;
            // none of them has to block waiting for its reply.
            // If more than one request was sent to B, the earliest one's
            // originating thread is chosen as target_thread.
            while (tmp) {
                if (tmp->from && tmp->from->proc == target_proc)
                    target_thread = tmp->from;
                tmp = tmp->from_parent; // walk the chain; its last element is the transaction B received first.
            }
        }
    }
    if (target_thread) {
        e->to_thread = target_thread->pid;
        target_list = &target_thread->todo;
        target_wait = &target_thread->wait;
    } else {
        target_list = &target_proc->todo;
        target_wait = &target_proc->wait;
    }
    e->to_proc = target_proc->pid;

    /* TODO: reuse incoming transaction for reply */
    t = kzalloc(sizeof(*t), GFP_KERNEL);
    if (t == NULL) {
        return_error = BR_FAILED_REPLY;
        goto err_alloc_t_failed;
    }
    binder_stats.obj_created[BINDER_STAT_TRANSACTION]++;

    tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
    if (tcomplete == NULL) {
        return_error = BR_FAILED_REPLY;
        goto err_alloc_tcomplete_failed;
    }
    binder_stats.obj_created[BINDER_STAT_TRANSACTION_COMPLETE]++;

    t->debug_id = ++binder_last_id;
    e->debug_id = t->debug_id;

    if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) {
        if (reply)
            printk(KERN_INFO "binder: %d:%d BC_REPLY %d -> %d:%d, "
                   "data %p-%p size %d-%d\n",
                   proc->pid, thread->pid, t->debug_id,
                   target_proc->pid, target_thread->pid,
                   tr->data.ptr.buffer, tr->data.ptr.offsets,
                   tr->data_size, tr->offsets_size);
        else
            printk(KERN_INFO "binder: %d:%d BC_TRANSACTION %d -> "
                   "%d - node %d, data %p-%p size %d-%d\n",
                   proc->pid, thread->pid, t->debug_id,
                   target_proc->pid, target_node->debug_id,
                   tr->data.ptr.buffer, tr->data.ptr.offsets,
                   tr->data_size, tr->offsets_size);
    }

    if (!reply && !(tr->flags & TF_ONE_WAY))
        t->from = thread; // not a reply and not async: this transaction has an
                          // originator, namely the thread of the current task.
    else
        t->from = NULL;   // otherwise there is no originator; this is a one-way data transfer
    t->sender_euid = proc->tsk->euid;
    t->to_proc = target_proc;
    t->to_thread = target_thread;
    t->code = tr->code;
    t->flags = tr->flags;
    t->priority = task_nice(current);
    // Allocate from the target proc the buffer that will hold the data this
    // transaction carries up from user space.
    t->buffer = binder_alloc_buf(target_proc, tr->data_size,
        tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
    if (t->buffer == NULL) {
        return_error = BR_FAILED_REPLY;
        goto err_binder_alloc_buf_failed;
    }
    t->buffer->allow_user_free = 0;
    t->buffer->debug_id = t->debug_id;
    t->buffer->transaction = t;
    t->buffer->target_node = target_node;
    if (target_node)
        binder_inc_node(target_node, 1, 0, NULL);

    offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));

    if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
// Copy process A's user-space data into the kernel buffer that backs process B's mmap'ed VMA.
// binder_mmap() recorded:
// proc->user_buffer_offset = vma->vm_start - (size_t)proc->buffer;
// so in binder_thread_read() process B simply does
// tr.data.ptr.buffer = (void *)((void *)t->buffer->data + proc->user_buffer_offset);
// and can read the data A sent without any further copying.
// One copy on A's side is enough for B to see the transferred data; this
// single-copy IPC keeps transfer times short and is what makes binder so efficient.
        binder_user_error("binder: %d:%d got transaction with invalid "
            "data ptr\n", proc->pid, thread->pid);
        return_error = BR_FAILED_REPLY;
        goto err_copy_data_failed;
    }
// copy the offsets (control) information
    if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
        binder_user_error("binder: %d:%d got transaction with invalid "
            "offsets ptr\n", proc->pid, thread->pid);
        return_error = BR_FAILED_REPLY;
        goto err_copy_data_failed;
    }
    off_end = (void *)offp + tr->offsets_size;
// 1. Each *offp holds the offset of a control structure relative to t->buffer->data.
// 2. offsets_size tells how many fp control structures there are,
//    i.e. how many *offp offsets follow.
    for (; offp < off_end; offp++) {
        struct flat_binder_object *fp;
        if (*offp > t->buffer->data_size - sizeof(*fp)) {
            binder_user_error("binder: %d:%d got transaction with "
                "invalid offset, %d\n",
                proc->pid, thread->pid, *offp);
            return_error = BR_FAILED_REPLY;
            goto err_bad_offset;
        }
        fp = (struct flat_binder_object *)(t->buffer->data + *offp);
        switch (fp->type) {
        case BINDER_TYPE_BINDER:
        case BINDER_TYPE_WEAK_BINDER: {
            struct binder_ref *ref;
            struct binder_node *node = binder_get_node(proc, fp->binder);
            // Check whether process A's own proc already has a node for this object; one node
            // uniquely describes one operation A exposes, recording its originator, receiver, etc.
            if (node == NULL) {
                node = binder_new_node(proc, fp->binder, fp->cookie);
                // No node exists yet for this object, so create one in A's proc.
                if (node == NULL) {
                    return_error = BR_FAILED_REPLY;
                    goto err_binder_new_node_failed;
                }
                node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
                node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
            }
            ref = binder_get_ref_for_node(target_proc, node);
            // Register this node with the target proc: check whether target_proc already
            // references the node, and obtain the node's desc index in the target proc.
            // It is registered on the target proc because that proc is the one that will
            // actually carry out the work: A's request is attached to the node and hung on
            // the target proc, waiting for application B to perform what A asked for.
            if (ref == NULL) {
                return_error = BR_FAILED_REPLY;
                goto err_binder_get_ref_for_node_failed;
            }
            if (fp->type == BINDER_TYPE_BINDER)
                fp->type = BINDER_TYPE_HANDLE;
            else
                fp->type = BINDER_TYPE_WEAK_HANDLE;
            fp->handle = ref->desc; // ref->desc is now the desc index that A's node has in the
                                    // target proc; the target proc can use it to reach the node
                                    // A expects to be operated on. This is how servicemanager,
                                    // reading the handle at index desc, gets at the node for
                                    // "media.audio_flinger".
            binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, &thread->todo);
            if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
                printk(KERN_INFO "        node %d u%p -> ref %d desc %d\n",
                       node->debug_id, node->ptr, ref->debug_id, ref->desc);
        } break;
        case BINDER_TYPE_HANDLE:
        case BINDER_TYPE_WEAK_HANDLE: {
            // The REPLY path ends up here.
            // fp->handle is the desc index that the "media.audio_flinger" node has in
            // servicemanager's proc. Application B, blocked in
            // sm->getService(String16("media.audio_flinger")) waiting for the result, is served
            // by servicemanager reaching this point, which first looks up the node behind that desc.
            struct binder_ref *ref = binder_get_ref(proc, fp->handle);
            if (ref == NULL) {
                binder_user_error("binder: %d:%d got "
                    "transaction with invalid "
                    "handle, %ld\n", proc->pid,
                    thread->pid, fp->handle);
                return_error = BR_FAILED_REPLY;
                goto err_binder_get_ref_failed;
            }
            if (ref->node->proc == target_proc) {
                if (fp->type == BINDER_TYPE_HANDLE)
                    fp->type = BINDER_TYPE_BINDER;
                else
                    fp->type = BINDER_TYPE_WEAK_BINDER;
                fp->binder = ref->node->ptr;
                fp->cookie = ref->node->cookie;
                binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
                if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
                    printk(KERN_INFO "        ref %d desc %d -> node %d u%p\n",
                           ref->debug_id, ref->desc, ref->node->debug_id, ref->node->ptr);
            } else {
                struct binder_ref *new_ref;
                // Now add the node of "media.audio_flinger" to application B's proc, i.e. target_proc.
                new_ref = binder_get_ref_for_node(target_proc, ref->node); // we hold a reference to a service here;
                // attach the service's node to target_proc and obtain the desc index that node gets there.
                // From now on the service is known to target_proc, which can send it control data.
                if (new_ref == NULL) {
                    return_error = BR_FAILED_REPLY;
                    goto err_binder_get_ref_for_node_failed;
                }
                fp->handle = new_ref->desc; // the service's desc in target_proc, i.e. the fp->handle value application B will see.
                // With this desc index B can reference the "media.audio_flinger" node from its own proc,
                // and use it to exchange data with "media.audio_flinger" from then on.
                binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
                if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
                    printk(KERN_INFO "        ref %d desc %d -> ref %d desc %d (node %d)\n",
                           ref->debug_id, ref->desc, new_ref->debug_id, new_ref->desc, ref->node->debug_id);
            }
        } break;

        ......

    if (reply) {
        BUG_ON(t->buffer->async_transaction != 0);
        binder_pop_transaction(target_thread, in_reply_to); // release in_reply_to
    } else if (!(t->flags & TF_ONE_WAY)) {
        BUG_ON(t->buffer->async_transaction != 0);
        t->need_reply = 1;
        t->from_parent = thread->transaction_stack;
        // Singly linked list: the sending thread may have several operations outstanding;
        // thread->transaction_stack always corresponds to the most recent request.
        thread->transaction_stack = t;
    } else {
        BUG_ON(target_node == NULL);
        BUG_ON(t->buffer->async_transaction != 1);
        if (target_node->has_async_transaction) {
            target_list = &target_node->async_todo;
            target_wait = NULL;
        } else
            target_node->has_async_transaction = 1;
    }
    t->work.type = BINDER_WORK_TRANSACTION; // the work handed to the target proc is BINDER_WORK_TRANSACTION, parsed by binder_thread_read()
    list_add_tail(&t->work.entry, target_list); // append to the tail; binder_thread_read() takes work from the head of target_list,
    // so the pending transactions queued on target_list are handled one by one in arrival order.
    // Which thread each transaction belongs to was determined above by
    // tmp = thread->transaction_stack;
    // // thread->transaction_stack holds the most recently added transaction, which matches the tail of target_list.
    // while (tmp) {
    //      if (tmp->from && tmp->from->proc == target_proc)
    //          target_thread = tmp->from; // the tail of the from_parent chain corresponds to the head of target_list
    //      tmp = tmp->from_parent;
    // }
    // After this walk, the last element of the from_parent chain is the one that matches
    // the first entry of target_list, i.e. the transaction being processed now.
    tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
    list_add_tail(&tcomplete->entry, &thread->todo); // hung on process A's thread so the memory can be released once the send completes
    if (target_wait)
        wake_up_interruptible(target_wait);
    // Wake up the process waiting for data on the target proc so it can pick up and handle
    // this transaction t on target_list, e.g. via binder_ioctl() ==> binder_thread_read().
    return;
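A simplified model (an illustration only, not driver code) of how the two stacks above tie a synchronous call to its reply: the sender pushes via from_parent in binder_transaction(), the receiver pushes via to_parent in binder_thread_read(), and the BC_REPLY path pops the receiver's stack and routes the reply back through t->from.

struct model_thread;

struct model_txn {
    struct model_txn *from_parent;  /* sender-side stack link (set above)             */
    struct model_txn *to_parent;    /* receiver-side link (set in binder_thread_read) */
    struct model_thread *from;      /* the calling thread, consulted on BC_REPLY      */
};

struct model_thread {
    struct model_txn *transaction_stack;
};

/* A sends BC_TRANSACTION (not one-way): push onto A's stack */
static void sender_push(struct model_thread *a, struct model_txn *t)
{
    t->from = a;
    t->from_parent = a->transaction_stack;
    a->transaction_stack = t;
}

/* B picks the work up in binder_thread_read(): push onto B's stack */
static void receiver_push(struct model_thread *b, struct model_txn *t)
{
    t->to_parent = b->transaction_stack;
    b->transaction_stack = t;
}

/* B sends BC_REPLY: pop B's stack; the reply goes back to the thread in t->from */
static struct model_thread *reply_pop(struct model_thread *b)
{
    struct model_txn *in_reply_to = b->transaction_stack;
    b->transaction_stack = in_reply_to->to_parent;
    return in_reply_to->from;   /* becomes target_thread in the reply path */
}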

static int
binder_thread_read(struct binder_proc *proc, struct binder_thread *thread,
    void  __user *buffer, int size, signed long *consumed, int non_block)
{
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    int ret = 0;
    int wait_for_proc_work;

    if (*consumed == 0) {
        if (put_user(BR_NOOP, (uint32_t __user *)ptr)) // first push a BR_NOOP command.
            return -EFAULT;
        ptr += sizeof(uint32_t);
    }

retry:
    wait_for_proc_work = thread->transaction_stack == NULL && list_empty(&thread->todo);
    // no transaction is pending for this thread to read

    if (thread->return_error != BR_OK && ptr < end) {
        if (thread->return_error2 != BR_OK) {
            if (put_user(thread->return_error2, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);
            if (ptr == end)
                goto done;
            thread->return_error2 = BR_OK;
        }
        if (put_user(thread->return_error, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        thread->return_error = BR_OK;
        goto done;
    }


    thread->looper |= BINDER_LOOPER_STATE_WAITING;
    if (wait_for_proc_work)
        proc->ready_threads++;
    mutex_unlock(&binder_lock);
    if (wait_for_proc_work) {
        if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
                    BINDER_LOOPER_STATE_ENTERED))) {
            binder_user_error("binder: %d:%d ERROR: Thread waiting "
                "for process work before calling BC_REGISTER_"
                "LOOPER or BC_ENTER_LOOPER (state %x)\n",
                proc->pid, thread->pid, thread->looper);
            wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
        }
        binder_set_nice(proc->default_priority);
        if (non_block) {
            if (!binder_has_proc_work(proc, thread))
                ret = -EAGAIN;
        } else
            ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread)); // sleep until wake_up_interruptible(target_wait) wakes us
    } else {
        if (non_block) {
            if (!binder_has_thread_work(thread))
                ret = -EAGAIN;
        } else
            ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread)); // sleep until wake_up_interruptible(target_wait) wakes us
    }
    mutex_lock(&binder_lock);
    if (wait_for_proc_work)
        proc->ready_threads--;
    thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

    if (ret)
        return ret;
   
    // Parse the data. Note that binder_transaction() above already hooked the node for this
    // work onto the target proc, and the node's desc index in that proc is refs->desc.
    // The target proc is exactly the proc of the process that was blocked here in
    // binder_thread_read() and has just been woken up, so 'proc' here is that target proc.
    while (1) {
        uint32_t cmd;
        struct binder_transaction_data tr;
        struct binder_work *w;
        struct binder_transaction *t = NULL;

        if (!list_empty(&thread->todo))
            w = list_first_entry(&thread->todo, struct binder_work, entry);
        else if (!list_empty(&proc->todo) && wait_for_proc_work)
            w = list_first_entry(&proc->todo, struct binder_work, entry);
        else {
            if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
                goto retry;
            break;
        }

        if (end - ptr < sizeof(tr) + 4)
            break;

        switch (w->type) {
        case BINDER_WORK_TRANSACTION: {
            t = container_of(w, struct binder_transaction, work);
            // Execution lands here: t is the transaction created above in binder_transaction(); its
            // buffer already sits in kernel memory shared with this proc, so the data is handed over without another copy.
            } break;
            ......
        }

        if (!t) // no transaction needs to be handed up to application B in user space, so continue.
            continue;

        BUG_ON(t->buffer == NULL);
        if (t->buffer->target_node) { // set earlier to target_node = binder_context_mgr_node; or target_node = ref->node;
            // target_node here is the kernel-side node of application B, the process waiting for data
            struct binder_node *target_node = t->buffer->target_node;
            tr.target.ptr = target_node->ptr;
            tr.cookie =  target_node->cookie;
            t->saved_priority = task_nice(current);
            if (t->priority < target_node->min_priority &&
                !(t->flags & TF_ONE_WAY))
                binder_set_nice(t->priority);
            else if (!(t->flags & TF_ONE_WAY) ||
                 t->saved_priority > target_node->min_priority)
                binder_set_nice(target_node->min_priority);
            cmd = BR_TRANSACTION; // the command handed up is BR_TRANSACTION
        } else {
            tr.target.ptr = NULL;
            tr.cookie = NULL;
            cmd = BR_REPLY; // the command handed up is BR_REPLY
        }
        tr.code = t->code;
        tr.flags = t->flags;
        tr.sender_euid = t->sender_euid;

        if (t->from) {
            struct task_struct *sender = t->from->proc->tsk;
            tr.sender_pid = task_tgid_nr_ns(sender, current->nsproxy->pid_ns);
        } else {
            tr.sender_pid = 0;
        }

        tr.data_size = t->buffer->data_size;
        tr.offsets_size = t->buffer->offsets_size;
        tr.data.ptr.buffer = (void *)((void *)t->buffer->data + proc->user_buffer_offset);
        // rebase the pointer into B's mmap'ed user-space window, so B can read A's data directly
        tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *));

        if (put_user(cmd, (uint32_t __user *)ptr)) // push the cmd word first
            return -EFAULT;
        ptr += sizeof(uint32_t);
        if (copy_to_user(ptr, &tr, sizeof(tr))) // then push the struct binder_transaction_data tr; control block
            return -EFAULT;
        ptr += sizeof(tr);

        binder_stat_br(proc, thread, cmd);
        if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
            printk(KERN_INFO "binder: %d:%d %s %d %d:%d, cmd %d size %d-%d ptr %p-%p\n",
                   proc->pid, thread->pid,
                   (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : "BR_REPLY",
                   t->debug_id, t->from ? t->from->proc->pid : 0,
                   t->from ? t->from->pid : 0, cmd,
                   t->buffer->data_size, t->buffer->offsets_size,
                   tr.data.ptr.buffer, tr.data.ptr.offsets);

        list_del(&t->work.entry); // handed over to B's user space, so take t off the work list
        t->buffer->allow_user_free = 1;
        if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
            t->to_parent = thread->transaction_stack;
            t->to_thread = thread;
            thread->transaction_stack = t;
            // Make t the current thread's transaction_stack; binder_transaction() uses this
            // control block later when it handles the reply (reply == 1).
        } else {
            t->buffer->transaction = NULL;
            kfree(t);
            binder_stats.obj_deleted[BINDER_STAT_TRANSACTION]++;
        }
        break; // leave the while (1) loop
    }

done:

    *consumed = ptr - buffer;
    if (proc->requested_threads + proc->ready_threads == 0 &&
        proc->requested_threads_started < proc->max_threads &&
        (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
         BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
         /*spawn a new thread if we leave this out */) {
        proc->requested_threads++;
        if (binder_debug_mask & BINDER_DEBUG_THREADS)
            printk(KERN_INFO "binder: %d:%d BR_SPAWN_LOOPER\n",
                   proc->pid, thread->pid);
        if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
            return -EFAULT;
    }
    return 0;
}
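Putting the read side together: for one incoming call, the service manager's 32-word readbuf typically ends up holding the sequence below, which is exactly the stream binder_parse() walks through (BR_NOOP is skipped, BR_TRANSACTION dispatches to svcmgr_handler). This is an illustration of the framing for the 32-bit code quoted here, not a struct from the sources.

#include <stdint.h>

struct sm_read_stream {
    uint32_t noop;                      /* BR_NOOP, pushed while *consumed == 0 */
    uint32_t cmd;                       /* BR_TRANSACTION (or BR_REPLY)         */
    struct binder_transaction_data tr;  /* data.ptr.buffer already rebased into
                                           this process via user_buffer_offset  */
} __attribute__((packed));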


static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
    int ret;
    struct vm_struct *area;
    struct binder_proc *proc = filp->private_data;
    const char *failure_string;
    struct binder_buffer *buffer;

    if ((vma->vm_end - vma->vm_start) > SZ_4M)
        vma->vm_end = vma->vm_start + SZ_4M; // clamp the mapping to at most 4 MB

    if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
        printk(KERN_INFO "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, vma->vm_page_prot);

    if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
        ret = -EPERM;
        failure_string = "bad vm_flags";
        goto err_bad_arg;
    }
    vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;

    area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); // reserve a contiguous kernel virtual address range (vmalloc space) of the same size.
    if (area == NULL) {
        ret = -ENOMEM;
        failure_string = "get_vm_area";
        goto err_get_vm_area_failed;
    }
    proc->buffer = area->addr; // record the kernel virtual start address of that area in proc->buffer
    proc->user_buffer_offset = vma->vm_start - (size_t)proc->buffer;
    // user_buffer_offset is the distance between this proc's user-space mapping and the kernel-side area.

#ifdef CONFIG_CPU_CACHE_VIPT
    if (cache_is_vipt_aliasing()) {
        while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
            printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
            vma->vm_start += PAGE_SIZE;
        }
    }
#endif
    // Allocate the array of page pointers needed to build the MMU mappings for this area.
    proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
    if (proc->pages == NULL) {
        ret = -ENOMEM;
        failure_string = "alloc page array";
        goto err_alloc_pages_failed;
    }
    proc->buffer_size = vma->vm_end - vma->vm_start; // the buffer size this proc actually gets

    vma->vm_ops = &binder_vm_ops;
    vma->vm_private_data = proc;

    // binder_update_page_range allocates PAGE_SIZE of kernel memory and sets up its MMU mapping
    if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
        ret = -ENOMEM;
        failure_string = "alloc small buf";
        goto err_alloc_small_buf_failed;
    }
    buffer = proc->buffer;
    INIT_LIST_HEAD(&proc->buffers);
    list_add(&buffer->entry, &proc->buffers); // the buffer adds itself to the proc->buffers list
    buffer->free = 1;
    binder_insert_free_buffer(proc, buffer);
    proc->free_async_space = proc->buffer_size / 2;
    barrier();
    proc->vma = vma;

    /*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
    return 0;

err_alloc_small_buf_failed:
    kfree(proc->pages);
err_alloc_pages_failed:
    vfree(proc->buffer);
err_get_vm_area_failed:
    mutex_unlock(&binder_lock);
err_bad_arg:
    printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n", proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
    return ret;
}
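The pointer relationship binder_mmap() establishes can be summarised by a tiny helper. Note that this exact function does not exist in the driver (it is hypothetical); binder_thread_read() simply performs the addition inline when it fills tr.data.ptr.buffer.

/* hypothetical helper: a kernel address inside proc->buffer maps to the address
 * the same bytes have in the process's mmap'ed window */
static void __user *binder_kernel_to_user(struct binder_proc *proc, void *kaddr)
{
    return (void __user *)((size_t)kaddr + proc->user_buffer_offset);
}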

static void binder_insert_free_buffer(
    struct binder_proc *proc, struct binder_buffer *new_buffer)
{
    struct rb_node **p = &proc->free_buffers.rb_node; // red-black tree ordered by buffer_size
    struct rb_node *parent = NULL;
    struct binder_buffer *buffer;
    size_t buffer_size;
    size_t new_buffer_size;

    BUG_ON(!new_buffer->free);

    new_buffer_size = binder_buffer_size(proc, new_buffer); // size derived from new_buffer's address.
    // For the buffer created in binder_mmap() this equals
    // (vma->vm_end - vma->vm_start) - offsetof(struct binder_buffer, data)

    if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
        printk(KERN_INFO "binder: %d: add free buffer, size %d, "
               "at %p\n", proc->pid, new_buffer_size, new_buffer);

    while (*p) {
        parent = *p;
        buffer = rb_entry(parent, struct binder_buffer, rb_node);
        BUG_ON(!buffer->free);

        buffer_size = binder_buffer_size(proc, buffer);

        if (new_buffer_size < buffer_size)
            p = &parent->rb_left;
        else
            p = &parent->rb_right;
    }
    rb_link_node(&new_buffer->rb_node, parent, p);
    rb_insert_color(&new_buffer->rb_node, &proc->free_buffers); // rebalance the rbtree.
}

static size_t binder_buffer_size(
    struct binder_proc *proc, struct binder_buffer *buffer)
{
    if (list_is_last(&buffer->entry, &proc->buffers))
        return proc->buffer + proc->buffer_size - (void *)buffer->data; // size derived from addresses
// For the buffer freshly created in binder_mmap(), proc->buffer equals buffer, so this returns the
// data length that remains after the header in front of buffer->data is excluded.
    else
        return (size_t)list_entry(buffer->entry.next,
            struct binder_buffer, entry) - (size_t)buffer->data; // size derived from addresses
}
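A small stand-alone example (made-up header layout and addresses, not driver code) of this address-based size computation: a buffer's payload simply runs up to wherever the next binder_buffer header begins, which is why unlinking a neighbour during coalescing grows the payload without any size field to update.

#include <stddef.h>
#include <stdio.h>

struct toy_buffer { long links[6]; char data[]; }; /* stand-in for the binder_buffer header */

int main(void)
{
    static char area[0x800];
    struct toy_buffer *a = (struct toy_buffer *)&area[0x000];
    struct toy_buffer *b = (struct toy_buffer *)&area[0x400]; /* a's next in the list */

    /* equivalent of binder_buffer_size(proc, a) when a is not the last element */
    size_t a_payload = (size_t)((char *)b - a->data);
    printf("a's payload: %zu bytes\n", a_payload); /* 0x400 minus the header size */
    return 0;
}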

static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
    size_t data_size, size_t offsets_size, int is_async)
{
    struct rb_node *n = proc->free_buffers.rb_node;
    struct binder_buffer *buffer;
    size_t buffer_size;
    struct rb_node *best_fit = NULL;
    void *has_page_addr;
    void *end_page_addr;
    size_t size;

    if (proc->vma == NULL) {
        printk(KERN_ERR "binder: %d: binder_alloc_buf, no vma\n",
               proc->pid);
        return NULL;
    }

    size = ALIGN(data_size, sizeof(void *)) +
        ALIGN(offsets_size, sizeof(void *));

    if (size < data_size || size < offsets_size) {
        binder_user_error("binder: %d: got transaction with invalid "
            "size %d-%d\n", proc->pid, data_size, offsets_size);
        return NULL;
    }

    if (is_async &&
        proc->free_async_space < size + sizeof(struct binder_buffer)) {
        if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
            printk(KERN_ERR "binder: %d: binder_alloc_buf size %d f"
                   "ailed, no async space left\n", proc->pid, size);
        return NULL;
    }

    while (n) {
        buffer = rb_entry(n, struct binder_buffer, rb_node);
        BUG_ON(!buffer->free);
        buffer_size = binder_buffer_size(proc, buffer);

        if (size < buffer_size) {
            best_fit = n; // larger than what we asked for, but keep it as a candidate
            n = n->rb_left;
        } else if (size > buffer_size)
            n = n->rb_right;
        else {
            best_fit = n; // exactly the size we asked for, perfect, take it and break out
            break;
        }
    }
    if (best_fit == NULL) {
        printk(KERN_ERR "binder: %d: binder_alloc_buf size %d failed, "
               "no address space\n", proc->pid, size);
        return NULL;
    }
    if (n == NULL) {
        // no buffer of exactly this size, so best_fit is larger than size and will be split below
        buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
        buffer_size = binder_buffer_size(proc, buffer);
    }
    if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
        printk(KERN_INFO "binder: %d: binder_alloc_buf size %d got buff"
               "er %p size %d\n", proc->pid, size, buffer, buffer_size);

    has_page_addr =
        (void *)(((size_t)buffer->data + buffer_size) & PAGE_MASK);
    if (n == NULL) {
        if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
            buffer_size = size; /* no room for other buffers */
        else
            buffer_size = size + sizeof(struct binder_buffer); // buffer_size becomes what we actually carve out
    }
    end_page_addr = (void *)PAGE_ALIGN((size_t)buffer->data + buffer_size);
    if (end_page_addr > has_page_addr)
        end_page_addr = has_page_addr;
    if (binder_update_page_range(proc, 1,
        (void *)PAGE_ALIGN((size_t)buffer->data), end_page_addr, NULL))
// binder_mmap() only mapped one page up front; here the remaining page-aligned pages for the data area are mapped
        return NULL;

    rb_erase(best_fit, &proc->free_buffers); // take this buffer off free_buffers
    buffer->free = 0;
    binder_insert_allocated_buffer(proc, buffer); // and insert it into allocated_buffers.
    if (buffer_size != size) {
// buffer_size is larger than the requested size, so the
// chosen buffer has to be split.
        struct binder_buffer *new_buffer = (void *)buffer->data + size;
        list_add(&new_buffer->entry, &buffer->entry);
        // at this point new_buffer->entry.next still equals &proc->buffers
        new_buffer->free = 1;
        binder_insert_free_buffer(proc, new_buffer);
        // this is how a large contiguous chunk gets sliced down to a right-sized data area
    }
    if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
        printk(KERN_INFO "binder: %d: binder_alloc_buf size %d got "
               "%p\n", proc->pid, size, buffer);
    buffer->data_size = data_size;
    buffer->offsets_size = offsets_size;
    buffer->async_transaction = is_async;
    if (is_async) {
        proc->free_async_space -= size + sizeof(struct binder_buffer);
        if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC_ASYNC)
            printk(KERN_INFO "binder: %d: binder_alloc_buf size %d "
                   "async free %d\n", proc->pid, size,
                   proc->free_async_space);
    }

    return buffer;
}