Android : Binder 驅動情景分析

Binder驅動情景分析

路徑:

/framework/base/core/java/android/os/  
    - IInterface.java
    - IBinder.java
    - Parcel.java
    - IServiceManager.java
    - ServiceManager.java
    - ServiceManagerNative.java
    - Binder.java  


/framework/base/core/jni/    
    - android_os_Parcel.cpp
    - AndroidRuntime.cpp
    - android_util_Binder.cpp (核心類)


/framework/native/libs/binder         
    - IServiceManager.cpp
    - BpBinder.cpp
    - Binder.cpp
    - IPCThreadState.cpp (核心類)
    - ProcessState.cpp  (核心類)

/framework/native/include/binder/
    - IServiceManager.h
    - IInterface.h

/framework/native/cmds/servicemanager/
    - service_manager.c
    - binder.c


/kernel/drivers/staging/android/
    - binder.c
    - uapi/binder.h

1. 啟動 service manager

源碼的路徑在:
framework/native/cmds/servicemanager/
  - service_manager.c
  - binder.c
  
kernel/drivers/ (不同Linux分支路徑略有不同)
  - staging/android/binder.c
  - android/binder.c 

生成的可執(zhí)行文件的名字是:servicemanager

1.1 概述

create_servicemanager.jpg

(內核中,第一次調用binder_ioctl(),會創(chuàng)建一個binder_thread)

b.調用ioctl 告訴驅動這是servicemanager, ioctl(BINDER_SET_CONTEXT_MGR)

c.調用ioctl 發(fā)起一個寫操作 binder_thread_write, ioctl(BC_ENTER_LOOPER)

d.調用ioctl 發(fā)起一個讀操作 binder_thread_read(一開始讀的時候,內核中 binder_thread_read給data一個cmd=BR_NOOP),

沒有數(shù)據的時候service manager進程休眠,等待其他程序給service manager 發(fā)送消息。

1.2 源碼分析

service manager的入口函數(shù)在service_manager.c中

// Userspace handle to an open binder driver connection
// (frameworks/native/cmds/servicemanager/binder.c).
struct binder_state
{
    int fd;       // file descriptor of the opened /dev/binder device
    void *mapped; // start address returned by mmap on that fd
    size_t mapsize; // size of the mapped area; servicemanager uses 128KB
};


int main(int argc, char** argv)
{
    struct binder_state *bs;
    union selinux_callback cb;
    char *driver;

    if (argc > 1) {
        driver = argv[1];
    } else {
        driver = "/dev/binder";
    }

    bs = binder_open(driver, 128*1024);  //打開binder驅動 mmap等 申請空間大小128k
    ......

    //調用ioctl,告訴驅動這是service manager
    //return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
    if (binder_become_context_manager(bs)) {  
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }

    ...
    //陷入循環(huán)讀取數(shù)據
    binder_loop(bs, svcmgr_handler);

    return 0;
}

// Open the binder driver, verify the protocol version, and mmap
// `mapsize` bytes of read-only buffer space shared with the kernel.
// Returns a heap-allocated binder_state on success, NULL on failure
// (all partially-acquired resources are released on failure paths).
struct binder_state *binder_open(const char* driver, size_t mapsize)
{
    struct binder_state *bs;
    struct binder_version vers;

    bs = malloc(sizeof(*bs));
    if (!bs) {
        errno = ENOMEM;
        return NULL;
    }

    // Driver side: binder_open() creates a binder_proc object, stores it
    // in filp->private_data and links it into the global binder_procs list.
    bs->fd = open(driver, O_RDWR | O_CLOEXEC);
    if (bs->fd < 0) {
        fprintf(stderr,"binder: cannot open %s (%s)\n",
                driver, strerror(errno));
        goto fail_open;
    }

    // Ask the driver for its protocol version and require an exact match.
    if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
        (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
        fprintf(stderr,
                "binder: kernel driver version (%d) differs from user space version (%d)\n",
                vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION);
        goto fail_map; // BUGFIX: was `goto fail_open`, which leaked bs->fd
    }

    bs->mapsize = mapsize;
    // Driver side: mmap creates a binder_buffer object and adds it to
    // the current binder_proc's proc->buffers list.
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    if (bs->mapped == MAP_FAILED) {
        fprintf(stderr,"binder: cannot map device (%s)\n",
                strerror(errno));
        goto fail_map;
    }

    return bs;

fail_map:
    close(bs->fd);
fail_open:
    free(bs);
    return NULL;
}
kmalloc、kzalloc、vmalloc
kmalloc: 在內核空間申請內存,不做清零初始化操作,申請獲得物理內存。
kzalloc: 在kmalloc基礎上增加初始化清零操作
vmalloc: 在內核空間申請內存,它申請的內存是位于vmalloc_start到vmalloc_end之間的虛擬內存,獲取的是虛擬內存地址。

調用:ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);

內核態(tài):binder_ioctl

// Kernel excerpt (elided with "..."): the BINDER_SET_CONTEXT_MGR path
// of binder_ioctl, taken when servicemanager registers itself.
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    int ret;
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread;
    unsigned int size = _IOC_SIZE(cmd); //cmd: the command word sent down from userspace
    
    //(void __user *)arg marks arg as a userspace address: it must never be
    //dereferenced directly, only via copy_from_user()/copy_to_user(),
    //because this driver code runs in kernel space.
    void __user *ubuf = (void __user *)arg;

    ...

    thread = binder_get_thread(proc); //first call creates a binder_thread and adds it to &binder_proc->threads.rb_node
    
    ...

    switch (cmd) {
    ...
    case BINDER_SET_CONTEXT_MGR: //the only cmd this walkthrough covers
        //Creates a binder_node and hangs it on binder_proc's nodes red-black
        //tree, initializes its fields, and sets up the node's async_todo
        //and binder_work queues.
        ret = binder_ioctl_set_ctx_mgr(filp);  //creates the special context-manager binder_node
        if (ret)
            goto err;
        break;
    ...
    return ret; //return to userspace when done
}

用戶態(tài):

// Enter the service loop: announce this thread as a looper with
// BC_ENTER_LOOPER, then repeatedly block in BINDER_WRITE_READ reads and
// hand each returned buffer to binder_parse(), which dispatches to `func`.
void binder_loop(struct binder_state *bs, binder_handler func)
{
    struct binder_write_read bwr;
    uint32_t readbuf[32];
    int rc;

    // Pure read loop: nothing is ever queued on the write side here.
    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    // One-shot write so the kernel marks this thread as a looper
    // (sets BINDER_LOOPER_STATE_ENTERED in binder_thread_write).
    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));

    while (1) {
        // Arm a fresh read: a non-zero read_size makes this a read op.
        bwr.read_buffer = (uintptr_t) readbuf;
        bwr.read_consumed = 0;
        bwr.read_size = sizeof(readbuf);

        // Blocks in the kernel until another process sends us work.
        rc = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        if (rc < 0) {
            ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }

        // Dispatch whatever the kernel handed back.
        rc = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (rc == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (rc < 0) {
            ALOGE("binder_loop: io error %d %s\n", rc, strerror(errno));
            break;
        }
    }
}

內核態(tài):

// Kernel excerpt (elided with "..."): the BINDER_WRITE_READ path of
// binder_ioctl, taken for every ordinary read/write request.
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    int ret;
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;
    
    ...

    thread = binder_get_thread(proc); //looks up (or creates) the calling thread's binder_thread
    if (thread == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    switch (cmd) {
    case BINDER_WRITE_READ:
        ret = binder_ioctl_write_read(filp, cmd, arg, thread);
        if (ret)
            goto err;
        break;
     ...
    
    return ret; //return to userspace when done
}

binder_ioctl_write_read

// Handle BINDER_WRITE_READ: copy the binder_write_read header in from
// userspace, perform the write half and then the read half, and copy
// the updated header (consumed counters) back out. (Excerpt, elided.)
static int binder_ioctl_write_read(struct file *filp,
                unsigned int cmd, unsigned long arg,
                struct binder_thread *thread)
{
    int ret = 0;
    struct binder_proc *proc = filp->private_data;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;
    struct binder_write_read bwr;

    ...
    //copy the userspace bwr header (ubuf) into kernel space
    if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
        ret = -EFAULT;
        goto out;
    }
    
    ...
    //write half first, if the caller queued any outgoing data
    if (bwr.write_size > 0) {
        ret = binder_thread_write(proc, thread,
                      bwr.write_buffer,
                      bwr.write_size,
                      &bwr.write_consumed);
        trace_binder_write_done(ret);
        if (ret < 0) {
            bwr.read_consumed = 0;
            if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                ret = -EFAULT;
            goto out;
        }
    }
    
    if (bwr.read_size > 0) {
        ret = binder_thread_read(proc, thread, bwr.read_buffer,
                     bwr.read_size,
                     &bwr.read_consumed,
                     filp->f_flags & O_NONBLOCK);
        trace_binder_read_done(ret);
        binder_inner_proc_lock(proc);
        if (!binder_worklist_empty_ilocked(&proc->todo))//any work left on the process todo list?
            binder_wakeup_proc_ilocked(proc);//if so, wake a waiting thread
        binder_inner_proc_unlock(proc);
        if (ret < 0) {
            if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                ret = -EFAULT;
            goto out;
        }
    }
    ...
    
    if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { //copy the updated bwr header back to userspace
        ret = -EFAULT;
        goto out;
    }
out:
    return ret;
}
// Kernel excerpt (elided with "..."; the enclosing switch is elided):
// the BC_ENTER_LOOPER path of binder_thread_write.
static int binder_thread_write(struct binder_proc *proc,
            struct binder_thread *thread,
            binder_uintptr_t binder_buffer, size_t size,
            binder_size_t *consumed)
{
    uint32_t cmd;
    struct binder_context *context = proc->context;
    void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    //thread->return_error.cmd is initialised to BR_OK when the binder_thread is created
    while (ptr < end && thread->return_error.cmd == BR_OK) {
        int ret;

        if (get_user(cmd, (uint32_t __user *)ptr)) //fetch the 4-byte command word
            return -EFAULT;
        ptr += sizeof(uint32_t);
        
        ...
        case BC_ENTER_LOOPER:
            ...
            if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
                thread->looper |= BINDER_LOOPER_STATE_INVALID;
                binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
                    proc->pid, thread->pid);
            }
            thread->looper |= BINDER_LOOPER_STATE_ENTERED; //mark this thread as an entered looper
            break;

        ...

        *consumed = ptr - buffer;
    }
    return 0;
}

接著 binder_loop()進入 for 循環(huán),調用ioctl ,此時為讀操作

內核態(tài)

// Read half of BINDER_WRITE_READ (excerpt, elided with "..."): block
// until work arrives on the thread's or process's todo list, then
// stream BR_* commands and payloads into the userspace read buffer.
static int binder_thread_read(struct binder_proc *proc,
                  struct binder_thread *thread,
                  binder_uintptr_t binder_buffer, size_t size,
                  binder_size_t *consumed, int non_block)
{
    void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    int ret = 0;
    int wait_for_proc_work;

    if (*consumed == 0) {
        //fresh read (bwr.read_consumed == 0): prefix the buffer with a
        //4-byte BR_NOOP — every read buffer starts with BR_NOOP
        if (put_user(BR_NOOP, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t); //advance past the 4-byte command word
    }

retry:
    binder_inner_proc_lock(proc);
    wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
    binder_inner_proc_unlock(proc);

    thread->looper |= BINDER_LOOPER_STATE_WAITING;

    trace_binder_wait_for_work(wait_for_proc_work,
                   !!thread->transaction_stack,
                   !binder_worklist_empty(proc, &thread->todo));
    if (wait_for_proc_work) {
        if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
                    BINDER_LOOPER_STATE_ENTERED))) {
            binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
                proc->pid, thread->pid, thread->looper);
            wait_event_interruptible(binder_user_error_wait,
                         binder_stop_on_user_error < 2);
        }
        binder_restore_priority(current, proc->default_priority);
    }

    if (non_block) {
        if (!binder_has_work(thread, wait_for_proc_work))
            ret = -EAGAIN;
    } else {
        //no work yet: the service manager sleeps here until another
        //process queues a transaction and wakes it
        ret = binder_wait_for_work(thread, wait_for_proc_work);
    }

    thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

    if (ret)
        return ret;
    //drain the work lists
    while (1) {
        uint32_t cmd;
        struct binder_transaction_data tr;
        struct binder_work *w = NULL;
        struct list_head *list = NULL;
        struct binder_transaction *t = NULL;
        struct binder_thread *t_from;

        binder_inner_proc_lock(proc);
        //thread-local work first
        if (!binder_worklist_empty_ilocked(&thread->todo)) 
            list = &thread->todo;
        //then process-wide work
        else if (!binder_worklist_empty_ilocked(&proc->todo) &&
               wait_for_proc_work)
            list = &proc->todo;
        else {
            binder_inner_proc_unlock(proc);

            /* no data added */ 
            //only the 4-byte BR_NOOP was written (ptr - buffer == 4),
            //so go back to sleep instead of returning an empty buffer
            if (ptr - buffer == 4 && !thread->looper_need_return)
                goto retry;
            break;
        }

        if (end - ptr < sizeof(tr) + 4) {
            binder_inner_proc_unlock(proc);
            break;
        }
        w = binder_dequeue_work_head_ilocked(list);
        if (binder_worklist_empty_ilocked(&thread->todo))
            thread->process_todo = false;

        switch (w->type) {
        ...
    return 0;
}

2. Binder系統(tǒng)服務之c實現(xiàn)

binder.c中包含使用的函數(shù)
binder.c 代碼路徑:
frameworks/native/cmds/servicemanager/binder.c

test_server.c

//Command codes shared between test_server and test_client.
//BUGFIX: the original defines ended with ';', which expands
//"case CMD_SAY_HELLO:" into the invalid "case 0;:".
#define CMD_SAY_HELLO 0
#define CMD_SAY_HELLO_TO 1

//Print a greeting along with an invocation counter.
void sayhello(void){
    static int cnt = 0;
    fprintf(stderr, "sayhello :%d\n", cnt++);
}

//Print a personalized greeting; returns the post-print counter value.
//NOTE(review): cnt is incremented twice per call (once in the fprintf,
//once in the return), so return values go 1, 3, 5, ... — confirm intent.
int sayhello_to(char* name) {
    static int cnt = 0;
    fprintf(stderr, "sayhello_to %s :%d\n", name, cnt++);
    return cnt++;
}

//Server-side dispatch callback invoked by binder_parse() for each
//incoming transaction; txn->code selects the local function and the
//result is marshalled into `reply`.
int hello_handler(struct binder_state *bs,
                   struct binder_transaction_data *txn, //txn->code selects which function to invoke
                   struct binder_io *msg,
                   struct binder_io *reply)   //the result is marshalled into reply
{
    
    uint16_t *s;
    size_t len;
    uint32_t handle;
    uint32_t strict_policy;
    char name[512];
 
    strict_policy = bio_get_uint32(msg); //consume the strict-mode header word

    switch(txn->code) {
    case CMD_SAY_HELLO:
        sayhello();
        break;

    case CMD_SAY_HELLO_TO:
        //Pull the string argument out of msg.
        //NOTE(review): bio_get_string16() returns uint16_t* (UTF-16) but
        //sayhello_to() takes char* — only "works" for ASCII and needs a cast.
        s = bio_get_string16(msg, &len);
        uint32_t i = sayhello_to(s);
        bio_put_uint32(reply, i); //marshal the return value into reply
            
        break;
    default:
        ALOGE("unknown code %d\n", txn->code);
        return -1;
    }
    return 0;
}

//Register the service named `name` with the service manager.
//`target` is the service manager handle (0); `ptr` is the binder value
//published for the service. Returns the status word from the reply,
//or -1 if the call itself failed.
int svcmgr_publish(struct binder_state *bs, uint32_t target, const char *name, void *ptr)
{
    struct binder_io msg, reply;
    unsigned txn_data[512/4];
    int ret;

    //Marshal: strict-mode header, interface name, service name, object.
    bio_init(&msg, txn_data, sizeof(txn_data), 4);
    bio_put_uint32(&msg, 0);  // strict mode header
    bio_put_string16_x(&msg, SVC_MGR_NAME);
    bio_put_string16_x(&msg, name);
    bio_put_obj(&msg, ptr);

    //Round-trip to the service manager (write, then blocking read).
    if (binder_call(bs, &msg, &reply, target, SVC_MGR_ADD_SERVICE))
        return -1;

    ret = bio_get_uint32(&reply);

    binder_done(bs, &msg, &reply);

    return ret;
}


//test_server entry point: open the binder driver, publish the "hello"
//service with the service manager, then serve requests forever.
int main(int argc, char **argv)
{
    struct binder_state *bs;
    uint32_t svcmgr = BINDER_SERVICE_MANAGER; //handle 0 == service manager

    bs = binder_open("/dev/binder", 128*1024);
    if (!bs) {
        fprintf(stderr, "failed to open binder driver\n");
        return -1;
    }

    argc--;
    argv++;
    
    //add service
    //BUGFIX: check the result (previously ignored) and removed the
    //unused local `handle`.
    if (svcmgr_publish(bs, svcmgr, "hello", (void*)123)) {
        fprintf(stderr, "failed to publish hello service\n");
        return -1;
    }
  
    //Serve forever: read - parse - dispatch to hello_handler - reply.
    binder_loop(bs, hello_handler);
    
    return 0;
}

test_client.c

//Command codes (must match test_server).
//BUGFIX: removed the trailing ';' from the defines, which broke macro
//expansion in switch/case labels and call argument lists.
#define CMD_SAY_HELLO 0
#define CMD_SAY_HELLO_TO 1

struct binder_state *g_bs; //driver connection shared by the helpers below
uint32_t g_handle;         //handle of the "hello" service from svcmgr_lookup

//Invoke CMD_SAY_HELLO on the remote "hello" service (no arguments,
//no meaningful return value).
void sayhello(void) {
    unsigned txn_data[512/4];
    struct binder_io msg, reply;

    //Marshal just the strict-mode header word.
    bio_init(&msg, txn_data, sizeof(txn_data), 4);
    bio_put_uint32(&msg, 0);
    
    //Send and wait for the reply; bail out if the call failed.
    if (binder_call(g_bs, &msg, &reply, g_handle, CMD_SAY_HELLO)){
        return;
    }
    
    binder_done(g_bs, &msg, &reply);
}

//Invoke CMD_SAY_HELLO_TO(name) on the remote "hello" service and return
//the uint32 result from the reply, or -1 if the call failed.
int sayhello_to(char* name) {
    unsigned iodata[512/4];
    struct binder_io msg, reply;
    int ret;

    bio_init(&msg, iodata, sizeof(iodata), 4);
    bio_put_uint32(&msg, 0); // strict mode header
    bio_put_string16_x(&msg, name);
    
    if (binder_call(g_bs, &msg, &reply, g_handle, CMD_SAY_HELLO_TO)){
        return -1; //BUGFIX: bare "return;" is invalid in a function returning int
    }
    ret = bio_get_uint32(&reply); //BUGFIX: was bio_get_uint32(reply) — reply is a struct, not a pointer
    binder_done(g_bs, &msg, &reply);
    
    return ret;
}


//test_client entry point: look up the "hello" service, invoke it once
//(sayhello for 2 args, sayhello_to for 3), then release the handle.
int main(int argc, char **argv)
{
    int ret;
    uint32_t handle;
    uint32_t svcmgr = BINDER_SERVICE_MANAGER; //handle 0 == service manager
    struct binder_state *bs = binder_open("/dev/binder", 128*1024);

    if (!bs) {
        fprintf(stderr, "failed to open binder driver\n");
        return -1;
    }
    g_bs = bs;

    //Resolve the "hello" service to a handle via the service manager.
    handle = svcmgr_lookup(bs, svcmgr, "hello");
    g_handle = handle;

    //Fire one request at the server.
    if (argc == 2) {
        sayhello();
    } else if (argc == 3) {
        ret = sayhello_to(argv[2]);
    }
    
    binder_release(bs, handle);
    
    return 0;
}

3. 服務注冊過程

3.1 server端

a. open驅動 open("/dev/binder")  ioctl(BINDER_VERSION)  mmap

b. 構造數(shù)據,調用ioctl 發(fā)起一個寫操作(cmd = BC_TRANSACTION)

c. 發(fā)起寫操作傳入到內核態(tài)的數(shù)據,最終會放到 service manager 的 todo 鏈表中,之后喚醒 service manager
3.1.1 server 端構造數(shù)據并發(fā)送 (用戶態(tài))
- 構造數(shù)據 binder_io (其中包含重要的數(shù)據結構 binder_flat_object)
- 構造數(shù)據后調用binder_call 向驅動程序發(fā)送數(shù)據

    1.首先 binder_io 轉換為binder_transaction_data, 然后放入binder_write_read 結構體

    2.構造好數(shù)據之后,調用ioctl 發(fā)送數(shù)據

數(shù)據結構:

writebuf.jpg

構造數(shù)據
/*
* target = 0: send to the service manager
* name: the service name being registered
* ptr: the value published for the service (here, a function/cookie)
*/
int svcmgr_publish(struct binder_state *bs, uint32_t target, const char *name, void *ptr)
{
    int status;
    unsigned iodata[512/4];
    struct binder_io msg, reply;

    bio_init(&msg, iodata, sizeof(iodata), 4);//reserve the first 16 bytes
    bio_put_uint32(&msg, 0);  // strict mode header: a 4-byte all-zero word first
    bio_put_string16_x(&msg, SVC_MGR_NAME); //UTF-16 string: 4-byte length first, then 2 bytes per character
    bio_put_string16_x(&msg, name);  //the service name, also UTF-16
    bio_put_obj(&msg, ptr); //builds a flat_binder_object; the kernel creates a binder_node from it

    //binder_call sends the data into the kernel: write first, then read the reply
    if (binder_call(bs, &msg, &reply, target, SVC_MGR_ADD_SERVICE))
        return -1;

    status = bio_get_uint32(&reply);

    binder_done(bs, &msg, &reply);

    return status;
}
重點 : 構造一個 flat_binder_object
/**
 * struct flat_binder_object
 *.type 表示傳遞的是實體/引用  只有服務的提供者server 能傳遞實體(其他,service manager / client 只能傳遞引用)
 * flags 
 * binder/handle: 當type傳實體的時候, 這一項表示binder. 傳引用的時候,這一項表示handle
 * cookie = 0
 */

//Append a flat_binder_object carrying `ptr` as a binder entity.
//The kernel consumes this object and creates a binder_node for the
//sending process from it.
void bio_put_obj(struct binder_io *bio, void *ptr)
{
    struct flat_binder_object *fbo = bio_alloc_obj(bio);

    if (!fbo)
        return;

    fbo->hdr.type = BINDER_TYPE_BINDER;  //an entity (only the service owner sends entities)
    fbo->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    fbo->binder = (uintptr_t)ptr;   //the value supplied by the server
    fbo->cookie = 0;   //currently unused
}

調用binder_call構造數(shù)據并發(fā)送,先寫后讀

//Perform one synchronous binder transaction: marshal `msg` into a
//BC_TRANSACTION, ioctl it into the driver, then loop reading until a
//BR_REPLY arrives (binder_parse returns 0) or an error occurs.
//NOTE(review): this excerpt is truncated — the closing brace and the
//`fail:` label are not shown.
int binder_call(struct binder_state *bs,
                struct binder_io *msg, struct binder_io *reply,
                uint32_t target, uint32_t code)
{
    int res;
    struct binder_write_read bwr;
    struct {
        uint32_t cmd;  //4-byte command word preceding the payload
        struct binder_transaction_data txn;
    } __attribute__((packed)) writebuf;
    unsigned readbuf[32];

    if (msg->flags & BIO_F_OVERFLOW) {
        fprintf(stderr,"binder: txn buffer overflow\n");
        goto fail;
    }
    
    writebuf.cmd = BC_TRANSACTION;
    //Fill in the binder_transaction_data.
    writebuf.txn.target.handle = target; //destination; handle 0 == service manager
    writebuf.txn.code = code; //e.g. SVC_MGR_ADD_SERVICE when registering a service
    writebuf.txn.flags = 0;
    writebuf.txn.data_size = msg->data - msg->data0;
    writebuf.txn.offsets_size = ((char*) msg->offs) - ((char*) msg->offs0); //locates the flat_binder_object(s)
    writebuf.txn.data.ptr.buffer = (uintptr_t)msg->data0;
    writebuf.txn.data.ptr.offsets = (uintptr_t)msg->offs0;

    //Fill in the binder_write_read envelope.
    bwr.write_size = sizeof(writebuf);
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) &writebuf;
    for (;;) {//send, then wait for the driver to hand data back
        bwr.read_size = sizeof(readbuf); //sizeof(readbuf) == 128
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr); //write first, then read

        if (res < 0) {
            fprintf(stderr,"binder: ioctl failed (%s)\n", strerror(errno));
            goto fail;
        }
        //Parse the data returned from the kernel.
        res = binder_parse(bs, reply, (uintptr_t) readbuf, bwr.read_consumed, 0);
        if (res == 0) return 0; //binder_parse returns 0 once BR_REPLY is seen
        if (res < 0) goto fail;
    }
3.1.2 server 端處理 BC_TRANSACTION (內核態(tài))
3.內核態(tài) binder_ioctl , (binder_ioctl的作用是把數(shù)據放入目的進程的binder_proc結構體中todo 鏈表,然后喚醒目的進程)

    a. 根據 handle 找到目的進程 ,本例handle = 0, 即service manager

    b. 把數(shù)據 copy_from_user(), 放到service manager mmap的內核空間

    c. 處理offset數(shù)據 flat_binder_object

        1. 構造當前進程的(即server)binder_node結構體

        2. 構造binder_ref 給目的進程service manager,binder_ref指向binder_node(即server節(jié)點)

        3. 增加引用計數(shù)  **(會返回一些信息給當前進程)**

    d. 喚醒目的進程 service manager

源碼:

binder_thread_write
//Kernel excerpt (elided with "..."; the enclosing switch is elided):
//the BC_TRANSACTION / BC_REPLY path of binder_thread_write.
static int binder_thread_write(struct binder_proc *proc,
            struct binder_thread *thread,    //the calling thread, created on the first ioctl
            binder_uintptr_t binder_buffer,  //bwr.write_buffer
            size_t size,                     //bwr.write_size,
            binder_size_t *consumed)         //&bwr.write_consumed
{
    uint32_t cmd;
    struct binder_context *context = proc->context;
    //__user marks a userspace address: it must be copied into the kernel before use
    void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;
    
    //consume commands one by one; ptr points at the data start, end at the data end
    while (ptr < end && thread->return_error.cmd == BR_OK) {
        int ret;

        //fetch the 4-byte cmd from userspace
        if (get_user(cmd, (uint32_t __user *)ptr)) 
            return -EFAULT;
        //advance four bytes to the payload
        ptr += sizeof(uint32_t); 
        
        ...
        
        case BC_TRANSACTION:
        case BC_REPLY: {
            struct binder_transaction_data tr;
            //copy the binder_transaction_data header from userspace
            //(only the header — buffer contents are copied later)
            if (copy_from_user(&tr, ptr, sizeof(tr)))
                return -EFAULT;
            //advance ptr to the next cmd (if any)
            ptr += sizeof(tr);
            //the key processing step
            binder_transaction(proc, thread, &tr, cmd == BC_REPLY, 0);
            break;
          }
        ...
    }
    return 0;
}
1.根據handle == 0, 找到特殊 target_node, 進而找到 target_proc.

2.創(chuàng)建 binder_transaction *t ,  binder_work *tcomplete. 并初始化

3.從用戶空間拷貝數(shù)據到目的進程映射的內核空間。

4.處理flat_binder_object (重點)

   a. 給當前進程創(chuàng)建一個 binder_node

   b. 給目的進程service manager 創(chuàng)建一個binder_ref. 
      binder_ref.node = 當前進程的binder_node(此處為server的binder_node)
      binder_ref.desc 引用計數(shù)+1
   c. 修改fp->hdr.type = BINDER_TYPE_HANDLE;
5.tcomplete放到當前線程的todo鏈表中, t放到目標線程的todo鏈表中。喚醒目標進程service manager
//Kernel excerpt (elided with "..." and truncated before the closing
//brace): the BC_TRANSACTION path of binder_transaction. Copies the
//payload into the target process's mapped space, translates any
//flat_binder_object entries, queues the work and wakes the target.
//BUGFIX: removed a stray invalid character that had crept into the
//!TF_ONE_WAY branch.
static void binder_transaction(struct binder_proc *proc, //current (sending) process
                   struct binder_thread *thread,  //current (sending) thread
                   struct binder_transaction_data *tr, //the payload to process
                   int reply, 
                   binder_size_t extra_buffers_size) //extra_buffers_size == 0 here
{
    int ret;
    //the two key bookkeeping structures for this transaction
    struct binder_transaction *t;
    struct binder_work *tcomplete;
    
    
    binder_size_t *offp, *off_end, *off_start;
    binder_size_t off_min;
    u8 *sg_bufp, *sg_buf_end;
    struct binder_proc *target_proc = NULL;  //destination process
    struct binder_thread *target_thread = NULL;  //destination thread
    struct binder_node *target_node = NULL;  //destination node
    struct binder_transaction *in_reply_to = NULL;
    ...
    struct binder_context *context = proc->context;
    
    ...
        
    if (reply) {
        //BC_REPLY takes this branch
    } else {
        //locate the destination process
        if (tr->target.handle) { //non-zero handle; here it is 0 (service manager)
            ...
        } else {
            mutex_lock(&context->context_mgr_node_lock);
            //handle == 0: target_node is the special context-manager node,
            //created when servicemanager registered itself with the driver
            target_node = context->binder_context_mgr_node; 
            if (target_node)
                //derive target_proc (the service manager process) from target_node
                target_node = binder_get_node_refs_for_txn(target_node, &target_proc, &return_error);
            else
                return_error = BR_DEAD_REPLY;
            mutex_unlock(&context->context_mgr_node_lock);
            ...
         }  
        ...
    }
    
    ...
    //allocate the binder_transaction in kernel memory
    t = kzalloc(sizeof(*t), GFP_KERNEL); 
    binder_stats_created(BINDER_STAT_TRANSACTION);

    //allocate the binder_work completion marker
    tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
    binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
    
    ...
    //for ADD_SERVICE, tr->flags == 0
    if (!reply && !(tr->flags & TF_ONE_WAY))
        //a reply is expected: remember the sending thread in the transaction
        t->from = thread;
    else
        t->from = NULL;
    t->sender_euid = task_euid(proc->tsk);
    t->to_proc = target_proc; //destination process
    t->to_thread = target_thread; //destination thread
    t->code = tr->code; //for this call, code == ADD_SERVICE_TRANSACTION
    t->flags = tr->flags; //flags == 0
    ...

    //carve t->buffer out of the destination process's mapped area
    t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
            tr->offsets_size, extra_buffers_size,
            !reply && (t->flags & TF_ONE_WAY)); 

    ...
    
    t->buffer->allow_user_free = 0;
    t->buffer->debug_id = t->debug_id;
    t->buffer->transaction = t;
    t->buffer->target_node = target_node;
 
    off_start = (binder_size_t *)(t->buffer->data +
                          ALIGN(tr->data_size, sizeof(void *)));
    offp = off_start;

    //copy ptr.buffer and ptr.offsets from userspace into the kernel
    //space mapped by the destination process (t->buffer->data)
    if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)tr->data.ptr.buffer, tr->data_size))     {
        ......
    }

    if (copy_from_user(offp, (const void __user *)(uintptr_t)tr->data.ptr.offsets, tr->offsets_size)) {
        ......
    }

    ......
    off_end = (void *)off_start + tr->offsets_size;
    ......
    //walk the offsets and translate each flat_binder_object
    for (; offp < off_end; offp++) {
        struct binder_object_header *hdr;

        ......

        hdr = (struct binder_object_header *)(t->buffer->data + *offp);
        switch (hdr->type) {
            case BINDER_TYPE_BINDER:
            case BINDER_TYPE_WEAK_BINDER: {
                struct flat_binder_object *fp;

                fp = to_flat_binder_object(hdr);
                //the interesting path — analysed separately below
                ret = binder_translate_binder(fp, t, thread);
                ...
            } break;
            case BINDER_TYPE_HANDLE:
            case BINDER_TYPE_WEAK_HANDLE: 

            ...
        }
        ...
    }
    //the sending process consumes this and reports BR_TRANSACTION_COMPLETE to userspace
    tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; 
    //BINDER_WORK_TRANSACTION goes to the target queue, here target_proc->todo
    t->work.type = BINDER_WORK_TRANSACTION;
    if (reply) {
        //BC_REPLY takes this branch
    } else if (!(t->flags & TF_ONE_WAY)) {
        BUG_ON(t->buffer->async_transaction != 0);
        binder_inner_proc_lock(proc);

        //list_add_tail(&tcomplete->entry, &thread->todo); queue tcomplete on the current thread
        binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
        t->need_reply = 1;

        //push onto the sender's transaction stack; transaction_stack is
        //still NULL on the very first transaction
        t->from_parent = thread->transaction_stack; //from_parent links to the previous top of stack
        
        //make t the new top of the sender's transaction stack
        thread->transaction_stack = t;

        binder_inner_proc_unlock(proc);
        //queue t on the destination process's todo list and wake it up
        if (!binder_proc_transaction(t, target_proc, target_thread)) {
            binder_inner_proc_lock(proc);
            binder_pop_transaction_ilocked(thread, t);
            binder_inner_proc_unlock(proc);
            goto err_dead_proc_or_thread;
        }
    } else {
        ...
    }
    ...
    return;

此時server 線程的傳輸棧信息如下:

transaction_stack
from server'thread
to_proc ServiceManager
to_thread ServiceManager'thread
from_parent NULL
<font color="red">binder_translate_binder 分析</font>

這段代碼的作用:

1. 給當前進程創(chuàng)建一個 binder_node
2. 給目的進程service manager 創(chuàng)建一個binder_ref. 
    binder_ref.node = 當前進程的binder_node(此處為server的binder_node)
    binder_ref.desc 引用計數(shù)+1
3. 修改fp->hdr.type = BINDER_TYPE_HANDLE;
//Translate an incoming BINDER_TYPE_BINDER object: ensure the sender has
//a binder_node for it, give the target process a binder_ref to that
//node, and rewrite the object into a handle. (Excerpt, elided.)
static int binder_translate_binder(struct flat_binder_object *fp,
                   struct binder_transaction *t,
                   struct binder_thread *thread) //the server's sending thread
{
    struct binder_node *node;
    struct binder_proc *proc = thread->proc; //current (sending) process
    struct binder_proc *target_proc = t->to_proc; //destination process
    struct binder_ref_data rdata;
    int ret = 0;

    //look up the binder_node for this binder value in the current process
    node = binder_get_node(proc, fp->binder);
    if (!node) {
        //none yet: create and initialise a binder_node for the current process
        //node->proc = proc;
        //node->ptr = fp->binder;
        //node->cookie = fp->cookie;
        //node->work.type = BINDER_WORK_NODE;
        node = binder_new_node(proc, fp);
    }
    ...

    //create a binder_ref for target_proc: binder_ref.proc = target_proc,
    //binder_ref.node points at the current process's node.
    //Bumping the refcount triggers BR_INCREFS / BR_ACQUIRE back to the sender,
    //and &node->work is queued on the sending thread's todo list via
    //binder_enqueue_work_ilocked(&node->work, target_list);
    //ref->data.strong++;
    ret = binder_inc_ref_for_node(target_proc, node, fp->hdr.type == BINDER_TYPE_BINDER, &thread->todo,             &rdata);
    if (ret)
        goto done;

    if (fp->hdr.type == BINDER_TYPE_BINDER)
        fp->hdr.type = BINDER_TYPE_HANDLE; //turn the entity into a reference
    else
        fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
    fp->binder = 0;
    fp->handle = rdata.desc; //the handle value; the reference count was already incremented
    fp->cookie = 0;
    ...
done:
    binder_put_node(node);
    return ret;
}
binder_thread_read
//binder_thread_read fragment: BINDER_WORK_TRANSACTION_COMPLETE was queued
//on the sending thread by binder_transaction; report it to userspace.
case BINDER_WORK_TRANSACTION_COMPLETE: {
    binder_inner_proc_unlock(proc);
    cmd = BR_TRANSACTION_COMPLETE; //tells userspace the transaction was queued successfully
    if (put_user(cmd, (uint32_t __user *)ptr))
        return -EFAULT;
    ptr += sizeof(uint32_t);

    binder_stat_br(proc, thread, cmd);
    kfree(w);
    binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
} break;

//當前進程創(chuàng)建binder_node 時候,node->work.type = BINDER_WORK_NODE;
//下面這段代碼不是很懂 強 弱引用???
/* Excerpt from binder_thread_read(): synchronise a node's user-space
 * reference state with its kernel-side reference counts.
 * "strong"/"weak" compute whether the node SHOULD currently hold a
 * strong/weak user-space reference; has_strong_ref/has_weak_ref record
 * whether user space ALREADY holds one.  Any mismatch is repaired by
 * sending BR_INCREFS/BR_ACQUIRE (take refs) or BR_RELEASE/BR_DECREFS
 * (drop refs) up to user space. */
case BINDER_WORK_NODE: {
    struct binder_node *node = container_of(w, struct binder_node, work);
    int strong, weak;
    binder_uintptr_t node_ptr = node->ptr;
    binder_uintptr_t node_cookie = node->cookie;
    int node_debug_id = node->debug_id;
    int has_weak_ref;
    int has_strong_ref;
    void __user *orig_ptr = ptr;

    BUG_ON(proc != node->proc);
    /* Desired state: a strong user-space ref is needed while any kernel
     * strong refs exist; a weak ref is needed while any refs at all
     * exist (strong implies weak). */
    strong = node->internal_strong_refs || node->local_strong_refs;
    weak = !hlist_empty(&node->refs) || node->local_weak_refs || node->tmp_refs || strong;
    /* Current state as last acknowledged by user space. */
    has_strong_ref = node->has_strong_ref;
    has_weak_ref = node->has_weak_ref;

    if (weak && !has_weak_ref) {
        /* Need a weak ref but user space has none: mark it pending and
         * emit BR_INCREFS below. */
        node->has_weak_ref = 1;
        node->pending_weak_ref = 1;
        node->local_weak_refs++;
    }
    if (strong && !has_strong_ref) {
        /* Need a strong ref: mark it pending and emit BR_ACQUIRE below. */
        node->has_strong_ref = 1;
        node->pending_strong_ref = 1;
        node->local_strong_refs++;
    }
    if (!strong && has_strong_ref)
        node->has_strong_ref = 0; /* no longer needed: emit BR_RELEASE below */
    if (!weak && has_weak_ref)
        node->has_weak_ref = 0; /* no longer needed: emit BR_DECREFS below */
    if (!weak && !strong) {
        /* No references remain anywhere: remove and free the node. */
        rb_erase(&node->rb_node, &proc->nodes);
        binder_inner_proc_unlock(proc);
        binder_node_lock(node);

        binder_node_unlock(node);
        binder_free_node(node);
    } else
        binder_inner_proc_unlock(proc);

    /* Emit the commands decided above (ptr advances into the read buffer). */
    if (weak && !has_weak_ref)
        ret = binder_put_node_cmd(
                    proc, thread, &ptr, node_ptr,
                    node_cookie, node_debug_id,
                    BR_INCREFS, "BR_INCREFS");
    if (!ret && strong && !has_strong_ref)
        ret = binder_put_node_cmd(
                    proc, thread, &ptr, node_ptr,
                    node_cookie, node_debug_id,
                    BR_ACQUIRE, "BR_ACQUIRE");
    if (!ret && !strong && has_strong_ref)
        ret = binder_put_node_cmd(
                    proc, thread, &ptr, node_ptr,
                    node_cookie, node_debug_id,
                    BR_RELEASE, "BR_RELEASE");
    if (!ret && !weak && has_weak_ref)
        ret = binder_put_node_cmd(
                    proc, thread, &ptr, node_ptr,
                    node_cookie, node_debug_id,
                    BR_DECREFS, "BR_DECREFS");
    if (ret)
        return ret;
} break;

BR_NOOP

BINDER_WORK_NODE(增加 cmd : BR_INCREFS BR_ACQUIRE)

BINDER_WORK_TRANSACTION_COMPLETE (增加 cmd : BR_TRANSACTION_COMPLETE)

處理數(shù)據完之后, 返回用戶空間。

用戶空間調用binder_parse() 處理數(shù)據, 進入循環(huán),最先讀到 BR_NOOP 最終由于沒有可讀數(shù)據,當前進程進入休眠。

3.1.3 server 端被喚醒 cmd = BR_REPLY (內核態(tài))
while (1) {
        uint32_t cmd;
        struct binder_transaction_data tr;
        struct binder_work *w = NULL;
        struct list_head *list = NULL;
        struct binder_transaction *t = NULL;
        struct binder_thread *t_from;

        binder_inner_proc_lock(proc);
        if (!binder_worklist_empty_ilocked(&thread->todo))
            list = &thread->todo;
        else if (!binder_worklist_empty_ilocked(&proc->todo) &&
               wait_for_proc_work)
            list = &proc->todo;
        else {
            binder_inner_proc_unlock(proc);

            /* no data added */
            if (ptr - buffer == 4 && !thread->looper_need_return)
                goto retry;
            break;
        }

        if (end - ptr < sizeof(tr) + 4) {
            binder_inner_proc_unlock(proc);
            break;
        }
        w = binder_dequeue_work_head_ilocked(list);
        if (binder_worklist_empty_ilocked(&thread->todo))
            thread->process_todo = false;
    
     switch (w->type) {
        case BINDER_WORK_TRANSACTION: {
            binder_inner_proc_unlock(proc);
            
            t = container_of(w, struct binder_transaction, work);
        } break;
        

        BUG_ON(t->buffer == NULL);
        //由于target_node 是根據flat_binder_object 得到的,所以此處走else
        if (t->buffer->target_node) {
            ...
        } else {
            tr.target.ptr = 0;
            tr.cookie = 0;
            cmd = BR_REPLY;
        }
        
        tr.code = t->code;
        tr.flags = t->flags;
        tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

        t_from = binder_get_txn_from(t);
        if (t_from) {
            struct task_struct *sender = t_from->proc->tsk;

            tr.sender_pid = task_tgid_nr_ns(sender,
                            task_active_pid_ns(current));
        } else {
            tr.sender_pid = 0;
        }

        tr.data_size = t->buffer->data_size;
        tr.offsets_size = t->buffer->offsets_size;
        tr.data.ptr.buffer = (binder_uintptr_t)
            ((uintptr_t)t->buffer->data +
            binder_alloc_get_user_buffer_offset(&proc->alloc));
        tr.data.ptr.offsets = tr.data.ptr.buffer +
                    ALIGN(t->buffer->data_size,
                        sizeof(void *));

        if (put_user(cmd, (uint32_t __user *)ptr)) {
            if (t_from)
                binder_thread_dec_tmpref(t_from);

            binder_cleanup_transaction(t, "put_user failed",
                           BR_FAILED_REPLY);

            return -EFAULT;
        }
        ptr += sizeof(uint32_t);
        if (copy_to_user(ptr, &tr, sizeof(tr))) {
            if (t_from)
                binder_thread_dec_tmpref(t_from);

            binder_cleanup_transaction(t, "copy_to_user failed",
                           BR_FAILED_REPLY);

            return -EFAULT;
        }
        ptr += sizeof(tr);

        trace_binder_transaction_received(t);
        binder_stat_br(proc, thread, cmd);
        

        if (t_from)
            binder_thread_dec_tmpref(t_from);
        t->buffer->allow_user_free = 1;
        if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
            ...
        } else {
            binder_free_transaction(t);
        }
        break;
    }
3.1.4 server 端處理BR_REPLY (用戶態(tài))
case BR_REPLY: {
    struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
    if ((end - ptr) < sizeof(*txn)) {
        ALOGE("parse: reply too small!\n");
        return -1;
    }
    binder_dump_txn(txn);
    if (bio) {
        bio_init_from_txn(bio, txn);
        bio = 0;
    } else {
        /* todo FREE BUFFER */
    }
    ptr += sizeof(*txn);
    r = 0;
    break;
}

...
return r;

binder_call執(zhí)行完之后

/* NOTE(review): despite the name shown here, this body matches
 * svcmgr_publish() from servicemanager/binder.c -- it sends
 * SVC_MGR_ADD_SERVICE (service registration) and returns a status word,
 * whereas svcmgr_lookup() sends SVC_MGR_CHECK_SERVICE and returns a
 * handle.  The declaration of `status` is in the elided part; verify
 * against the original AOSP source. */
uint32_t svcmgr_lookup(struct binder_state *bs, uint32_t target, const char *name)
{
    ...
    /* One full transaction: BC_TRANSACTION out, block until BR_REPLY. */
    if (binder_call(bs, &msg, &reply, target, SVC_MGR_ADD_SERVICE))
        return -1;

    /* service_manager's reply payload is a single 32-bit status (0 = ok). */
    status = bio_get_uint32(&reply);

    /* Ask the driver to free the kernel buffer backing the reply. */
    binder_done(bs, &msg, &reply);

    return status;
}


/*
 * Release the kernel-side buffer that backs a reply received via BR_REPLY.
 * Only replies whose payload still lives in the driver's mmap'ed area
 * (BIO_F_SHARED) own such a buffer; for those, send a single
 * BC_FREE_BUFFER write to the driver and clear the flag.
 */
void binder_done(struct binder_state *bs,
                 __unused struct binder_io *msg,
                 struct binder_io *reply)
{
    struct {
        uint32_t cmd;
        uintptr_t buffer;
    } __attribute__((packed)) release_msg;

    /* Nothing to release if the reply does not reference driver memory. */
    if (!(reply->flags & BIO_F_SHARED))
        return;

    release_msg.cmd = BC_FREE_BUFFER;
    release_msg.buffer = (uintptr_t) reply->data0;
    /* One write-only BINDER_WRITE_READ ioctl; frees the kernel buffer. */
    binder_write(bs, &release_msg, sizeof(release_msg));
    reply->flags = 0;
}

然后調用binder_loop()進入循環(huán),等待client 進程發(fā)送消息

3.2 service manager

3.2.1 service manager 被喚醒后 (內核態(tài))
1. 判斷thread->todo proc->todo 鏈表中有沒有任務
2. 從隊列中取出 binder_work
3. 構造 binder_transaction_data ,并把命令改為 BR_TRANSACTION
4. 數(shù)據拷貝到用戶空間,返回用戶空間
while (1) {
    uint32_t cmd;
    struct binder_transaction_data tr;
    struct binder_work *w = NULL;
    struct list_head *list = NULL;
    struct binder_transaction *t = NULL;
    struct binder_thread *t_from;

    binder_inner_proc_lock(proc);
    //判斷thread->todo鏈表中有沒有任務
    if (!binder_worklist_empty_ilocked(&thread->todo))
        list = &thread->todo;
    //判斷proc->todo鏈表中有沒有任務
    else if (!binder_worklist_empty_ilocked(&proc->todo) && wait_for_proc_work)
        list = &proc->todo;
    else {
        binder_inner_proc_unlock(proc);

        /* no data added */
        if (ptr - buffer == 4 && !thread->looper_need_return)
            goto retry;
        break;
    }

    if (end - ptr < sizeof(tr) + 4) {
        binder_inner_proc_unlock(proc);
        break;
    }
    //從隊列中取出 binder_work (server進程放入的)
    w = binder_dequeue_work_head_ilocked(list);
    if (binder_worklist_empty_ilocked(&thread->todo))
        thread->process_todo = false;
    switch (w->type) {
        case BINDER_WORK_TRANSACTION: {
            binder_inner_proc_unlock(proc);
            //找到binder_transaction* t指針的地址
            t = container_of(w, struct binder_transaction, work);
        } break;
    ...
    }
    ...
    BUG_ON(t->buffer == NULL);
    //這個target_node 實際上是service_manager的binder_node
    if (t->buffer->target_node) {
        struct binder_node *target_node = t->buffer->target_node;
        struct binder_priority node_prio;

        tr.target.ptr = target_node->ptr;  //flat_binder_object.binder
        tr.cookie =  target_node->cookie;
        ......
        cmd = BR_TRANSACTION; //把命令改為 BR_TRANSACTION
    } else {
        ...
    }
    tr.code = t->code;
    tr.flags = t->flags;
    ...

    t_from = binder_get_txn_from(t); //t->from
    if (t_from) {
        struct task_struct *sender = t_from->proc->tsk;

        tr.sender_pid = task_tgid_nr_ns(sender, task_active_pid_ns(current));
    } else {
        tr.sender_pid = 0;
    }
    //根據binder_transaction 構造binder_transaction_data
    tr.data_size = t->buffer->data_size;
    tr.offsets_size = t->buffer->offsets_size;
    tr.data.ptr.buffer = (binder_uintptr_t)((uintptr_t)t->buffer->data +
            binder_alloc_get_user_buffer_offset(&proc->alloc));
    tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *));

    //BR_TRANSACTION寫入用戶空間
    if (put_user(cmd, (uint32_t __user *)ptr)) {
        if (t_from)
            binder_thread_dec_tmpref(t_from);

        binder_cleanup_transaction(t, "put_user failed", BR_FAILED_REPLY);

        return -EFAULT;
    }
    //后移四字節(jié)
    ptr += sizeof(uint32_t);
    if (copy_to_user(ptr, &tr, sizeof(tr))) {
        if (t_from)
            binder_thread_dec_tmpref(t_from);

         binder_cleanup_transaction(t, "copy_to_user failed", BR_FAILED_REPLY);

        return -EFAULT;
    }
    ptr += sizeof(tr);

    ...
    binder_stat_br(proc, thread, cmd);
    ...

    if (t_from)
        binder_thread_dec_tmpref(t_from);
    t->buffer->allow_user_free = 1;
    if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
        
        binder_inner_proc_lock(thread->proc);
        //這里的t是從帶處理事務的鏈表中取出來的,也就是前面server放到ServiceManager的todo鏈表上
        //將當前線程的傳輸棧入棧
        t->to_parent = thread->transaction_stack;
        //記錄目的線程,即本身 service manager
        t->to_thread = thread;
        //當前線程的傳輸棧記錄信息
        thread->transaction_stack = t;
        binder_inner_proc_unlock(thread->proc);
    } else {
        binder_free_transaction(t);
    }
    break;
}
...
return 0; //然后返回到用戶空間

ServiceManager線程的傳輸棧的信息如下:

transaction_stack
from server'thread
to_proc ServiceManager
to_thread ServiceManager'thread
from_parent NULL
to_parent NULL
3.2.2 service manager處理BR_TRANSACTION(用戶態(tài))

處理數(shù)據

/*
 * Walk the command stream returned by the driver's read half and dispatch
 * each BR_* command.  For BR_TRANSACTION, deserialize the payload into a
 * binder_io, hand it to `func` (svcmgr_handler here), then either free the
 * buffer (one-way) or send the reply back via binder_send_reply().
 * Returns 0 once a reply terminates the loop, -1 on malformed data.
 */
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    int r = 1;
    uintptr_t end = ptr + (uintptr_t) size;

    while (ptr < end) {
        uint32_t cmd = *(uint32_t *) ptr; /* each record starts with a 32-bit BR_* code */
        ptr += sizeof(uint32_t);
#if TRACE
        fprintf(stderr,"%s:\n", cmd_name(cmd));
#endif
        switch(cmd) {
        case BR_NOOP: /* padding command the driver always emits first */
            break;
        ...
        case BR_TRANSACTION: {
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: txn too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (func) {
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;
                int res;

                bio_init(&reply, rdata, sizeof(rdata), 4);
                bio_init_from_txn(&msg, txn);
                /* Hand the request to the registered handler. */
                res = func(bs, txn, &msg, &reply);
                if (txn->flags & TF_ONE_WAY) { /* here txn->flags == 0, so this branch is not taken */
                    binder_free_buffer(bs, txn->data.ptr.buffer);
                } else {
                    /* Synchronous request: send the reply (here four bytes of 0)
                     * and free the transaction buffer in one driver write. */
                    binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
                }
            }
            ptr += sizeof(*txn);
            break;
        }
    ...
    }

    return r;
}
/*
 * service_manager's transaction handler: validates the caller and the
 * interface token, then dispatches on txn->code (get/check/add service).
 * On success writes a 32-bit 0 status into `reply`; returns -1 on error.
 */
int svcmgr_handler(struct binder_state *bs,
                   struct binder_transaction_data *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *s;
    size_t len;
    uint32_t handle;
    uint32_t strict_policy;
    int allow_isolated;
    uint32_t dumpsys_priority;

    /* Only transactions addressed to the context manager are accepted. */
    if (txn->target.ptr != BINDER_SERVICE_MANAGER)
        return -1;

    if (txn->code == PING_TRANSACTION)
        return 0;

    strict_policy = bio_get_uint32(msg); /* strict-mode header: four bytes of 0 */
    s = bio_get_string16(msg, &len); /* interface token string */
    if (s == NULL) {
        return -1;
    }

    /* Token must match "android.os.IServiceManager" (svcmgr_id). */
    if ((len != (sizeof(svcmgr_id) / 2)) ||
        memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
        fprintf(stderr,"invalid id %s\n", str8(s, len));
        return -1;
    }

   ...

    switch(txn->code) {
    case SVC_MGR_GET_SERVICE:
    case SVC_MGR_CHECK_SERVICE:
        ...

    case SVC_MGR_ADD_SERVICE:
        s = bio_get_string16(msg, &len); /* the service's name */
        if (s == NULL) {
            return -1;
        }
        handle = bio_get_ref(msg); /* here the handle value is 1 */
        allow_isolated = bio_get_uint32(msg) ? 1 : 0;
        dumpsys_priority = bio_get_uint32(msg);
        /* Append a service record (name + handle) to the svclist. */
        if (do_add_service(bs, s, len, handle, txn->sender_euid, allow_isolated, dumpsys_priority,
                           txn->sender_pid))
            return -1;
        break;

    ...
    default:
        ALOGE("unknown code %d\n", txn->code);
        return -1;
    }

    bio_put_uint32(reply, 0); /* success: reply carries four bytes of 0 */
    return 0;
}
/*
 * Answer a just-handled BR_TRANSACTION and release its kernel buffer in a
 * single driver write.  The packed struct carries two commands back to
 * back: BC_FREE_BUFFER (with the buffer to return) followed by BC_REPLY
 * (with the reply payload, or a bare status code on failure).
 */
void binder_send_reply(struct binder_state *bs,
                       struct binder_io *reply,
                       binder_uintptr_t buffer_to_free,
                       int status)
{
    struct {
        uint32_t cmd_free;
        binder_uintptr_t buffer;
        uint32_t cmd_reply;
        struct binder_transaction_data txn;
    } __attribute__((packed)) out;

    out.cmd_free = BC_FREE_BUFFER;  /* return the transaction's kernel buffer */
    out.buffer = buffer_to_free;
    out.cmd_reply = BC_REPLY;       /* second command in the same write */
    out.txn.target.ptr = 0;
    out.txn.cookie = 0;
    out.txn.code = 0;

    if (status == 0) {
        /* Success: ship the binder_io payload built by the handler. */
        out.txn.flags = 0;
        out.txn.data_size = reply->data - reply->data0;
        out.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
        out.txn.data.ptr.buffer = (uintptr_t)reply->data0;
        out.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
    } else {
        /* Failure: the reply carries only the status code itself. */
        out.txn.flags = TF_STATUS_CODE;
        out.txn.data_size = sizeof(int);
        out.txn.offsets_size = 0;
        out.txn.data.ptr.buffer = (uintptr_t)&status;
        out.txn.data.ptr.offsets = 0;
    }

    /* res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr) with an empty read half. */
    binder_write(bs, &out, sizeof(out));
}
3.2.3 service manager 處理BC_REPLY (內核態(tài))

<font color="green">1. 根據thread->transaction_stack 中記錄的 from,找到要發(fā)送的目的線程,目的進程</font>

<font color="green">2. 把&t->work 放入目標線程的 todo鏈表,喚醒目標線程</font>

case BC_FREE_BUFFER: {
    //釋放空間

case BC_TRANSACTION:
case BC_REPLY: {
    struct binder_transaction_data tr;

    if (copy_from_user(&tr, ptr, sizeof(tr)))
        return -EFAULT;
    ptr += sizeof(tr);
    binder_transaction(proc, thread, &tr, cmd == BC_REPLY, 0);
    break;
}

binder_transaction:

//找到目的進程
if (reply) {
   
    //用個臨時變量記錄下當前線程的傳輸棧信息
    in_reply_to = thread->transaction_stack;
    ...
        
    //判斷下接收的線程是否為自己本身,如果不是則出錯
    if (in_reply_to->to_thread != thread) {
        ....
    }
    //一次出棧操作,此時 thread->transaction_stack值為 NULL
    thread->transaction_stack = in_reply_to->to_parent;
    
    //獲取到目標線程, in_reply_to->from中記錄著發(fā)送線程 server 的信息
    target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
    ...
        
    if (target_thread->transaction_stack != in_reply_to) {
        ... 
    }
    //找到目的進程
    target_proc = target_thread->proc;
    ...
} else {
    ...
}
...
    
t = kzalloc(sizeof(*t), GFP_KERNEL);
binder_stats_created(BINDER_STAT_TRANSACTION);

tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

...

if (!reply && !(tr->flags & TF_ONE_WAY))
    t->from = thread;
else
    t->from = NULL; //當前不需要回復
t->sender_euid = task_euid(proc->tsk);
t->to_proc = target_proc; //目標進程即server進程
t->to_thread = target_thread; //目標線程
t->code = tr->code;
t->flags = tr->flags;

...
    
t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
        tr->offsets_size, extra_buffers_size,
        !reply && (t->flags & TF_ONE_WAY));
......
t->buffer->allow_user_free = 0;
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;

//從用戶空間拷貝到目的進程的內核空間
off_start = (binder_size_t *)(t->buffer->data +
                      ALIGN(tr->data_size, sizeof(void *)));
offp = off_start;

if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
        tr->data.ptr.buffer, tr->data_size)) {
}
if (copy_from_user(offp, (const void __user *)(uintptr_t)
        tr->data.ptr.offsets, tr->offsets_size)) {
}

...

tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
t->work.type = BINDER_WORK_TRANSACTION;
if (reply) {
    binder_enqueue_thread_work(thread, tcomplete); //tcomplete 放入當前線程的todo鏈表中
    binder_inner_proc_lock(target_proc);
    if (target_thread->is_dead) {
        binder_inner_proc_unlock(target_proc);
        goto err_dead_proc_or_thread;
    }
    BUG_ON(t->buffer->async_transaction != 0);
    //一個出棧操作
    binder_pop_transaction_ilocked(target_thread, in_reply_to);
    binder_enqueue_thread_work_ilocked(target_thread, &t->work);//放入目標線程的todo 鏈表
    binder_inner_proc_unlock(target_proc);
    wake_up_interruptible_sync(&target_thread->wait); //喚醒目標線程
    binder_restore_priority(current, in_reply_to->saved_priority);
    binder_free_transaction(in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
    ......
} else {
    ......
}

然后喚醒目的進程 server進程

service manager 處理 BR_TRANSACTION_COMPLETE , 再次進入休眠

4. 服務的獲取過程

4.1 client

4.1.1 client端構造數(shù)據并發(fā)送(用戶態(tài))

數(shù)據結構:

writebuf-client.jpg
/**
 * Ask service_manager for the handle of a registered service.
 *
 * target: 0 -- the context manager (service_manager)
 * name:   service name, e.g. "hello"
 *
 * Returns the service's handle (> 0) on success, 0 on failure.
 */
uint32_t svcmgr_lookup(struct binder_state *bs, uint32_t target, const char *name)
{
    uint32_t handle;
    unsigned iodata[512/4];
    struct binder_io msg, reply;

    bio_init(&msg, iodata, sizeof(iodata), 4); /* reserve the first 16 bytes */
    bio_put_uint32(&msg, 0);                   /* strict-mode header: four bytes of 0 */
    bio_put_string16_x(&msg, SVC_MGR_NAME);    /* interface token SVC_MGR_NAME */
    bio_put_string16_x(&msg, name);            /* service name, e.g. "hello" */
    
    /* One full transaction; the serialized layout is shown in the figure above. */
    if (binder_call(bs, &msg, &reply, target, SVC_MGR_CHECK_SERVICE)) 
        return 0;

    /* Extract the handle from the flat_binder_object in the kernel's reply. */
    handle = bio_get_ref(&reply);

    if (handle)
        binder_acquire(bs, handle); /* BC_ACQUIRE: take a strong reference on it */

    binder_done(bs, &msg, &reply); /* BC_FREE_BUFFER: release the reply's kernel buffer */

    return handle;
}
4.1.2 client 端處理BC_TRANSACTION(內核態(tài))

1.根據 handle = 0,找到對應的進程 service manager

2.將數(shù)據放入目標進程的todo鏈表中,喚醒目標進程 service manager

4.1.3 client 端被喚醒 cmd = BR_REPLY(內核態(tài))
4.1.4 client 端處理BR_REPLY(用戶態(tài))

從內核態(tài)返回的數(shù)據binder_io * reply 中 binder_flat_object 中獲取到handle = 1

//調用binder_call 之后返回數(shù)據后,解析數(shù)據
res = binder_parse(bs, reply, (uintptr_t) readbuf, bwr.read_consumed, 0);

/*
 * Client-side view of binder_parse(): walk the read buffer returned by the
 * driver and handle each BR_* command.  For BR_REPLY the payload is loaded
 * into `bio` and r drops to 0, which ends the binder_call() loop.
 */
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    int r = 1;
    uintptr_t end = ptr + (uintptr_t) size;

    while (ptr < end) {
        uint32_t cmd = *(uint32_t *) ptr; /* each record starts with a 32-bit BR_* code */
        ptr += sizeof(uint32_t);

        switch(cmd) {
        ...
        case BR_REPLY: {
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: reply too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (bio) {
                bio_init_from_txn(bio, txn); /* load the reply payload into the caller's binder_io */
                bio = 0;
            } else {
                /* todo FREE BUFFER */
            }
            ptr += sizeof(*txn);
            r = 0; /* r = 0 here terminates the binder_call() loop */
            break;
        }
        
        default:
            ALOGE("parse: OOPS %d\n", cmd);
            return -1;
        }
    }

    return r;
}
//binder_call調用結束后,取出handle
// handle = bio_get_ref(&reply);

/*
 * Read the next object out of `bio` and, if it is a remote reference
 * (BINDER_TYPE_HANDLE), return its handle value; otherwise return 0.
 */
uint32_t bio_get_ref(struct binder_io *bio)
{
    struct flat_binder_object *fbo = _bio_get_obj(bio);

    if (fbo && fbo->hdr.type == BINDER_TYPE_HANDLE)
        return fbo->handle;

    return 0;
}

4.2 service manager

4.2.1 service manager 被喚醒后(內核態(tài))

構造數(shù)據,返回用戶態(tài)

4.2.2 service manager處理BR_TRANSACTION(用戶態(tài))
1.取出數(shù)據,得到“hello”

2.在鏈表中根據"hello",找到handle =1;

3.用ioctl把handle=1發(fā)給驅動。

數(shù)據結構:

writebuf-service_manager.jpg
switch(txn->code) {
    case SVC_MGR_GET_SERVICE:
    case SVC_MGR_CHECK_SERVICE:
        s = bio_get_string16(msg, &len);
        if (s == NULL) {
            return -1;
        }
        //根據服務名找到handle = 1
        handle = do_find_service(s, len, txn->sender_euid, txn->sender_pid);
        if (!handle)
            break;
        //構造 binder_flat_object 重點!!!
        bio_put_ref(reply, handle);
        return 0;
構造binder_flat_object
/*
 * Append a flat_binder_object carrying an existing remote reference
 * (`handle`) to `bio`.  This is what service_manager puts in its reply
 * to SVC_MGR_GET_SERVICE / SVC_MGR_CHECK_SERVICE.
 */
void bio_put_ref(struct binder_io *bio, uint32_t handle)
{
    struct flat_binder_object *fbo;

    /* A non-zero handle is recorded in the offsets array so the driver
     * will translate it; a zero handle is written as plain payload. */
    fbo = handle ? bio_alloc_obj(bio) : bio_alloc(bio, sizeof(*fbo));
    if (!fbo)
        return;

    fbo->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    fbo->hdr.type = BINDER_TYPE_HANDLE;
    fbo->handle = handle;
    fbo->cookie = 0;
}

處理完數(shù)據之后,發(fā)送回復數(shù)據

binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);

/*
 * Send the reply for a handled BR_TRANSACTION and free the transaction's
 * kernel buffer in one driver write: BC_FREE_BUFFER followed by BC_REPLY
 * packed back to back.
 */
void binder_send_reply(struct binder_state *bs,
                       struct binder_io *reply,
                       binder_uintptr_t buffer_to_free,
                       int status)
{
    struct {
        uint32_t cmd_free;
        binder_uintptr_t buffer;
        uint32_t cmd_reply;
        struct binder_transaction_data txn;
    } __attribute__((packed)) data;

    data.cmd_free = BC_FREE_BUFFER; /* return the buffer the request arrived in */
    data.buffer = buffer_to_free;
    data.cmd_reply = BC_REPLY; /* two commands sent in a single write */
    data.txn.target.ptr = 0;
    data.txn.cookie = 0;
    data.txn.code = 0;
    if (status) {
        /* Failure: the reply payload is just the status code. */
        data.txn.flags = TF_STATUS_CODE;
        data.txn.data_size = sizeof(int);
        data.txn.offsets_size = 0;
        data.txn.data.ptr.buffer = (uintptr_t)&status;
        data.txn.data.ptr.offsets = 0;
    } else {
        /* Success: ship the binder_io payload built by the handler. */
        data.txn.flags = 0;
        data.txn.data_size = reply->data - reply->data0;
        data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
        data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
        data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
    }
    binder_write(bs, &data, sizeof(data));
}
4.2.3 service manager處理BC_REPLY(內核態(tài))
4.在servicemanager 的 binder_proc.refs_by_desc 中, 根據handle = 1找到 binder_ref, 進而找到 "hello"服務binder_node

5.為client 創(chuàng)建binder_ref , binder_ref.node指向"hello"服務的binder_node, desc = 1;

6.然后把數(shù)據放入 client的 todo鏈表中,喚醒client
for (; offp < off_end; offp++) {
        struct binder_object_header *hdr;
        size_t object_size = binder_validate_object(t->buffer, *offp);
        ...

        hdr = (struct binder_object_header *)(t->buffer->data + *offp);
        off_min = *offp + object_size;
        switch (hdr->type) {
        case BINDER_TYPE_BINDER:
        case BINDER_TYPE_WEAK_BINDER: {
            ...
        case BINDER_TYPE_HANDLE:
        case BINDER_TYPE_WEAK_HANDLE: {
            struct flat_binder_object *fp;

            fp = to_flat_binder_object(hdr);
            ret = binder_translate_handle(fp, t, thread);
            
        } break;
    ...
    }
/*
 * Translate a flat_binder_object carrying a handle (BINDER_TYPE_HANDLE /
 * BINDER_TYPE_WEAK_HANDLE) for delivery to another process: resolve the
 * sender's binder_ref to the underlying binder_node, then either hand the
 * target a direct binder pointer (if the node lives in the target) or
 * create a new binder_ref in the target and rewrite the handle.
 */
static int binder_translate_handle(struct flat_binder_object *fp,
                   struct binder_transaction *t,
                   struct binder_thread *thread)
{
    struct binder_proc *proc = thread->proc;
    struct binder_proc *target_proc = t->to_proc;
    struct binder_node *node;
    struct binder_ref_data src_rdata;
    int ret = 0;

    /* With handle = 1, find the binder_ref in the sender's
     * binder_proc.refs_by_desc tree and through it the "hello"
     * service's binder_node. */
    node = binder_get_node_from_ref(proc, fp->handle,
            fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
    ...
    if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
        ret = -EPERM;
        goto done;
    }

    binder_node_lock(node);
    
    /* Here the target is the client while binder_node->proc is the
     * server, so execution takes the else branch. */
    if (node->proc == target_proc) {
        ...
    } else {
        struct binder_ref_data dest_rdata;

        binder_node_unlock(node);
        /* Create a binder_ref for the target process whose .node points
         * at the server's binder_node. */
        ret = binder_inc_ref_for_node(target_proc, node,
                fp->hdr.type == BINDER_TYPE_HANDLE,
                NULL, &dest_rdata);
        if (ret)
            goto done;

        fp->binder = 0;
        fp->handle = dest_rdata.desc; /* target's handle; ref count already +1 */
        fp->cookie = 0;
        trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
                            &dest_rdata);
    }
done:
    binder_put_node(node);
    return ret;
}

5. 服務使用過程

1.用戶態(tài)構造數(shù)據(含有handle),發(fā)送數(shù)據

2.驅動根據handle 值,找到對應的 binder_ref , 進而找到服務端的 binder_node, 找到服務進程

3.把構造好的數(shù)據放到服務進程的todo鏈表里,喚醒服務進程。

4.服務進程拿到數(shù)據后,根據ptr知道調用哪個服務。

具體源碼略(上面的分析重復,不再進行分析)

6. binder server 的多線程支持

可能有多個client 向一個 server進程發(fā)送數(shù)據。

server 忙不過來時,創(chuàng)建多線程。

問題一:誰來判斷忙不過來?

binder_proc.waiting_threads 鏈表中,如果這個等待隊列中一個線程都沒有了就表示忙不過來了。

1.在驅動中判斷是否忙不過來

2.驅動向應用層發(fā)出請求,創(chuàng)建新線程

3.應用層創(chuàng)建新線程

binder_thread_read中

驅動向應用層發(fā)出“創(chuàng)建新線程”的請求的條件

//proc->requested_threads 未處理的新線程請求
//proc->requested_threads_started 已經啟動的線程數(shù) < 最大線程數(shù)
if (proc->requested_threads == 0 && list_empty(&thread->proc->waiting_threads) &&
        proc->requested_threads_started < proc->max_threads &&
        (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
         BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
         /*spawn a new thread if we leave this out */) {
    proc->requested_threads++;
    binder_inner_proc_unlock(proc);
    binder_debug(BINDER_DEBUG_THREADS, "%d:%d BR_SPAWN_LOOPER\n", proc->pid, thread->pid);
    if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
            return -EFAULT;
        binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
} else
    binder_inner_proc_unlock(proc);

問題二:那么應用程序要怎么寫呢?

1.用戶態(tài)通過 ioctl 設置max_threads

2.用戶態(tài)收到 BR_SPAWN_LOOPER 請求

a.創(chuàng)建新線程  使用 int pthread_create() 函數(shù)創(chuàng)建

b.發(fā)送一個ioctl(BC_ENTER_LOOPER) 

c.進入一個循環(huán)

3.新線程要執(zhí)行ioctl BC_REGISTER_LOOP,表明新線程已經創(chuàng)建,并且進入循環(huán)體了。

4.像主線程一樣,進入一個循環(huán)體。讀數(shù)據,讀驅動然后處理

1.設置max_threads

binder_ioctl中:

case BINDER_SET_MAX_THREADS: {
    int max_threads;

    if (copy_from_user(&max_threads, ubuf, sizeof(max_threads))) {
        ret = -EINVAL;
        goto err;
    }
    binder_inner_proc_lock(proc);
    proc->max_threads = max_threads;
    binder_inner_proc_unlock(proc);
    break;
}

7. 小結---數(shù)據傳輸過程

所有的binder_open 里面有一個很重要的結構體 binder_proc

1.server 在內核態(tài)中為每個服務創(chuàng)建 binder_node。 binder_node.proc 表示當前的server進程。

2.在內核態(tài)中給service manager 創(chuàng)建 binder_ref引用binder_node.

binder_ref.desc = 1,2,3...

在用戶態(tài)會創(chuàng)建一個服務鏈表 svclist。其中包含 .name .handle

3.client 向service manager查詢服務,傳name(服務名)

4.service manager 返回handle給驅動

5.驅動程序在service manager 的binder_ref 紅黑樹中根據handle 找到binder_ref.

再根據 binder_ref 找到binder_node.

最后給client 創(chuàng)建新的binder_ref.node 指定 server 的binder_node .

它的desc從 1開始。

驅動返回 desc 給 client, 就是handle

由此看出,servicemanager 有一系列的binder_ref, 里面的handle 值由注冊服務的順序決定

client 也有一系列的 binder_ref, 里面的handle值由獲取服務的順序決定

6.client 得到 handle , client發(fā)送數(shù)據給 handle. 驅動根據 handle 找到 binder_ref ,

再根據 binder_ref 找到 binder_node。根據 binder_node 找到進程server.

這樣就可以把數(shù)據傳給它了。

?著作權歸作者所有,轉載或內容合作請聯(lián)系作者
【社區(qū)內容提示】社區(qū)部分內容疑似由AI輔助生成,瀏覽時請結合常識與多方信息審慎甄別。
平臺聲明:文章內容(如有圖片或視頻亦包括在內)由作者上傳并發(fā)布,文章內容僅代表作者本人觀點,簡書系信息發(fā)布平臺,僅提供信息存儲服務。

相關閱讀更多精彩內容

友情鏈接更多精彩內容