Binder源碼解讀 01(第一個binder的啟動)

binder第一部分,我們從用戶空間的service_manager.c 看起,看看第一個binder是如何啟動運行的~

service_manager.c :: main

/* Entry point of servicemanager: pick the driver path, open the binder
 * device, then (below) become context manager and loop. */
int main(int argc, char** argv)
{
    struct binder_state *bs; 
    union selinux_callback cb;
    char *driver;

    if (argc > 1) {
        driver = argv[1];
    } else {
        driver = "/dev/binder"; //default path of the binder driver device file
    }

    bs = binder_open(driver, 128*1024); //open the driver and map a 128KB buffer; state kept in bs
     ...
}

binder.c :: binder_open

/* Opens the binder driver and maps its buffer; returns the connection
 * state (fd + mapping) in a heap-allocated binder_state. */
struct binder_state *binder_open(const char* driver, size_t mapsize)
{
    struct binder_state *bs;
    struct binder_version vers;

    bs = malloc(sizeof(*bs));//allocate the record that tracks this binder connection
    ...
    bs->fd = open(driver, O_RDWR | O_CLOEXEC);//syscall into the driver's open(); keep the fd in bs
    ...
    if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||//query the driver's protocol version via ioctl
        (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
            ...
    }

    bs->mapsize = mapsize;//record the mapping size, i.e. the 128*1024 requested above
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);//map a read-only region backed by the driver; keep the pointer in bs
    ...
    return bs;
  ...
}

繼續(xù)回到剛才的service_manager中

service_manager.c :: main

/* main, continued: after opening the driver, register as context manager
 * and enter the dispatch loop. */
int main(int argc, char** argv)
{
    ...
    bs = binder_open(driver, 128*1024);
    ...
    if (binder_become_context_manager(bs)) {//also a syscall into the driver: registers this binder as the context manager
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }
    ...//selinux-related code omitted for now
    binder_loop(bs, svcmgr_handler);//enter the endless loop with svcmgr_handler as the transaction callback
    return 0;
}

進(jìn)入loop循環(huán)

binder.c :: binder_loop

/* First half of binder_loop: announce BC_ENTER_LOOPER to the driver
 * before starting to read. */
void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr; //the in/out argument of the BINDER_WRITE_READ ioctl
    uint32_t readbuf[32]; // scratch buffer: used for the write here, reused for reads below

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER; //single command telling the driver this thread enters its loop
    binder_write(bs, readbuf, sizeof(uint32_t)); //push BC_ENTER_LOOPER down to the driver
  ...
}

binder.c :: binder_write

/* Writes len bytes of commands to the driver; the read side of the
 * binder_write_read is zeroed so the ioctl does not wait for input. */
int binder_write(struct binder_state *bs, void *data, size_t len)
{
    struct binder_write_read bwr;
    int res;
    bwr.write_size = len; 
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) data;
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0; //write-only call: describe the outgoing buffer, request no read
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr); //hand the buffer to the previously opened driver via ioctl
    ...
    return res;
}

對驅(qū)動通知完畢后,用戶空間正式進(jìn)入循環(huán)

binder.c :: binder_loop

/* Second half of binder_loop: block in the driver for incoming work,
 * parse it, and keep going until parse reports an error or a reply. */
void binder_loop(struct binder_state *bs, binder_handler func)
{
    ...
    for (;;) { //the endless dispatch loop
        bwr.read_size = sizeof(readbuf); //reuse the scratch buffer from the write above, now as the read target
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf; //the driver copies incoming commands here

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr); //block in the driver until there is data to read
        ...
        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func); //decode what the driver returned; the result decides whether to keep looping
        if (res == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}

最后看一下service_manager是如何解析從內(nèi)核空間讀取到的數(shù)據(jù)的

binder.c :: binder_parse

這個方法用于解析從內(nèi)核讀取到的信息,有多種情況,我們先簡單看一下

/* Walks the buffer returned by the driver: each record is a 32-bit
 * command code followed by a command-specific payload. Returns 1 to
 * keep looping, 0 on BR_REPLY, negative on error. */
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    int r = 1;
    uintptr_t end = ptr + (uintptr_t) size;

    while (ptr < end) { //iterate over the whole message read from the driver
        uint32_t cmd = *(uint32_t *) ptr;//the leading 32 bits hold the command code
        ptr += sizeof(uint32_t);//advance past the command; payload (if any) follows
...
        switch(cmd) { //dispatch on the command the driver sent up
        ...
    }
    return r;
}

下面分析一下各個指令

BR_NOOP | BR_TRANSACTION_COMPLETE

case BR_NOOP: /* nothing to do; fall back into the read loop */
     break;
case BR_TRANSACTION_COMPLETE: /* ack for a previous write; no payload */
     break;

這兩種情況直接返回默認(rèn)值1,根據(jù)binder_loop中的代碼可知,會繼續(xù)循環(huán)從底層讀取數(shù)據(jù)并繼續(xù)解析

BR_INCREFS | BR_ACQUIRE | BR_RELEASE | BR_DECREFS

case BR_INCREFS:  /* reference-count commands: payload is a binder_ptr_cookie */
case BR_ACQUIRE:
case BR_RELEASE:
case BR_DECREFS:
     ptr += sizeof(struct binder_ptr_cookie); /* skip the payload, keep parsing */
     break;

指針后移越過存放binder_ptr_cookie的位置之后binder_loop繼續(xù)循環(huán)讀取

BR_TRANSACTION

case BR_TRANSACTION: {
    struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;//the payload is a binder_transaction_data from the driver
    if ((end - ptr) < sizeof(*txn)) { //bounds check: is a full binder_transaction_data left in the buffer?
        ALOGE("parse: txn too small!\n");
        return -1;
    }
    binder_dump_txn(txn); //debug-dump the transaction
    if (func) {//if the caller of binder_parse supplied a handler callback
        ...
    }
    ptr += sizeof(*txn);//advance past this record and continue the loop
    break;
}

上面的描述并不詳細(xì),下面仔細(xì)分析一下。

首先是一個binder_transaction_data結(jié)構(gòu)體,用于存儲binder事務(wù)數(shù)據(jù)

binder_transaction_data
/* Describes one binder transaction exchanged with the driver. */
struct binder_transaction_data {
    union {
        __u32   handle; //target descriptor of a command transaction
        binder_uintptr_t ptr;//target descriptor of a return transaction
    } target;
    binder_uintptr_t    cookie; /* target object cookie */
    __u32       code;       /* transaction command */

    /* General information about the transaction. */
    __u32           flags;
    pid_t       sender_pid; //pid of the sending process
    uid_t       sender_euid; //effective uid of the sender
    binder_size_t   data_size;  //size in bytes of the data being sent
    binder_size_t   offsets_size;   //size in bytes of the offsets array

    /* If this transaction is inline, the data immediately
     * follows here; otherwise, it ends with a pointer to
     * the data buffer.
     */
    union {
        struct { //out-of-line transaction payload
            /* transaction data */
            binder_uintptr_t    buffer;
            /* offsets from buffer to flat_binder_object structs */
            binder_uintptr_t    offsets;
        } ptr;
        __u8    buf[8];
    } data;
};

在我們從驅(qū)動層讀取到了本次binder事務(wù)及攜帶的數(shù)據(jù)之后,使用binder_parse調(diào)用者提供的func回調(diào)進(jìn)行處理。

/* Inside BR_TRANSACTION: wrap the incoming payload, call the handler,
 * then either free the kernel buffer (one-way) or send the reply. */
unsigned rdata[256/4]; //256-byte scratch area backing the reply
struct binder_io msg;   //will alias the incoming transaction's payload
struct binder_io reply; //the handler writes its answer here
int res;

bio_init(&reply, rdata, sizeof(rdata), 4); 
bio_init_from_txn(&msg, txn);
res = func(bs, txn, &msg, &reply); //for servicemanager this is svcmgr_handler
if (txn->flags & TF_ONE_WAY) { //one-way: no reply expected, just release the kernel buffer
    binder_free_buffer(bs, txn->data.ptr.buffer);
} else {
    binder_send_reply(bs, &reply, txn->data.ptr.buffer, res); //synchronous: send reply and free the buffer
}

首先看一下binder_io結(jié)構(gòu)體:

binder_io
/* Cursor over a binder payload: data/offs advance as values are
 * consumed or appended; data0/offs0 remember the start of each region. */
struct binder_io
{
    char *data;            /* pointer to read/write from */
    binder_size_t *offs;   /* array of offsets */
    size_t data_avail;     /* bytes available in data buffer */
    size_t offs_avail;     /* entries available in offsets array */
    char *data0;           /* start of data buffer */
    binder_size_t *offs0;  /* start of offsets buffer */
    uint32_t flags;
    uint32_t unused;
};

用于存儲binder傳輸中的數(shù)據(jù)

以及兩個初始化binder_io的方法

//Initializes a binder_io that points directly into the transaction's
//payload — no data is copied; BIO_F_SHARED marks the buffer as borrowed.
void bio_init_from_txn(struct binder_io *bio, struct binder_transaction_data *txn)
{
    bio->data = bio->data0 = (char *)(intptr_t)txn->data.ptr.buffer;
    bio->offs = bio->offs0 = (binder_size_t *)(intptr_t)txn->data.ptr.offsets;
    bio->data_avail = txn->data_size;
    bio->offs_avail = txn->offsets_size / sizeof(size_t);
    bio->flags = BIO_F_SHARED;//payload is shared with the kernel, not locally allocated
}
//Initializes a binder_io over a caller-supplied buffer: the first
//maxoffs offset entries occupy the front, the data region follows.
void bio_init(struct binder_io *bio, void *data,
              size_t maxdata, size_t maxoffs)
{
    size_t n = maxoffs * sizeof(size_t); //bytes reserved for the offsets array

    if (n > maxdata) { //offsets alone would not fit in the buffer
        bio->flags = BIO_F_OVERFLOW;
        bio->data_avail = 0;
        bio->offs_avail = 0;
        return;
    }

    bio->data = bio->data0 = (char *) data + n; //data region starts right after the offsets
    bio->offs = bio->offs0 = data;
    bio->data_avail = maxdata - n;
    bio->offs_avail = maxoffs;
    bio->flags = 0;
}

在對msg以及reply進(jìn)行了初始化后,執(zhí)行了調(diào)用方提供的方法func。跟蹤一下,可知是service_manager.c 中的svcmgr_handler方法。對于這個方法我們暫時先不看,繼續(xù)下一步。

if (txn->flags & TF_ONE_WAY) { //branch on whether this transaction carries the TF_ONE_WAY flag
    binder_free_buffer(bs, txn->data.ptr.buffer); //one-way: no reply, just release the kernel buffer
} else {
    binder_send_reply(bs, &reply, txn->data.ptr.buffer, res); //synchronous: send the reply back (which also frees the buffer)
}

/* Tells the driver to release the transaction buffer it lent us. */
void binder_free_buffer(struct binder_state *bs,
                        binder_uintptr_t buffer_to_free)
{
    ...
    binder_write(bs, &data, sizeof(data)); //ultimately a BINDER_WRITE_READ ioctl into the driver
}

/* Sends the handler's reply (or an error status) back to the driver. */
void binder_send_reply(struct binder_state *bs,
                       struct binder_io *reply,
                       binder_uintptr_t buffer_to_free,
                       int status)
{
    ...
    binder_write(bs, &data, sizeof(data)); //again routed through binder_write's ioctl
}

可以看到,最終都是通過binder_write向驅(qū)動層寫入數(shù)據(jù)。

到這里為止,binder_parse 中對 BR_TRANSACTION 情況的判斷結(jié)束了。我們接著看。

BR_REPLY

case BR_REPLY: {
    struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
    ...
    ptr += sizeof(*txn); //advance past the reply record
    r = 0;  //return 0: binder_loop treats a reply here as unexpected and exits
    break;
}

在此情況下,binder_parse會返回0。根據(jù)binder_loop中的代碼,返回0會打印「unexpected reply?!」並中斷循環(huán)——service manager作為服務(wù)端,正常情況下不應(yīng)收到BR_REPLY。

BR_DEAD_BINDER | BR_FAILED_REPLY | BR_DEAD_REPLY

case BR_DEAD_BINDER: {
    struct binder_death *death = (struct binder_death *)(uintptr_t) *(binder_uintptr_t *)ptr;//payload is a pointer to the binder_death record registered earlier
    ptr += sizeof(binder_uintptr_t); //advance past the cookie
    death->func(bs, death->ptr); //invoke the registered death-notification callback
    break;
}
case BR_FAILED_REPLY: /* negative return makes binder_loop break out */
    r = -1;
    break;
case BR_DEAD_REPLY:
    r = -1;
    break;

報錯,binder_loop中斷循環(huán)。

到這里,binder_parse方法我們就大致分析完了,同樣,對用戶層的service_manager的啟動及執(zhí)行也告一段落了。下面上一張時序圖簡單總結(jié)。


binder1.png
最后編輯于
?著作權(quán)歸作者所有,轉(zhuǎn)載或內(nèi)容合作請聯(lián)系作者
【社區(qū)內(nèi)容提示】社區(qū)部分內(nèi)容疑似由AI輔助生成,瀏覽時請結(jié)合常識與多方信息審慎甄別。
平臺聲明:文章內(nèi)容(如有圖片或視頻亦包括在內(nèi))由作者上傳并發(fā)布,文章內(nèi)容僅代表作者本人觀點,簡書系信息發(fā)布平臺,僅提供信息存儲服務(wù)。

友情鏈接更多精彩內(nèi)容