1 Source Code Location
/frameworks/av/media/libmedia/
- IMediaDeathNotifier.cpp
/frameworks/native/libs/binder/
- Binder.cpp
- BpBinder.cpp
- IPCThreadState.cpp
- ProcessState.cpp
- IServiceManager.cpp
對應(yīng)的鏈接為
For obtaining a service at the Native layer, we again take media as our example. First, let's look at the media class diagram.
2 Class Diagram

Legend:
- Blue: classes involved in obtaining the MediaPlayerService service
- Green: the two most central classes in the Binder architecture for communicating with the Binder driver
- Purple: the shared interfaces/base classes used by both service registration and service retrieval
3 Service Retrieval Flow
3.1 The getMediaPlayerService() function
//frameworks/av/media/libmedia/IMediaDeathNotifier.cpp line 35
sp<IMediaPlayerService>&
IMediaDeathNotifier::getMediaPlayerService()
{
    Mutex::Autolock _l(sServiceLock);
    if (sMediaPlayerService == 0) {
        // Obtain the ServiceManager
        sp<IServiceManager> sm = defaultServiceManager();
        sp<IBinder> binder;
        do {
            // Request the service named "media.player"
            binder = sm->getService(String16("media.player"));
            if (binder != 0) {
                break;
            }
            usleep(500000); // 0.5 s
        } while (true);
        if (sDeathNotifier == NULL) {
            // Create the death-notification object
            sDeathNotifier = new DeathNotifier();
        }
        // Link the death notification to the binder
        binder->linkToDeath(sDeathNotifier);
        sMediaPlayerService = interface_cast<IMediaPlayerService>(binder);
    }
    return sMediaPlayerService;
}
The defaultServiceManager() flow was covered earlier; it returns a BpServiceManager.
When requesting the service named "media.player", the code keeps polling in a loop: MediaPlayerService may not yet have finished registering with ServiceManager, or may not have started at all, in which case getService() returns NULL; the thread then sleeps for 0.5 s and tries again until the service is obtained.
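The same polling idiom generalizes to any service name. Below is a minimal sketch built directly from the loop above; the helper name waitForService() is hypothetical, not a framework API:

// A hypothetical helper wrapping the retry loop shown above
sp<IBinder> waitForService(const String16& name)
{
    sp<IServiceManager> sm = defaultServiceManager();
    sp<IBinder> binder;
    do {
        // Ask ServiceManager for the service by name
        binder = sm->getService(name);
        if (binder != 0) break;
        usleep(500000); // not registered or not started yet; retry in 0.5 s
    } while (true);
    return binder;
}

// Usage, mirroring getMediaPlayerService():
//   sp<IBinder> binder = waitForService(String16("media.player"));
//   sp<IMediaPlayerService> service = interface_cast<IMediaPlayerService>(binder);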
3.2 The BpServiceManager::getService() function
//frameworks/native/libs/binder/IServiceManager.cpp line 134
virtual sp<IBinder> getService(const String16& name) const
{
    unsigned n;
    for (n = 0; n < 5; n++) {
        sp<IBinder> svc = checkService(name);
        if (svc != NULL) return svc;
        sleep(1);
    }
    return NULL;
}
- The MediaPlayer service is obtained through BpServiceManager: it checks whether the service exists; if so, the service is returned, otherwise the thread sleeps 1 s and checks again.
- The loop runs 5 times. Why 5? This is presumably tied to Android's 5-second ANR timeout: if every attempt fails, 5 iterations at 1 s of sleep each (ignoring the time spent in checkService()) add up to roughly 5 s.
3.3 The BpServiceManager::checkService() function
//frameworks/native/libs/binder/IServiceManager.cpp line 146
virtual sp<IBinder> checkService( const String16& name) const
{
    Parcel data, reply;
    // Write the RPC header
    data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
    // Write the service name
    data.writeString16(name);
    remote()->transact(CHECK_SERVICE_TRANSACTION, data, &reply);
    return reply.readStrongBinder();
}
Checks whether the specified service exists; remote() here is the BpBinder. The Service Manager side handles this request as sketched below.
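On the other end, the Service Manager process dispatches CHECK_SERVICE_TRANSACTION in svcmgr_handler(). A simplified sketch of that branch, based on frameworks/native/cmds/servicemanager/service_manager.c (exact signatures vary across Android versions):

case SVC_MGR_GET_SERVICE:
case SVC_MGR_CHECK_SERVICE:
    // Read the requested service name out of the transaction data
    s = bio_get_string16(msg, &len);
    if (s == NULL)
        return -1;
    // Look the name up in the list of registered services
    handle = do_find_service(bs, s, len, txn->sender_euid, txn->sender_pid);
    if (!handle)
        break;
    // Reply with a binder reference to the found handle
    bio_put_ref(reply, handle);
    return 0;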
3.4 The BpBinder::transact() function
//frameworks/native/libs/binder/BpBinder.cpp line 159
status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    if (mAlive) {
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }
    return DEAD_OBJECT;
}
The Binder proxy's transact() does no real work itself; the actual transaction is handed over to IPCThreadState.
3.4.1 The IPCThreadState::self() function
IPCThreadState* IPCThreadState::self()
{
    if (gHaveTLS) {
restart:
        const pthread_key_t k = gTLS;
        IPCThreadState* st = (IPCThreadState*)pthread_getspecific(k);
        if (st) return st;
        // Initialize IPCThreadState
        return new IPCThreadState;
    }
    if (gShutdown) return NULL;
    pthread_mutex_lock(&gTLSMutex);
    // On first entry gHaveTLS is false
    if (!gHaveTLS) {
        // Create the thread's TLS key
        if (pthread_key_create(&gTLS, threadDestructor) != 0) {
            pthread_mutex_unlock(&gTLSMutex);
            return NULL;
        }
        gHaveTLS = true;
    }
    pthread_mutex_unlock(&gTLSMutex);
    goto restart;
}
- TLS stands for Thread Local Storage: each thread owns its own private TLS area, which is never shared between threads.
- pthread_getspecific()/pthread_setspecific() read and write the contents of this area; self() uses them to fetch the IPCThreadState object saved in the calling thread's TLS (see the sketch below).
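To make the idiom concrete, here is a self-contained sketch of the same TLS pattern (all names hypothetical): one process-wide pthread key, with each thread lazily creating and caching its own private object, just as IPCThreadState::self() does:

#include <pthread.h>

struct ThreadState { int callCount = 0; };   // stand-in for IPCThreadState

static pthread_key_t  gKey;
static pthread_once_t gOnce = PTHREAD_ONCE_INIT;

static void destroyState(void* p) { delete static_cast<ThreadState*>(p); }
static void makeKey()             { pthread_key_create(&gKey, destroyState); }

ThreadState* myThreadState()
{
    pthread_once(&gOnce, makeKey);           // create the key exactly once
    ThreadState* st = static_cast<ThreadState*>(pthread_getspecific(gKey));
    if (st == NULL) {                        // first call on this thread
        st = new ThreadState;
        pthread_setspecific(gKey, st);       // private to the calling thread
    }
    return st;
}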
以后面的流程和上面的注冊流程大致相同,主要流程也是 IPCThreadState:: transact()函數(shù)、IPCThreadState::writeTransactionData()函數(shù)、IPCThreadState::waitForResponse()函數(shù)和IPCThreadState.talkWithDriver()函數(shù),Android跨進(jìn)程通信IPC之15——Binder之Framework層C++篇--注冊服務(wù)已經(jīng)講解過了,這里就不詳細(xì)說明了。我們從IPCThreadState.talkWithDriver() 開始繼講解
3.4.2 IPCThreadState:: talkWithDriver()函數(shù)
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    ...
    binder_write_read bwr;
    const bool needRead = mIn.dataPosition() >= mIn.dataSize();
    const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
    bwr.write_size = outAvail;
    bwr.write_buffer = (uintptr_t)mOut.data();
    // Fill in the receive buffer; any data received later lands directly in mIn.
    if (doReceive && needRead) {
        bwr.read_size = mIn.dataCapacity();
        bwr.read_buffer = (uintptr_t)mIn.data();
    } else {
        bwr.read_size = 0;
        bwr.read_buffer = 0;
    }
    // If both the read and write buffers are empty, return immediately
    if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
    bwr.write_consumed = 0;
    bwr.read_consumed = 0;
    status_t err;
    do {
        // Talk to the Binder driver through repeated ioctl read/write calls
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
            err = NO_ERROR;
        ...
        // If interrupted, keep going
    } while (err == -EINTR);
    ...
    return err;
}
binder_write_read結(jié)構(gòu)體 用來與Binder設(shè)備交換數(shù)據(jù)的結(jié)構(gòu),通過ioctl與mDriverFD通信,是真正的與Binder驅(qū)動進(jìn)行數(shù)據(jù)讀寫交互的過程。先向service manager進(jìn)程發(fā)送查詢服務(wù)的請求(BR_TRANSACTION)。當(dāng)service manager 進(jìn)程收到帶命令后,會執(zhí)行do_find_service()查詢服務(wù)所對應(yīng)的handle,然后再binder_send_reply()應(yīng)發(fā)送者,發(fā)送BC_REPLY協(xié)議,然后再調(diào)用binder_transaction(),再向服務(wù)請求者的todo隊列插入事務(wù)。接下來,再看看binder_transaction過程。
3.4.2.1 The binder_transaction() function
//kernel/drivers/android/binder.c line 1827
static void binder_transaction(struct binder_proc *proc,
            struct binder_thread *thread,
            struct binder_transaction_data *tr, int reply){
    // Determined through a series of checks:
    // target thread
    struct binder_thread *target_thread;
    // target process
    struct binder_proc *target_proc;
    // target binder node
    struct binder_node *target_node;
    // target todo queue
    struct list_head *target_list;
    // target wait queue
    wait_queue_head_t *target_wait;
    ...
    // Allocate the two bookkeeping structures
    struct binder_transaction *t = kzalloc(sizeof(*t), GFP_KERNEL);
    struct binder_work *tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
    // Allocate a buffer from target_proc
    t->buffer = binder_alloc_buf(target_proc, tr->data_size,
        tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
    ...
    for (; offp < off_end; offp++) {
        switch (fp->type) {
        case BINDER_TYPE_BINDER: ...
        case BINDER_TYPE_WEAK_BINDER: ...
        case BINDER_TYPE_HANDLE:
        case BINDER_TYPE_WEAK_HANDLE: {
            struct binder_ref *ref = binder_get_ref(proc, fp->handle,
                        fp->type == BINDER_TYPE_HANDLE);
            ...
            // We are running in the servicemanager process here, so ref->node
            // points to the binder node in the process hosting the service,
            // while target_proc is the process requesting the service;
            // normally the two are not equal.
            if (ref->node->proc == target_proc) {
                if (fp->type == BINDER_TYPE_HANDLE)
                    fp->type = BINDER_TYPE_BINDER;
                else
                    fp->type = BINDER_TYPE_WEAK_BINDER;
                fp->binder = ref->node->ptr;
                // Address of the BBinder service
                fp->cookie = ref->node->cookie;
                binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
            } else {
                struct binder_ref *new_ref;
                // The requesting process is not the one hosting the service,
                // so create a binder_ref for the requesting process
                new_ref = binder_get_ref_for_node(target_proc, ref->node);
                fp->binder = 0;
                // Rewrite the handle for the requester's reference table
                fp->handle = new_ref->desc;
                fp->cookie = 0;
                binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
            }
        } break;
        case BINDER_TYPE_FD: ...
        }
    }
    // Queue the transaction on target_list and a completion on this thread's todo list
    t->work.type = BINDER_WORK_TRANSACTION;
    list_add_tail(&t->work.entry, target_list);
    tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
    list_add_tail(&tcomplete->entry, &thread->todo);
    if (target_wait)
        wake_up_interruptible(target_wait);
    return;
}
This step is very important; there are two cases (see the flat_binder_object sketch after this list):
- Case 1: the process requesting the service and the service belong to different processes. A binder_ref object is created for the requesting process, pointing to the binder_node in the service's process.
- Case 2: the requester and the service belong to the same process. No new object is created; the reference count is simply incremented, and type is rewritten to BINDER_TYPE_BINDER or BINDER_TYPE_WEAK_BINDER.
3.4.2.2 The binder_thread_read() function
//kernel/drivers/android/binder.c line 2650
binder_thread_read(struct binder_proc *proc, struct binder_thread *thread,
        binder_uintptr_t binder_buffer, size_t size,
        binder_size_t *consumed, int non_block){
    ...
    // If the thread's todo queue has work, continue; otherwise sleep and wait
    ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
    ...
    while (1) {
        uint32_t cmd;
        struct binder_transaction_data tr;
        struct binder_work *w;
        struct binder_transaction *t = NULL;
        // First try to take work from the thread's own todo queue
        if (!list_empty(&thread->todo)) {
            w = list_first_entry(&thread->todo, struct binder_work, entry);
        // The thread's todo queue is empty, so take work from the process todo queue
        } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
            ...
        }
        switch (w->type) {
        case BINDER_WORK_TRANSACTION:
            // Extract the transaction
            t = container_of(w, struct binder_transaction, work);
            break;
        ...
        }
        // Only BINDER_WORK_TRANSACTION falls through to the code below
        if (!t) continue;
        if (t->buffer->target_node) {
            ...
        } else {
            tr.target.ptr = NULL;
            tr.cookie = NULL;
            // Set the command to BR_REPLY
            cmd = BR_REPLY;
        }
        tr.code = t->code;
        tr.flags = t->flags;
        tr.sender_euid = t->sender_euid;
        if (t->from) {
            struct task_struct *sender = t->from->proc->tsk;
            // For non-oneway calls, record the caller's pid in sender_pid
            tr.sender_pid = task_tgid_nr_ns(sender, current->nsproxy->pid_ns);
        } else {
            ...
        }
        tr.data_size = t->buffer->data_size;
        tr.offsets_size = t->buffer->offsets_size;
        tr.data.ptr.buffer = (void *)t->buffer->data +
                    proc->user_buffer_offset;
        tr.data.ptr.offsets = tr.data.ptr.buffer +
                    ALIGN(t->buffer->data_size, sizeof(void *));
        // Copy the command and the data back to user space
        put_user(cmd, (uint32_t __user *)ptr);
        ptr += sizeof(uint32_t);
        copy_to_user(ptr, &tr, sizeof(tr));
        ptr += sizeof(tr);
        list_del(&t->work.entry);
        t->buffer->allow_user_free = 1;
        if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
            ...
        } else {
            t->buffer->transaction = NULL;
            // The transaction is complete, so free it
            kfree(t);
        }
        break;
    }
done:
    *consumed = ptr - buffer;
    if (proc->requested_threads + proc->ready_threads == 0 &&
        proc->requested_threads_started < proc->max_threads &&
        (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
                BINDER_LOOPER_STATE_ENTERED))) {
        proc->requested_threads++;
        // Emit BR_SPAWN_LOOPER, asking userspace to spawn a new binder thread
        put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer);
    }
    return 0;
}
3.4.3 The readStrongBinder() function
//frameworks/native/libs/binder/Parcel.cpp line 1334
sp<IBinder> Parcel::readStrongBinder() const
{
    sp<IBinder> val;
    unflatten_binder(ProcessState::self(), *this, &val);
    return val;
}
里面主要是調(diào)用unflatten_binder()函數(shù)
那我們就來詳細(xì)看下
3.4.3.1 The unflatten_binder() function
status_t unflatten_binder(const sp<ProcessState>& proc,
    const Parcel& in, sp<IBinder>* out)
{
    const flat_binder_object* flat = in.readObject(false);
    if (flat) {
        switch (flat->type) {
            case BINDER_TYPE_BINDER:
                // The requester and the service live in the same process
                *out = reinterpret_cast<IBinder*>(flat->cookie);
                return finish_unflatten_binder(NULL, *flat, in);
            case BINDER_TYPE_HANDLE:
                // The requester and the service live in different processes
                *out = proc->getStrongProxyForHandle(flat->handle);
                // Create the BpBinder object
                return finish_unflatten_binder(
                    static_cast<BpBinder*>(out->get()), *flat, in);
        }
    }
    return BAD_TYPE;
}
如果服務(wù)的進(jìn)程與服務(wù)屬于不同的進(jìn)程會調(diào)用getStrongProxyForHandle()函數(shù),那我們就好好研究下
3.4.3.2 getStrongProxyForHandle()函數(shù)
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
    sp<IBinder> result;
    AutoMutex _l(mLock);
    // Look up the entry for this handle (see lookupHandleLocked() below)
    handle_entry* e = lookupHandleLocked(handle);
    if (e != NULL) {
        IBinder* b = e->binder;
        if (b == NULL || !e->refs->attemptIncWeak(this)) {
            ...
            // No IBinder exists for this handle yet (or its weak reference is
            // stale), so create a BpBinder for it
            b = new BpBinder(handle);
            e->binder = b;
            if (b) e->refs = b->getWeakRefs();
            result = b;
        } else {
            result.force_set(b);
            e->refs->decWeak(this);
        }
    }
    return result;
}
So readStrongBinder() parses the flat_binder_object and, for a remote service, ends up creating a BpBinder object.
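Back in getMediaPlayerService(), that BpBinder is then wrapped into a typed proxy via interface_cast. The template itself is a one-liner from frameworks/native/include/binder/IInterface.h; IMediaPlayerService::asInterface(), generated by the IMPLEMENT_META_INTERFACE macro, is what actually constructs the BpMediaPlayerService around the BpBinder:

template<typename INTERFACE>
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj)
{
    // Returns the local implementation if obj is the service's own BBinder,
    // otherwise news up a Bp proxy (here: BpMediaPlayerService) around obj
    return INTERFACE::asInterface(obj);
}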
3.4.3.3 The lookupHandleLocked() function
ProcessState::handle_entry* ProcessState::lookupHandleLocked(int32_t handle)
{
    const size_t N = mHandleToObject.size();
    // If handle is beyond the current length of mHandleToObject, grow the vector
    if (N <= (size_t)handle) {
        handle_entry e;
        e.binder = NULL;
        e.refs = NULL;
        // Insert (handle + 1 - N) blank entries starting at position N
        status_t err = mHandleToObject.insertAt(e, N, handle + 1 - N);
        if (err < NO_ERROR) return NULL;
    }
    return &mHandleToObject.editItemAt(handle);
}
根據(jù)handle值來查找對應(yīng)的handle_entry。
4 Death Notification
A death notification is how the Bp side learns whether the Bn side is alive or dead.
- DeathNotifier extends the IBinder::DeathRecipient class; its main job is to implement binderDied() to deliver the death notice.
- Registration: binder->linkToDeath(sDeathNotifier) registers the sDeathNotifier death notification on the binder.
The Bp side only has to override binderDied() with its cleanup work; after the Bn side dies, binderDied() is called back so the Bp side can respond accordingly, as in the sketch below.
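As a concrete sketch of that pattern (hypothetical class, modeled on IMediaDeathNotifier::DeathNotifier from this article):

// A minimal Bp-side death recipient; MyDeathNotifier is a hypothetical name
class MyDeathNotifier : public IBinder::DeathRecipient {
public:
    // Called back by the Binder framework once the Bn side dies
    virtual void binderDied(const wp<IBinder>& who) {
        // Drop cached proxies, reconnect, or notify the application here
    }
};

// Registration, mirroring getMediaPlayerService():
//   sp<IBinder> binder = sm->getService(String16("media.player"));
//   sp<IBinder::DeathRecipient> notifier = new MyDeathNotifier();
//   binder->linkToDeath(notifier);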
4.1 The linkToDeath() function
// frameworks/native/libs/binder/BpBinder.cpp line 173
status_t BpBinder::linkToDeath(
    const sp<DeathRecipient>& recipient, void* cookie, uint32_t flags)
{
    Obituary ob;
    ob.recipient = recipient;
    ob.cookie = cookie;
    ob.flags = flags;
    {
        AutoMutex _l(mLock);
        if (!mObitsSent) {
            if (!mObituaries) {
                mObituaries = new Vector<Obituary>;
                if (!mObituaries) {
                    return NO_MEMORY;
                }
                getWeakRefs()->incWeak(this);
                IPCThreadState* self = IPCThreadState::self();
                // On the first obituary, ask the driver for a death notification
                self->requestDeathNotification(mHandle, this);
                self->flushCommands();
            }
            ssize_t res = mObituaries->add(ob);
            return res >= (ssize_t)NO_ERROR ? (status_t)NO_ERROR : res;
        }
    }
    return DEAD_OBJECT;
}
里面調(diào)用了requestDeathNotification()函數(shù)
4.2 requestDeathNotification()函數(shù)
//frameworks/native/libs/binder/IPCThreadState.cpp line 670
status_t IPCThreadState::requestDeathNotification(int32_t handle, BpBinder* proxy)
{
    mOut.writeInt32(BC_REQUEST_DEATH_NOTIFICATION);
    mOut.writeInt32((int32_t)handle);
    mOut.writePointer((uintptr_t)proxy);
    return NO_ERROR;
}
This queues the BC_REQUEST_DEATH_NOTIFICATION command for the Binder driver. The rest of the flow is essentially the same as the binder_link_to_death() process inside Service Manager.
4.3 The binderDied() function
//frameworks/av/media/libmedia/IMediaDeathNotifier.cpp line 78
void IMediaDeathNotifier::DeathNotifier::binderDied(const wp<IBinder>& who __unused) {
    SortedVector< wp<IMediaDeathNotifier> > list;
    {
        Mutex::Autolock _l(sServiceLock);
        // Drop the Bp-side reference to MediaPlayerService
        sMediaPlayerService.clear();
        list = sObitRecipients;
    }
    size_t count = list.size();
    for (size_t iter = 0; iter < count; ++iter) {
        sp<IMediaDeathNotifier> notifier = list[iter].promote();
        if (notifier != 0) {
            // MediaServer died: notify the application, which gets this callback
            notifier->died();
        }
    }
}
客戶端進(jìn)程通過Binder驅(qū)動獲得Binder的代理(BpBinder),死亡通知注冊的過程就是客戶端進(jìn)程向Binder驅(qū)動注冊的一個死亡通知,該死亡通知關(guān)聯(lián)BBinder,即與BpBinder所對應(yīng)的服務(wù)端。
4.4 unlinkToDeath()函數(shù)
當(dāng)Bp在收到服務(wù)端的死亡通知之前先掛了,那么需要在對象的銷毀方法內(nèi),調(diào)用unlinkToDeath()來取消死亡通知;
//frameworks/av/media/libmedia/IMediaDeathNotifier.cpp line 101
IMediaDeathNotifier::DeathNotifier::~DeathNotifier()
{
    Mutex::Autolock _l(sServiceLock);
    sObitRecipients.clear();
    if (sMediaPlayerService != 0) {
        IInterface::asBinder(sMediaPlayerService)->unlinkToDeath(this);
    }
}
4.5 觸發(fā)時機(jī)
- 每當(dāng)service進(jìn)程退出時,service manager 會收到來自Binder驅(qū)動的死亡通知。
- 這項工作在啟動Service Manager時通過 binder_link_to_death(bs, ptr, &si->death)完成。
- 另外,每個Bp端也可以自己注冊死亡通知,能獲取Binder的死亡消息,比如前面的IMediaDeathNotifier。
So how does a Binder death notification actually get triggered? Every process that takes part in Binder IPC holds /dev/binder open. When a process exits abnormally, the Binder driver guarantees that any /dev/binder file the process failed to close properly gets released: the driver invokes the release callback registered for /dev/binder, performs its cleanup, and checks whether the dying side's BBinder objects have death notifications registered; for each one that does, it sends a death-notification message to the corresponding BpBinder side.
5 Summary
During the getService flow, when execution reaches binder_transaction(), the requester's process is compared with the service's:
- When the requesting process and the service belong to different processes, a binder_ref object is created for the requesting process, pointing to the service process's binder_node.
- When they belong to the same process, no new object is created; the reference count is incremented and type is rewritten to BINDER_TYPE_BINDER or BINDER_TYPE_WEAK_BINDER.
- Finally, readStrongBinder() returns the concrete object: in the same-process case, the real subclass of the BBinder object; otherwise, a BpBinder proxy for the handle.