The Binder mechanism consists of four parts: the Binder driver, the Client, the ServiceManager, and the Service.
1. The Binder driver is essentially a Linux kernel driver whose purpose is to implement memory sharing between processes.
Because the driver side lives in the kernel and the Binder mechanism as a whole is huge, we will skip that part for now.
2. ServiceManager
As its name suggests, Service Manager is a "housekeeper". More precisely, it is the manager of all the system services.
Let's start with service_manager.c: \frameworks\native\cmds\servicemanager\service_manager.c
static struct {
    unsigned uid;
    const char *name;
} allowed[] = {
    { AID_MEDIA, "media.audio_flinger" },
    { AID_MEDIA, "media.log" },
    { AID_MEDIA, "media.player" },
    { AID_MEDIA, "media.camera" },
    { AID_MEDIA, "media.audio_policy" },
    { AID_DRM, "drm.drmManager" },
    { AID_NFC, "nfc" },
    { AID_BLUETOOTH, "bluetooth" },
    { AID_RADIO, "radio.phone" },
    { AID_RADIO, "radio.sms" },
    { AID_RADIO, "radio.phonesubinfo" },
    { AID_RADIO, "radio.simphonebook" },
    /* TODO: remove after phone services are updated: */
    { AID_RADIO, "phone" },
    { AID_RADIO, "sip" },
    { AID_RADIO, "isms" },
    { AID_RADIO, "iphonesubinfo" },
    { AID_RADIO, "simphonebook" },
    { AID_MEDIA, "common_time.clock" },
    { AID_MEDIA, "common_time.config" },
    { AID_KEYSTORE, "android.security.keystore" },
};
The above is just a subset of the system services. All of them are registered with servicemanager, which manages them.
So what does Service Manager actually do?
I. It provides IBinder objects, i.e. references to the individual services, for every process to use; for any given process, that IBinder object is unique.
II. It lets each system service register itself with servicemanager.
Here the Binder driver is not a driver in the usual operating-system sense; think of it as the medium through which the client and the ServiceManager talk to each other.
At its core, the Binder driver is shared memory.
This is really the first half of the whole Binder mechanism, the path from the client to the servicemanager: the client obtains an IBinder object and can then "operate" the service directly.
For example:
AlarmManager alarmManager = (AlarmManager) context.getSystemService(Context.ALARM_SERVICE);
alarmManager.setExact(AlarmManager.ELAPSED_REALTIME, elapsedRealtime,
        pendingIntent);
We obtain the alarm service's Binder object and can then use the "services" it provides.
And this call is synchronous!
It feels just like operating on something within the same process.
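At the native level, the same lookup goes through servicemanager's binder protocol directly. The sketch below is modeled on svcmgr_lookup in bctest.c, which lives next to service_manager.c; the helper names (bio_put_string16_x, binder_call, bio_get_ref, and the SVC_MGR_NAME / BINDER_SERVICE_MANAGER constants) are taken from that era of the source, so treat this as an illustrative sketch rather than a guaranteed API.

/* Sketch: ask servicemanager (binder handle 0) for a service by name,
   using the same binder.c helpers that service_manager.c itself uses. */
void *lookup_service(struct binder_state *bs, const char *name)
{
    void *ptr;
    unsigned iodata[512/4];
    struct binder_io msg, reply;

    bio_init(&msg, iodata, sizeof(iodata), 4);
    bio_put_uint32(&msg, 0);                 /* strict mode header */
    bio_put_string16_x(&msg, SVC_MGR_NAME);  /* "android.os.IServiceManager" */
    bio_put_string16_x(&msg, name);          /* the service we are looking for */

    /* target 0 is the context manager, i.e. servicemanager itself */
    if (binder_call(bs, &msg, &reply, BINDER_SERVICE_MANAGER,
                    SVC_MGR_CHECK_SERVICE))
        return 0;

    ptr = bio_get_ref(&reply);               /* the service's binder reference */
    if (ptr)
        binder_acquire(bs, ptr);             /* take a strong reference */

    binder_done(bs, &msg, &reply);
    return ptr;
}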
Now let's see how Service Manager actually accomplishes the points above.
2.1 Starting Service Manager:
Since SM is the administrator, it ought to be the most diligent one, which means it must start the "earliest".
Indeed, its startup is defined in init.rc: \system\core\rootdir\init.rc
# adbd on at boot in emulator
on property:ro.kernel.qemu=1
    start adbd

service servicemanager /system/bin/servicemanager
    class core
    user system
    group system
    critical
    onrestart restart healthd
    onrestart restart zygote
    onrestart restart media
    onrestart restart surfaceflinger
    onrestart restart drm
What does Service Manager do after it starts?
Still in service_manager.c:
int main(int argc, char **argv)
{
    struct binder_state *bs;
    void *svcmgr = BINDER_SERVICE_MANAGER;

    bs = binder_open(128*1024);

    if (binder_become_context_manager(bs)) {
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }

    svcmgr_handle = svcmgr;
    binder_loop(bs, svcmgr_handler);
    return 0;
}
binder_open opens the Binder driver and maps a 128 KB buffer.
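binder_open itself lives in binder.c in the same directory. The simplified sketch below (error handling stripped) shows the two essential steps: opening /dev/binder and mmap-ing the requested buffer into the caller's address space. Treat it as an approximation of the real code, not a verbatim copy.

struct binder_state *binder_open(unsigned mapsize)
{
    struct binder_state *bs = malloc(sizeof(*bs));

    /* 1. open the Binder driver node */
    bs->fd = open("/dev/binder", O_RDWR);

    /* 2. map 'mapsize' bytes (128 KB here) of driver-managed memory into this
          process; incoming transaction data is delivered through this mapping,
          which is what the "memory sharing" above refers to */
    bs->mapsize = mapsize;
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);

    return bs;   /* the real code checks each step and cleans up on failure */
}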
binder_become_context_manager(bs):
int binder_become_context_manager(struct binder_state *bs)
{
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}
This registers itself with the driver as the housekeeper of all services (the context manager).
void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    unsigned readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(unsigned));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (unsigned) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

        if (res < 0) {
            ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }

        res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func);
        if (res == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}
It then enters its loop, which looks very much like the Android thread message-driven mechanism analyzed earlier.
It reads from the message queue and parses the messages, until an error occurs.
Next, let's look at binder_parse:
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uint32_t *ptr, uint32_t size, binder_handler func)
{
    int r = 1;
    uint32_t *end = ptr + (size / 4);

    while (ptr < end) {
        uint32_t cmd = *ptr++;
#if TRACE
        fprintf(stderr,"%s:\n", cmd_name(cmd));
#endif
        switch(cmd) {
        case BR_NOOP:
            break;
        case BR_TRANSACTION_COMPLETE:
            break;
        case BR_INCREFS:
        case BR_ACQUIRE:
        case BR_RELEASE:
        case BR_DECREFS:
#if TRACE
            fprintf(stderr,"  %08x %08x\n", ptr[0], ptr[1]);
#endif
            ptr += 2;
            break;
        case BR_TRANSACTION: {
            struct binder_txn *txn = (void *) ptr;
            if ((end - ptr) * sizeof(uint32_t) < sizeof(struct binder_txn)) {
                ALOGE("parse: txn too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (func) {
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;
                int res;

                bio_init(&reply, rdata, sizeof(rdata), 4);
                bio_init_from_txn(&msg, txn);
                res = func(bs, txn, &msg, &reply);
                binder_send_reply(bs, &reply, txn->data, res);
            }
            ptr += sizeof(*txn) / sizeof(uint32_t);
            break;
        }
        case BR_REPLY: {
            struct binder_txn *txn = (void*) ptr;
            if ((end - ptr) * sizeof(uint32_t) < sizeof(struct binder_txn)) {
                ALOGE("parse: reply too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (bio) {
                bio_init_from_txn(bio, txn);
                bio = 0;
            } else {
                /* todo FREE BUFFER */
            }
            ptr += (sizeof(*txn) / sizeof(uint32_t));
            r = 0;
            break;
        }
        case BR_DEAD_BINDER: {
            struct binder_death *death = (void*) *ptr++;
            death->func(bs, death->ptr);
            break;
        }
        case BR_FAILED_REPLY:
            r = -1;
            break;
        case BR_DEAD_REPLY:
            r = -1;
            break;
        default:
            ALOGE("parse: OOPS %d\n", cmd);
            return -1;
        }
    }

    return r;
}
The key cases to analyze are BR_TRANSACTION and BR_REPLY.
BR_TRANSACTION does some initialization and then calls
res = func(bs, txn, &msg, &reply);
binder_send_reply(bs, &reply, txn->data, res);
The func argument is the handler passed in from service_manager.c:
int svcmgr_handler(struct binder_state *bs, struct binder_txn *txn, struct binder_io *msg, struct binder_io *reply)
So the function that binder_loop ultimately uses to handle requests is the one passed in!
At this point the overall flow of service_manager is clear.
It is an event-driven mechanism:
1. Read messages from the Binder driver.
2. Handle the messages.
3. Stay in the loop, never exiting on its own, until a fatal error occurs.

int svcmgr_handler(struct binder_state *bs,
                   struct binder_txn *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *s;
    unsigned len;
    void *ptr;
    uint32_t strict_policy;
    int allow_isolated;

//    ALOGI("target=%p code=%d pid=%d uid=%d\n",
//          txn->target, txn->code, txn->sender_pid, txn->sender_euid);

    if (txn->target != svcmgr_handle)
        return -1;

    // Equivalent to Parcel::enforceInterface(), reading the RPC
    // header with the strict mode policy mask and the interface name.
    // Note that we ignore the strict_policy and don't propagate it
    // further (since we do no outbound RPCs anyway).
    strict_policy = bio_get_uint32(msg);
    s = bio_get_string16(msg, &len);

    if ((len != (sizeof(svcmgr_id) / 2)) ||
        memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
        fprintf(stderr,"invalid id %s\n", str8(s));
        return -1;
    }

    switch(txn->code) {
    case SVC_MGR_GET_SERVICE:
    case SVC_MGR_CHECK_SERVICE:
        s = bio_get_string16(msg, &len);
        ptr = do_find_service(bs, s, len, txn->sender_euid);
        if (!ptr)
            break;
        bio_put_ref(reply, ptr);
        return 0;

    case SVC_MGR_ADD_SERVICE:
        s = bio_get_string16(msg, &len);
        ptr = bio_get_ref(msg);
        allow_isolated = bio_get_uint32(msg) ? 1 : 0;
        if (do_add_service(bs, s, len, ptr, txn->sender_euid, allow_isolated))
            return -1;
        break;

    case SVC_MGR_LIST_SERVICES: {
        unsigned n = bio_get_uint32(msg);

        si = svclist;
        while ((n-- > 0) && si)
            si = si->next;
        if (si) {
            bio_put_string16(reply, si->name);
            return 0;
        }
        return -1;
    }
    default:
        ALOGE("unknown code %d\n", txn->code);
        return -1;
    }

    bio_put_uint32(reply, 0);
    return 0;
}
The switch statement handles querying/getting a service, or registering one.
A lookup searches svclist for a service with the same name.
svclist is a linked list, just like a thread's message queue!
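Each node of that list is a struct svcinfo. The definition below is reconstructed from this era of service_manager.c; take the exact field order as an approximation.

struct svcinfo
{
    struct svcinfo *next;       /* next registered service in svclist */
    void *ptr;                  /* the service's binder reference (handle) */
    struct binder_death death;  /* death-notification callback and cookie */
    int allow_isolated;         /* may isolated processes look it up? */
    unsigned len;               /* name length, in uint16_t units */
    uint16_t name[0];           /* UTF-16 service name stored inline */
};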
struct svcinfo *find_svc(uint16_t *s16, unsigned len)
{
    struct svcinfo *si;

    for (si = svclist; si; si = si->next) {
        if ((len == si->len) &&
            !memcmp(s16, si->name, len * sizeof(uint16_t))) {
            return si;
        }
    }
    return 0;
}
Next, let's look at void *do_find_service(struct binder_state *bs, uint16_t *s, unsigned len, unsigned uid).
What exactly does it return?
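It returns si->ptr, the binder reference of the registered service, which svcmgr_handler then writes into the reply via bio_put_ref. The body below is an abridged reconstruction of do_find_service from the same file, so treat it as a sketch; the only extra logic is the check that keeps isolated processes away from services that did not opt in.

void *do_find_service(struct binder_state *bs,
                      uint16_t *s, unsigned len, unsigned uid)
{
    struct svcinfo *si = find_svc(s, len);

    if (si && si->ptr) {
        if (!si->allow_isolated) {
            /* reject callers running in an isolated uid range */
            unsigned appid = uid % AID_USER;
            if (appid >= AID_ISOLATED_START && appid <= AID_ISOLATED_END)
                return 0;
        }
        return si->ptr;   /* handed back to the client with bio_put_ref */
    }
    return 0;
}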
Registering a service: SVC_MGR_ADD_SERVICE:

int do_add_service(struct binder_state *bs,
                   uint16_t *s, unsigned len,
                   void *ptr, unsigned uid, int allow_isolated)
{
    struct svcinfo *si;
    //ALOGI("add_service('%s',%p,%s) uid=%d\n", str8(s), ptr,
    //        allow_isolated ? "allow_isolated" : "!allow_isolated", uid);

    if (!ptr || (len == 0) || (len > 127))
        return -1;

    if (!svc_can_register(uid, s)) {
        ALOGE("add_service('%s',%p) uid=%d - PERMISSION DENIED\n",
             str8(s), ptr, uid);
        return -1;
    }

    si = find_svc(s, len);
    if (si) {
        if (si->ptr) {
            ALOGE("add_service('%s',%p) uid=%d - ALREADY REGISTERED, OVERRIDE\n",
                 str8(s), ptr, uid);
            svcinfo_death(bs, si);
        }
        si->ptr = ptr;
    } else {
        si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
        if (!si) {
            ALOGE("add_service('%s',%p) uid=%d - OUT OF MEMORY\n",
                 str8(s), ptr, uid);
            return -1;
        }
        si->ptr = ptr;
        si->len = len;
        memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
        si->name[len] = '\0';
        si->death.func = svcinfo_death;
        si->death.ptr = si;
        si->allow_isolated = allow_isolated;
        si->next = svclist;
        svclist = si;
    }

    binder_acquire(bs, ptr);
    binder_link_to_death(bs, ptr, &si->death);

    return 0;
}
int svc_can_register(unsigned uid, uint16_t *name)
checks whether the caller's uid and the service name appear in the allowed table shown at the top (a sketch of it follows below).
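The sketch below reconstructs that check from the same era of the source: root and the system uid may register anything, everyone else must match an entry in allowed[]; str16eq is assumed to be the file's own UTF-16-vs-ASCII string comparison helper.

int svc_can_register(unsigned uid, uint16_t *name)
{
    unsigned n;

    /* root and the system server are always allowed */
    if ((uid == 0) || (uid == AID_SYSTEM))
        return 1;

    /* everyone else must appear in the allowed[] whitelist */
    for (n = 0; n < sizeof(allowed) / sizeof(allowed[0]); n++)
        if ((uid == allowed[n].uid) && str16eq(name, allowed[n].name))
            return 1;

    return 0;
}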
do_add_service then checks whether the service is already in the list:
si = find_svc(s, len);
If it is not, a new si is allocated and linked into svclist.
With that, service_manager is up and running.