Android uses Binder as its inter-process communication (IPC) mechanism. The typical application is Android's C/S model, i.e. client/service. This mechanism has the following advantages:
1. Extensibility.
2. Efficiency: a single service can serve many clients.
3. Security: client and service run in different processes, so even if a client goes wrong it cannot take the service down with it.
Today we use mediaserver as the example for analyzing the Binder communication mechanism.
Start with this picture: Android has a central registry of services called servicemanager, and mediaserver's job is to register a handful of multimedia services with it. From that angle, mediaserver is a client of servicemanager.
In main_mediaserver.cpp:
int main(int argc, char** argv)
{
    sp<ProcessState> proc(ProcessState::self());      // 1. create the ProcessState object
    sp<IServiceManager> sm = defaultServiceManager(); // 2. yields BpServiceManager(BpBinder(0))
    LOGI("ServiceManager: %p", sm.get());
    AudioFlinger::instantiate();          // 3. instantiate AudioFlinger and register it via sm->addService()
    MediaPlayerService::instantiate();
    CameraService::instantiate();
    AudioPolicyService::instantiate();
    ProcessState::self()->startThreadPool();  // 4. spawns a thread that ends up in joinThreadPool() below
    IPCThreadState::self()->joinThreadPool(); // 5. talkWithDriver(): serve the services hosted in this server
    /* Two threads are now talking to the binder driver on behalf of every
       service in this process, picking up whatever the services' clients
       send and handling it. */
}
Let's start with part 1, sp<ProcessState> proc(ProcessState::self()).
This is just proc = ProcessState::self(); so look at ProcessState::self():
sp<ProcessState> ProcessState::self()
{
    if (gProcess != NULL) return gProcess; // gProcess is a global defined in Static.cpp; clearly a singleton
    AutoMutex _l(gProcessMutex);
    if (gProcess == NULL) gProcess = new ProcessState; // the ProcessState object
    return gProcess;
}
Simple enough: it returns the ProcessState object. Now its constructor:
ProcessState::ProcessState()
    : mDriverFD(open_driver())  // this is what opens the binder driver
    , mVMStart(MAP_FAILED)
    , mManagesContexts(false)
    , mBinderContextCheckFunc(NULL)
    , mBinderContextUserData(NULL)
    , mThreadPoolStarted(false)
    , mThreadPoolSeq(1)
{
    ...
    mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
    ...
}
So the constructor opens the binder driver and then mmaps memory against its file descriptor.
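For completeness, open_driver() looks roughly like this (a condensed sketch; the version check against BINDER_VERSION is omitted and details vary across Android releases):
static int open_driver()
{
    int fd = open("/dev/binder", O_RDWR);   // the binder device node
    if (fd >= 0) {
        fcntl(fd, F_SETFD, FD_CLOEXEC);
        size_t maxThreads = 15;             // cap the driver-requested thread pool
        ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
    } else {
        LOGW("Opening '/dev/binder' failed: %s\n", strerror(errno));
    }
    return fd;
}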
2. sp<IServiceManager> sm = defaultServiceManager():
This is the crucial part; getting it right determines whether the rest of the analysis succeeds. Here is the definition of defaultServiceManager():
sp<IServiceManager> defaultServiceManager()
{
    if (gDefaultServiceManager != NULL) return gDefaultServiceManager; // again the singleton pattern
    {
        AutoMutex _l(gDefaultServiceManagerLock);
        if (gDefaultServiceManager == NULL) {
            gDefaultServiceManager = interface_cast<IServiceManager>( // interface_cast is a template that returns IServiceManager::asInterface(obj); asInterface is declared by the DECLARE_META_INTERFACE macro and implemented by the IMPLEMENT_META_INTERFACE macro
                ProcessState::self()->getContextObject(NULL));        // returns BpBinder(0)
        }
    }
    return gDefaultServiceManager; // i.e. BpServiceManager(BpBinder(0))
}
First, ProcessState::self()->getContextObject(NULL):
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& caller)
{
    if (supportsProcesses()) { // did the binder driver open correctly?
        return getStrongProxyForHandle(0); // returns BpBinder(0)
    } else {
        return getContextObject(String16("default"), caller);
    }
}
Next, getStrongProxyForHandle(0):
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle) // note: the handle passed down from above is 0
{
    sp<IBinder> result;
    AutoMutex _l(mLock);
    handle_entry* e = lookupHandleLocked(handle); // look up handle 0, creating an entry if there is none
    if (e != NULL) {
        // We need to create a new BpBinder if there isn't currently one, OR we
        // are unable to acquire a weak reference on this current one. See comment
        // in getWeakProxyForHandle() for more info about this.
        IBinder* b = e->binder;
        if (b == NULL || !e->refs->attemptIncWeak(this)) {
            b = new BpBinder(handle); // !!! with the handle passed down, this builds BpBinder(0)
            e->binder = b;
            if (b) e->refs = b->getWeakRefs();
            result = b;
        } else {
            // This little bit of nastyness is to allow us to add a primary
            // reference to the remote proxy when this team doesn't have one
            // but another team is sending the handle to us.
            result.force_set(b);
            e->refs->decWeak(this);
        }
    }
    return result;
}
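For reference, BpBinder's constructor does little more than record the handle (slightly condensed; the exact member list varies across releases):
BpBinder::BpBinder(int32_t handle)
    : mHandle(handle) // 0 here: every transact() on this proxy will target handle 0
    , mAlive(1)
{
    extendObjectLifetime(OBJECT_LIFETIME_WEAK);
    IPCThreadState::self()->incWeakHandle(handle); // tell the driver we hold a weak ref on this handle
}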
At this point we know that ProcessState::self()->getContextObject(NULL) returns BpBinder(0). Substituting back into defaultServiceManager(), that gives:
gDefaultServiceManager = interface_cast<IServiceManager>(BpBinder(0)).
Here is the definition of interface_cast:
template<typename INTERFACE>
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj)
{
    return INTERFACE::asInterface(obj);
}
Substituting, this is:
gDefaultServiceManager = IServiceManager::asInterface(BpBinder(0));
We find no asInterface definition in IServiceManager.h, but we do find this macro:
class IServiceManager : public IInterface
{
public:
    DECLARE_META_INTERFACE(ServiceManager);
    ...
};
The macro is defined as follows:
#define DECLARE_META_INTERFACE(INTERFACE)                            \
    static const String16 descriptor;                                \
    static sp<I##INTERFACE> asInterface(const sp<IBinder>& obj);     \
    virtual const String16& getInterfaceDescriptor() const;          \
    I##INTERFACE();                                                  \
    virtual ~I##INTERFACE();
Expanded for ServiceManager, this is:
static const String16 descriptor;
static sp<IServiceManager> asInterface(const sp<IBinder>& obj);
virtual const String16& getInterfaceDescriptor() const;
IServiceManager();
virtual ~IServiceManager();
So the macro declares an asInterface() method. The implementation lives in IServiceManager.cpp, inside this macro:
IMPLEMENT_META_INTERFACE(ServiceManager, "android.os.IServiceManager");
whose definition is:
#define IMPLEMENT_META_INTERFACE(INTERFACE, NAME)                    \
    const String16 I##INTERFACE::descriptor(NAME);                   \
    const String16& I##INTERFACE::getInterfaceDescriptor() const {   \
        return I##INTERFACE::descriptor;                             \
    }                                                                \
    sp<I##INTERFACE> I##INTERFACE::asInterface(const sp<IBinder>& obj) \
    {                                                                \
        sp<I##INTERFACE> intr;                                       \
        if (obj != NULL) {                                           \
            intr = static_cast<I##INTERFACE*>(                       \
                obj->queryLocalInterface(                            \
                        I##INTERFACE::descriptor).get());            \
            if (intr == NULL) {                                      \
                intr = new Bp##INTERFACE(obj);                       \
            }                                                        \
        }                                                            \
        return intr;                                                 \
    }                                                                \
    I##INTERFACE::I##INTERFACE() { }                                 \
    I##INTERFACE::~I##INTERFACE() { }
Expanded, that becomes:
const String16 IServiceManager::descriptor("android.os.IServiceManager");
const String16& IServiceManager::getInterfaceDescriptor() const {
    return IServiceManager::descriptor;
}
sp<IServiceManager> IServiceManager::asInterface(const sp<IBinder>& obj)
{
    sp<IServiceManager> intr;
    if (obj != NULL) {
        intr = static_cast<IServiceManager*>(
            obj->queryLocalInterface(
                IServiceManager::descriptor).get());
        if (intr == NULL) {
            intr = new BpServiceManager(obj); // clearly, this returns a BpServiceManager object!!!
        }
    }
    return intr;
}
IServiceManager::IServiceManager() { }
IServiceManager::~IServiceManager() { }
So we end up with gDefaultServiceManager = BpServiceManager(BpBinder(0)), that is,
sp<IServiceManager> sm = defaultServiceManager() = BpServiceManager(BpBinder(0)).
Now look at the BpServiceManager constructor:
BpServiceManager(const sp<IBinder>& impl)
    : BpInterface<IServiceManager>(impl)
{
}
Substituting:
BpServiceManager(BpBinder(0))
    : BpInterface<IServiceManager>(BpBinder(0))
{
}
The BpInterface definition:
template<typename INTERFACE>
class BpInterface : public INTERFACE, public BpRefBase
{
public:
    BpInterface(const sp<IBinder>& remote);
protected:
    virtual IBinder* onAsBinder();
};
Substituted:
class BpInterface<IServiceManager> : public IServiceManager, public BpRefBase
{
public:
    BpInterface(BpBinder(0)); // note this line
protected:
    virtual IBinder* onAsBinder();
};
Its constructor is defined as:
template<typename INTERFACE>
inline BpInterface<INTERFACE>::BpInterface(const sp<IBinder>& remote)
    : BpRefBase(remote)
{
}
Substituting gives:
BpRefBase(BpBinder(0))
whose definition is:
BpRefBase::BpRefBase(const sp<IBinder>& o)
    : mRemote(o.get()), mRefs(NULL), mState(0)
{
    extendObjectLifetime(OBJECT_LIFETIME_WEAK);
    if (mRemote) {
        mRemote->incStrong(this);          // Removed on first IncStrong().
        mRefs = mRemote->createWeak(this); // Held for our entire lifetime.
    }
}
The line to watch is mRemote(o.get()): mRemote = BpBinder(0). Remember this, because the derived class BpServiceManager will use it for all of its Binder communication.
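The remote() call we will meet in addService() is just BpRefBase's accessor for this field, essentially:
// in BpRefBase (Binder.h), slightly abridged:
inline IBinder* remote()       { return mRemote; } // hands back BpBinder(0)
inline IBinder* remote() const { return mRemote; }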
3. AudioFlinger::instantiate():
void AudioFlinger::instantiate() {
    defaultServiceManager()->addService( // use defaultServiceManager()'s addService() method
        String16("media.audio_flinger"), new AudioFlinger());
}
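As the mirror image of this registration, a client would later fetch the service by name through the same servicemanager proxy. A hypothetical snippet (IAudioFlinger stands in for the matching interface class):
sp<IServiceManager> sm = defaultServiceManager();
sp<IBinder> binder = sm->getService(String16("media.audio_flinger"));
// interface_cast works exactly as analyzed above, yielding BpAudioFlinger(BpBinder(handle))
sp<IAudioFlinger> flinger = interface_cast<IAudioFlinger>(binder);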
As analyzed in part 2, defaultServiceManager() returns BpServiceManager(BpBinder(0)), so look at BpServiceManager's addService() method:
virtual status_t addService(const String16& name, const sp<IBinder>& service)
{
    Parcel data, reply;
    data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
    data.writeString16(name);
    data.writeStrongBinder(service);
    status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply); // i.e. calls BpBinder::transact()
    return err == NO_ERROR ? reply.readInt32() : err;
}
addService() uses remote()->transact(), which is BpBinder::transact():
status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    // Once a binder has died, it will never come back to life.
    if (mAlive) {
        status_t status = IPCThreadState::self()->transact( // hand off to IPCThreadState::transact()
            mHandle, code, data, reply, flags);
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }
    return DEAD_OBJECT;
}
Next, IPCThreadState::transact():
status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    status_t err = data.errorCheck();
    flags |= TF_ACCEPT_FDS;
    IF_LOG_TRANSACTIONS() {
        TextOutput::Bundle _b(alog);
        alog << "BC_TRANSACTION thr " << (void*)pthread_self() << " / hand "
            << handle << " / code " << TypeCode(code) << ": "
            << indent << data << dedent << endl;
    }
    if (err == NO_ERROR) {
        LOG_ONEWAY(">>>> SEND from pid %d uid %d %s", getpid(), getuid(),
            (flags & TF_ONE_WAY) == 0 ? "READ REPLY" : "ONE WAY");
        err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL); // queue the packet into the mOut buffer
    }
    if (err != NO_ERROR) {
        if (reply) reply->setError(err);
        return (mLastError = err);
    }
    if ((flags & TF_ONE_WAY) == 0) {
        if (reply) {
            err = waitForResponse(reply); // this drives talkWithDriver() and executeCommand()
        } else {
            Parcel fakeReply;
            err = waitForResponse(&fakeReply);
        }
        IF_LOG_TRANSACTIONS() {
            TextOutput::Bundle _b(alog);
            alog << "BR_REPLY thr " << (void*)pthread_self() << " / hand "
                << handle << ": ";
            if (reply) alog << indent << *reply << dedent << endl;
            else alog << "(none requested)" << endl;
        }
    } else {
        err = waitForResponse(NULL, NULL);
    }
    return err;
}
Two functions do the heavy lifting here: writeTransactionData() and waitForResponse().
writeTransactionData() packages the command word plus the Parcel into the mOut buffer, as sketched below.
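A condensed sketch of what it queues (per the froyo-era IPCThreadState.cpp; error branches omitted, exact fields vary slightly by release):
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    binder_transaction_data tr;
    tr.target.handle = handle;                 // 0: destined for servicemanager
    tr.code = code;                            // e.g. ADD_SERVICE_TRANSACTION
    tr.flags = binderFlags;
    tr.data_size = data.ipcDataSize();         // point at the Parcel's raw bytes...
    tr.data.ptr.buffer = data.ipcData();
    tr.offsets_size = data.ipcObjectsCount()*sizeof(size_t); // ...and at its binder objects
    tr.data.ptr.offsets = data.ipcObjects();
    mOut.writeInt32(cmd);                      // BC_TRANSACTION
    mOut.write(&tr, sizeof(tr));               // queued; nothing reaches the driver yet
    return NO_ERROR;
}
With the transaction now sitting in mOut, we look at waitForResponse():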
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    int32_t cmd;
    int32_t err;
    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break; // talk to the binder driver: flush mOut, then wait for the driver's answer
        err = mIn.errorCheck(); // check the data the driver returned
        if (err < NO_ERROR) break;
        if (mIn.dataAvail() == 0) continue;
        cmd = mIn.readInt32(); // pull out the command
        IF_LOG_COMMANDS() {
            alog << "Processing waitForResponse Command: "
                << getReturnString(cmd) << endl;
        }
        switch (cmd) { // dispatch on the command
        case BR_TRANSACTION_COMPLETE:
            if (!reply && !acquireResult) goto finish;
            break;
        ...
        default:
            err = executeCommand(cmd);
            if (err != NO_ERROR) goto finish;
            break;
        }
    }
    ...
    return err;
}
Now talkWithDriver():
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    ...
    // Return immediately if there is nothing to do.
    if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
    bwr.write_consumed = 0;
    bwr.read_consumed = 0;
    status_t err;
    do {
        IF_LOG_COMMANDS() {
            alog << "About to read/write, write size = " << mOut.dataSize() << endl;
        }
#if defined(HAVE_ANDROID_OS)
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0) // the true core: as a client we push our packet in through this ioctl and then read the service side's answer back out; as a service the direction is reversed
            err = NO_ERROR;
        else
            err = -errno;
#else
        err = INVALID_OPERATION;
#endif
        IF_LOG_COMMANDS() {
            alog << "Finished read/write, write size = " << mOut.dataSize() << endl;
        }
    } while (err == -EINTR);
    ...
    return err;
}
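The bwr variable elided above is a struct binder_write_read, the single argument that carries both directions of the conversation. Its classic 32-bit layout (from the binder kernel header; field widths changed in later 64-bit kernels):
struct binder_write_read {
    signed long   write_size;     /* bytes of BC_* commands to consume (from mOut) */
    signed long   write_consumed; /* how much the driver actually took */
    unsigned long write_buffer;
    signed long   read_size;      /* capacity for BR_* replies (into mIn) */
    signed long   read_consumed;  /* how much the driver filled in */
    unsigned long read_buffer;
};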
That wraps up the client side; the rest of the work falls to service_manager.
3-1. The service_manager side:
int main(int argc, char **argv)
{
    struct binder_state *bs;
    void *svcmgr = BINDER_SERVICE_MANAGER;
    bs = binder_open(128*1024); // opens the binder driver directly, without the BBinder machinery
    if (binder_become_context_manager(bs)) { // tell the binder driver "I'm the boss": handle 0
        LOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }
    svcmgr_handle = svcmgr;
    binder_loop(bs, svcmgr_handler);
    return 0;
}
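binder_become_context_manager() deserves a quick look because it is so small; it is just the BINDER_SET_CONTEXT_MGR ioctl (per servicemanager's binder.c):
int binder_become_context_manager(struct binder_state *bs)
{
    /* claim context-manager status: from now on, handle 0
       (and therefore every BpBinder(0)) routes to this process */
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}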
Next, binder_loop(bs, svcmgr_handler):
void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    unsigned readbuf[32];
    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;
    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(unsigned));
    for (;;) { // loop forever, working on behalf of every registered service
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (unsigned) readbuf;
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr); // talk to the binder driver: nominally write-then-read, but write_size is 0 here, so this only reads the binder side's data
        if (res < 0) {
            LOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }
        res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func); // parse what was read back; remember func here is svcmgr_handler
        if (res == 0) {
            LOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            LOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}
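The binder_write() helper used for BC_ENTER_LOOPER above is the write-only counterpart of the loop's read-only ioctl; roughly (per binder.c):
int binder_write(struct binder_state *bs, void *data, unsigned len)
{
    struct binder_write_read bwr;
    int res;
    bwr.write_size = len;            /* only the write half carries data */
    bwr.write_consumed = 0;
    bwr.write_buffer = (unsigned) data;
    bwr.read_size = 0;               /* read_size == 0: don't block waiting for input */
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0)
        fprintf(stderr, "binder_write: ioctl failed (%s)\n", strerror(errno));
    return res;
}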
Now binder_parse(bs, 0, readbuf, bwr.read_consumed, func):
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uint32_t *ptr, uint32_t size, binder_handler func)
{
    int r = 1;
    uint32_t *end = ptr + (size / 4);
    while (ptr < end) {
        uint32_t cmd = *ptr++;
#if TRACE
        fprintf(stderr,"%s:\n", cmd_name(cmd));
#endif
        switch(cmd) {
        ...
        case BR_TRANSACTION: {
            struct binder_txn *txn = (void *) ptr;
            if ((end - ptr) * sizeof(uint32_t) < sizeof(struct binder_txn)) {
                LOGE("parse: txn too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (func) {
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;
                int res;
                bio_init(&reply, rdata, sizeof(rdata), 4);
                bio_init_from_txn(&msg, txn);
                res = func(bs, txn, &msg, &reply); // run the client's request through func, i.e. svcmgr_handler
                binder_send_reply(bs, &reply, txn->data, res);
            }
            ptr += sizeof(*txn) / sizeof(uint32_t);
            break;
        }
        ...
        default:
            LOGE("parse: OOPS %d\n", cmd);
            return -1;
        }
    }
    return r;
}
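Note binder_send_reply() right after func returns: it is what carries the result back to the blocked client, and it frees the transaction buffer in the same ioctl. Condensed (a sketch based on binder.c; the error/status path is omitted):
void binder_send_reply(struct binder_state *bs, struct binder_io *reply,
                       void *buffer_to_free, int status)
{
    struct {
        uint32_t cmd_free;     /* BC_FREE_BUFFER: give the txn buffer back */
        void *buffer;
        uint32_t cmd_reply;    /* BC_REPLY: the answer for the waiting client */
        struct binder_txn txn;
    } __attribute__((packed)) data;

    data.cmd_free = BC_FREE_BUFFER;
    data.buffer = buffer_to_free;
    data.cmd_reply = BC_REPLY;
    data.txn.target = 0;
    data.txn.cookie = 0;
    data.txn.code = 0;
    data.txn.flags = 0;
    data.txn.data_size = reply->data - reply->data0;
    data.txn.offs_size = ((char*) reply->offs) - ((char*) reply->offs0);
    data.txn.data = reply->data0;
    data.txn.offs = reply->offs0;
    binder_write(bs, &data, sizeof(data)); /* two commands, one ioctl */
}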
So in the BR_TRANSACTION case the work essentially becomes svcmgr_handler(bs, txn, &msg, &reply):
int svcmgr_handler(struct binder_state *bs,
                   struct binder_txn *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *s;
    unsigned len;
    void *ptr;
//  LOGI("target=%p code=%d pid=%d uid=%d\n",
//       txn->target, txn->code, txn->sender_pid, txn->sender_euid);
    if (txn->target != svcmgr_handle)
        return -1;
    s = bio_get_string16(msg, &len);
    if ((len != (sizeof(svcmgr_id) / 2)) ||
        memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
        fprintf(stderr,"invalid id %s\n", str8(s));
        return -1;
    }
    switch(txn->code) { // dispatch on the transaction code
    case SVC_MGR_GET_SERVICE:
    case SVC_MGR_CHECK_SERVICE:
        s = bio_get_string16(msg, &len);
        ptr = do_find_service(bs, s, len); // look up the requested service
        if (!ptr)
            break;
        bio_put_ref(reply, ptr);
        return 0;
    case SVC_MGR_ADD_SERVICE:
        s = bio_get_string16(msg, &len);
        ptr = bio_get_ref(msg);
        if (do_add_service(bs, s, len, ptr, txn->sender_euid)) // add the service to the service list
            return -1;
        break;
    case SVC_MGR_LIST_SERVICES: {
        unsigned n = bio_get_uint32(msg);
        si = svclist;
        while ((n-- > 0) && si) // walk to the n-th service in the list
            si = si->next;
        if (si) {
            bio_put_string16(reply, si->name);
            return 0;
        }
        return -1;
    }
    default:
        LOGE("unknown code %d\n", txn->code);
        return -1;
    }
    bio_put_uint32(reply, 0);
    return 0;
}
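The list those handlers operate on is a plain singly linked list of svcinfo nodes, and do_find_service() is essentially a name match over it. A sketch (field names per the froyo-era service_manager.c):
struct svcinfo {
    struct svcinfo *next;
    void *ptr;                 /* the binder ref received in addService */
    struct binder_death death; /* so dead services can be unlinked */
    unsigned len;
    uint16_t name[0];          /* UTF-16 name, e.g. "media.audio_flinger" */
};

void *do_find_service(struct binder_state *bs, uint16_t *s, unsigned len)
{
    struct svcinfo *si;
    for (si = svclist; si; si = si->next) {
        if ((len == si->len) &&
            !memcmp(s, si->name, len * sizeof(uint16_t)))
            return si->ptr;    /* bio_put_ref() turns this back into a handle */
    }
    return 0;
}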
With that, servicemanager is wired up to the services each server registers. To summarize: the client side pushes its data into the binder driver chiefly through BpBinder::transact(), while servicemanager reads the driver directly and carries out the requested operation. In a follow-up we will analyze how a concrete service and its clients communicate over Binder.