Binder框架 – 用户空间和驱动的交互
Binder框架 – android AIDL 的使用
Binder框架 – 用户空间和驱动的交互
Binder框架 – Binder 驱动
Binder 框架 – binder 用户空间框架
MediaPlayerService 启动的过程中,代码可以简化为:
sp<IServiceManager> sm = new BpServiceManager(new BpBinder(0));
sm->addService(String16("media.player"), new MediaPlayerService());
针对上面的两步,看下用户态程序和内核驱动是怎么交互的。
Parcel
在用户程序和内核的交互中,Android提供了Parcel 类帮助我们对需要传递的数据进行封装,其实就是数据的序列化,按照类型和先后顺序写入到内存中。Parcel 内部的存储区域主要有两个,
mData和mObjects , mData 存储基本数据类型,mObjects存储Binder 数据类型。Parcel 提供了针对各种数据写入和读取的操作函数。这两块区域都是使用 malloc 分配出来。
uint8_t* mData;            // flat data buffer (malloc'ed) for primitive data
size_t mDataSize;          // bytes currently stored in mData
size_t mDataCapacity;      // allocated size of mData
mutable size_t mDataPos;   // current read/write cursor into mData
binder_size_t* mObjects;   // offsets of flat_binder_objects inside mData
size_t mObjectsSize;       // number of offsets recorded in mObjects
size_t mObjectsCapacity;   // allocated slots in mObjects
mutable size_t mNextObjectHint;  // search hint used when reading objects back
flat_binder_object
在Parcel 的序列化中,Binder 对象使用flat_binder_object 结构体保存。同时提供了flatten_binder 和unflatten_binder 函数用于序列化和反序列化。
/* Wire representation of a Binder object inside a Parcel / transaction
 * buffer (kernel UAPI struct shared between user space and the driver). */
struct flat_binder_object {
__u32 type ;   /* BINDER_TYPE_(WEAK_)BINDER, (WEAK_)HANDLE, FD */
__u32 flags;
union {
binder_uintptr_t binder;   /* local object: weak-ref pointer */
__u32 handle;              /* remote object: driver-assigned handle */
};
binder_uintptr_t cookie;   /* local object: address of the BBinder itself */
};
// Serialize a weak IBinder reference into a flat_binder_object appended to
// |out|.  Template arguments and cast target types that were stripped during
// HTML conversion are restored here (the snippet did not compile as shown).
status_t flatten_binder(const sp<ProcessState>& /*proc*/,
    const wp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;
    // 0x7f: let the receiving node run at any priority; also accept FDs.
    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    if (binder != NULL) {
        sp<IBinder> real = binder.promote();
        if (real != NULL) {
            IBinder *local = real->localBinder();
            if (!local) {
                // Proxy side: record the driver handle.
                BpBinder *proxy = real->remoteBinder();
                if (proxy == NULL) {
                    ALOGE("null proxy");
                }
                const int32_t handle = proxy ? proxy->handle() : 0;
                obj.type = BINDER_TYPE_WEAK_BINDER == 0 ? 0 : BINDER_TYPE_WEAK_HANDLE;
                obj.type = BINDER_TYPE_WEAK_HANDLE;
                obj.binder = 0;
                obj.handle = handle;
                obj.cookie = 0;
            } else {
                // Local side: store the weak-ref object and the BBinder address.
                obj.type = BINDER_TYPE_WEAK_BINDER;
                obj.binder = reinterpret_cast<uintptr_t>(binder.get_refs());
                obj.cookie = reinterpret_cast<uintptr_t>(binder.unsafe_get());
            }
            return finish_flatten_binder(real, obj, out);
        }
        // Weak reference is dead: flatten a NULL binder instead.
        ALOGE("Unable to unflatten Binder weak reference!");
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = 0;
        obj.cookie = 0;
        return finish_flatten_binder(NULL, obj, out);
    } else {
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = 0;
        obj.cookie = 0;
        return finish_flatten_binder(NULL, obj, out);
    }
}
// Deserialize a strong IBinder from |in|.  Restored the sp<ProcessState> /
// sp<IBinder> template arguments and the cast targets lost in formatting.
status_t unflatten_binder(const sp<ProcessState>& proc,
    const Parcel& in, sp<IBinder>* out)
{
    const flat_binder_object* flat = in.readObject(false);
    if (flat) {
        switch (flat->type) {
            case BINDER_TYPE_BINDER:
                // Same-process object: cookie holds the BBinder's address.
                *out = reinterpret_cast<IBinder*>(flat->cookie);
                return finish_unflatten_binder(NULL, *flat, in);
            case BINDER_TYPE_HANDLE:
                // Remote object: look up / create a BpBinder for the handle.
                *out = proc->getStrongProxyForHandle(flat->handle);
                return finish_unflatten_binder(
                    static_cast<BpBinder*>(out->get()), *flat, in);
        }
    }
    return BAD_TYPE;
}
// Deserialize a weak IBinder from |in|.  Restored the stripped template
// arguments and cast target types (wp<IBinder>, RefBase::weakref_type*).
status_t unflatten_binder(const sp<ProcessState>& proc,
    const Parcel& in, wp<IBinder>* out)
{
    const flat_binder_object* flat = in.readObject(false);
    if (flat) {
        switch (flat->type) {
            case BINDER_TYPE_BINDER:
                *out = reinterpret_cast<IBinder*>(flat->cookie);
                return finish_unflatten_binder(NULL, *flat, in);
            case BINDER_TYPE_WEAK_BINDER:
                if (flat->binder != 0) {
                    // binder carries the weakref object, cookie the IBinder*.
                    out->set_object_and_refs(
                        reinterpret_cast<IBinder*>(flat->cookie),
                        reinterpret_cast<RefBase::weakref_type*>(flat->binder));
                } else {
                    *out = NULL;
                }
                return finish_unflatten_binder(NULL, *flat, in);
            case BINDER_TYPE_HANDLE:
            case BINDER_TYPE_WEAK_HANDLE:
                *out = proc->getWeakProxyForHandle(flat->handle);
                return finish_unflatten_binder(
                    static_cast<BpBinder*>(out->unsafe_get()), *flat, in);
        }
    }
    return BAD_TYPE;
}
new BpServiceManager(BpBinder(0))
BpServiceManager 的构造函数以BpBinder 为参数,在BpBinder 的onFirstRef 中完成了驱动强引用计数加1,构造函数中完成了弱引用计数的+1,这个都比较简单。
// Proxy-side wrapper around a driver handle.  The constructor bumps the
// handle's weak reference count in the driver; onFirstRef() later bumps the
// strong count.  Fixed the raw newline inside the ALOGV string literal
// (a compile error introduced by formatting).
BpBinder::BpBinder(int32_t handle)
    : mHandle(handle)
    , mAlive(1)
    , mObitsSent(0)
    , mObituaries(NULL)
{
    ALOGV("Creating BpBinder %p handle %d\n", this, mHandle);
    extendObjectLifetime(OBJECT_LIFETIME_WEAK);
    IPCThreadState::self()->incWeakHandle(handle);
}
void BpBinder::onFirstRef ()
{
ALOGV("onFirstRef BpBinder %p handle %d
" , this, mHandle);
IPCThreadState* ipc = IPCThreadState::self ();
if (ipc) ipc-> incStrongHandle(mHandle);
}
// Queue a BC_ACQUIRE command for |handle| in mOut; it reaches the driver on
// the next talkWithDriver() call.  Fixed the raw newline inside the
// LOG_REMOTEREFS string literal.
void IPCThreadState::incStrongHandle(int32_t handle)
{
    LOG_REMOTEREFS("IPCThreadState::incStrongHandle(%d)\n", handle);
    mOut.writeInt32(BC_ACQUIRE);
    mOut.writeInt32(handle);
}
addService
// BpServiceManager::addService: marshal the request into |data| and send it
// to the service manager (handle 0) via remote()->transact().
virtual status_t addService(const String16 & name, const sp<IBinder >& service, bool allowIsolated)
{
Parcel data , reply;
// RPC header: interface descriptor, verified by the receiving side.
data .writeInterfaceToken(IServiceManager ::getInterfaceDescriptor () );
data .writeString16(name ) ;
// The IBinder is serialized as a flat_binder_object (see flatten_binder).
data .writeStrongBinder(service ) ;
data .writeInt32(allowIsolated ? 1 : 0) ;
status_t err = remote()->transact(ADD_SERVICE_TRANSACTION , data , &reply);
// On transport success, the server-side exception code is the real result.
return err == NO_ERROR ? reply.readExceptionCode() : err;
}
localBinder 和remoteBinder
android 提供了三个API用于访问本进程的binder, 根据服务端和代理端的不同,这三个API可以做不同的区分,实现了多态。这三个虚函数定义在IBinder中,BBinder 和BpBinder 根据需要分别覆写:BBinder 实现了localBinder, BpBinder 实现了remoteBinder,函数的名字与其所代表的含义也恰如其分,BBinder 在服务端,所以是localBinder, BpBinder 在代理端, 对服务端来说,是remote。
function
IBinder
BBinder
BpBinder
queryLocalInterface
NULL
this
localBinder
NULL
this
remoteBinder
NULL
this
writeStrongBinder
Parcel 提供的 writeStrongBinder 是对binder类型的数据进行序列化操作,这一点从内部调用的函数flatten_binder名字上也可以看出来。序列化的时候首先判断是代理端还是服务端,用的是localBinder 函数进行判断。为空,表明是代理端,用remoteBinder 获取BpBinder.不为空则为服务端,这里不为空。序列化以后的Binder 存储在 flat_binder_object结构体中。注意flat_binder_object 的赋值。BBinder 序列化:
type = BINDER_TYPE_BINDER
binder = reinterpret_cast<uintptr_t>(local->getWeakRefs()); // 弱引用对象
cookie = reinterpret_cast<uintptr_t>(local); // this 对象,BBinder本身
// Serialize a strong IBinder into this Parcel by flattening it.  Restored
// the sp<IBinder> template argument stripped by HTML conversion.
status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
    return flatten_binder(ProcessState::self(), val, this);
}
// Strong-reference counterpart of flatten_binder (article excerpt; elided
// branches kept as "......").  localBinder() distinguishes the service side
// (BBinder, non-NULL) from the proxy side.  Restored the stripped template
// arguments and reinterpret_cast target types.
status_t flatten_binder(const sp<ProcessState>& /*proc*/,
    const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;
    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    if (binder != NULL) {
        IBinder *local = binder->localBinder();
        if (!local) {
            ......  // proxy side: BINDER_TYPE_HANDLE (elided in the article)
        } else {
            // Service side: the driver translates these into a binder_node.
            obj.type = BINDER_TYPE_BINDER;
            obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
            obj.cookie = reinterpret_cast<uintptr_t>(local);
        }
    } else {
        ......  // NULL binder case (elided in the article)
    }
    return finish_flatten_binder(binder, obj, out);
}
// Final serialization step: append the flat_binder_object to the Parcel.
// Restored the sp<IBinder> template argument stripped by HTML conversion.
inline static status_t finish_flatten_binder(
    const sp<IBinder>& /*binder*/, const flat_binder_object& flat, Parcel* out)
{
    return out->writeObject(flat, false);
}
// Append a flat_binder_object to the Parcel.  The object bytes go into
// mData like ordinary data, and the offset is additionally recorded in
// mObjects so the driver can find and translate every binder object.
// Restored the reinterpret_cast<flat_binder_object*> template argument.
status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData)
{
    const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity;
    const bool enoughObjects = mObjectsSize < mObjectsCapacity;
    if (enoughData && enoughObjects) {
restart_write:
        *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val;

        // File descriptors may be disallowed for this Parcel.
        if (val.type == BINDER_TYPE_FD) {
            if (!mAllowFds) {
                return FDS_NOT_ALLOWED;
            }
            mHasFds = mFdsKnown = true;
        }

        if (nullMetaData || val.binder != 0) {
            // Remember where the object lives so the driver can patch it.
            mObjects[mObjectsSize] = mDataPos;
            acquire_object(ProcessState::self(), val, this, &mOpenAshmemSize);
            mObjectsSize++;
        }

        return finishWrite(sizeof(flat_binder_object));
    }

    if (!enoughData) {
        const status_t err = growData(sizeof(val));
        if (err != NO_ERROR) return err;
    }
    if (!enoughObjects) {
        // Grow the offsets array by roughly 1.5x.
        size_t newSize = ((mObjectsSize+2)*3)/2;
        if (newSize < mObjectsSize) return NO_MEMORY;  // overflow guard
        binder_size_t* objects = (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
        if (objects == NULL) return NO_MEMORY;
        mObjects = objects;
        mObjectsCapacity = newSize;
    }

    goto restart_write;
}
transact 过程
Parcel 数据的序列化完成以后,就需要把Parcel 数据传递给服务端,transact 出马了。具体的调用流程根据前面的分析是:BpBinder#transact => IPCThreadState#transact. BpBinder 的调用传递了三个参数,第一个是cmd, 第二个是传入的Parcel, 第三个保存返回值。
status_t err = remote()->transact(ADD_SERVICE_TRANSACTION , data , &reply);
BpBinder::transact
BpBinder::transact 中参数增加了一个mHandle, 在new BpBinder(0) 构造时,mHandle 被赋值为0。在这里代表要传递到的服务。后边会讨论这个handle 的来源和代表的意义。
// Forward the transaction to IPCThreadState, tagging it with this proxy's
// driver handle (mHandle).  mAlive is cleared once the remote side is dead
// so later calls fail fast.
status_t BpBinder::transact (
uint32_t code, const Parcel& data , Parcel* reply, uint32_t flags)
{
if (mAlive) {
status_t status = IPCThreadState::self ()-> transact(
mHandle, code, data , reply, flags);
if (status == DEAD_OBJECT) mAlive = 0 ;
return status;
}
return DEAD_OBJECT;
}
IPCThreadState::transact
writeTransactionData
waitForResponse
talkWithDriver
// User-space transaction entry point: queue a BC_TRANSACTION in mOut via
// writeTransactionData(), then block in waitForResponse() for the reply
// unless the call is TF_ONE_WAY.
status_t IPCThreadState::transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags)
{
status_t err = data.errorCheck();
// Always allow the peer to send file descriptors back.
flags |= TF_ACCEPT_FDS;
if (err == NO_ERROR) {
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL );
}
if (err != NO_ERROR) {
if (reply) reply->setError(err );
return (mLastError = err );
}
if ((flags & TF_ONE_WAY) == 0 ) {
if (reply) {
err = waitForResponse(reply);
} else {
// Caller ignores the reply; still need a Parcel to drain it into.
Parcel fakeReply;
err = waitForResponse(&fakeReply);
}
} else {
// One-way: only wait for the driver's BR_TRANSACTION_COMPLETE.
err = waitForResponse(NULL , NULL );
}
return err ;
}
writeTransactionData
在IPCThreadState::transact 中最重要的函数调用是writeTransactionData,在这里传入的Parcel 数据以及handle、code 被封装为kernel 能识别的数据类型:struct binder_transaction_data。数据类型被标记为 BC_TRANSACTION。然后也被写入IPCThreadState 的Parcel mOut 的内存区域中。这时候已经有三个Parcel:两个参数data 和 reply, 还有一个IPCThreadState 的内部成员mOut, mOut 保存了
binder_transaction_data .
/* Kernel UAPI descriptor of one transaction, built by writeTransactionData.
 * It only references the Parcel's buffers; the payload is not copied here. */
struct binder_transaction_data {
union {
__u32 handle;           /* sender side: destination handle */
binder_uintptr_t ptr;   /* receiver side: target node pointer */
} target;
binder_uintptr_t cookie;
__u32 code;                 /* RPC code, e.g. ADD_SERVICE_TRANSACTION */
__u32 flags;                /* TF_ONE_WAY, TF_ACCEPT_FDS, ... */
pid_t sender_pid;           /* filled in by the driver */
uid_t sender_euid;
binder_size_t data_size;    /* bytes in the data buffer (Parcel mData) */
binder_size_t offsets_size; /* bytes in the object-offset array (mObjects) */
union {
struct {
binder_uintptr_t buffer;   /* address of the Parcel's data */
binder_uintptr_t offsets;  /* address of the object offsets */
} ptr;
__u8 buf[8 ];
} data;
};
// Wrap the user Parcel into a binder_transaction_data and queue it, preceded
// by |cmd| (here BC_TRANSACTION), in mOut.  Only pointers and sizes are
// recorded -- the Parcel payload itself is not copied at this stage.
// Restored the reinterpret_cast<uintptr_t> template argument that was
// stripped by HTML conversion.
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    binder_transaction_data tr;

    tr.target.ptr = 0;
    tr.target.handle = handle;   // which binder node the driver routes to
    tr.code = code;
    tr.flags = binderFlags;
    tr.cookie = 0;
    tr.sender_pid = 0;           // filled in by the driver
    tr.sender_euid = 0;

    const status_t err = data.errorCheck();
    if (err == NO_ERROR) {
        // Normal case: describe the Parcel's data and object-offset arrays.
        tr.data_size = data.ipcDataSize();
        tr.data.ptr.buffer = data.ipcData();
        tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
        tr.data.ptr.offsets = data.ipcObjects();
    } else if (statusBuffer) {
        // Send an error status code instead of a payload.
        tr.flags |= TF_STATUS_CODE;
        *statusBuffer = err;
        tr.data_size = sizeof(status_t);
        tr.data.ptr.buffer = reinterpret_cast<uintptr_t>(statusBuffer);
        tr.offsets_size = 0;
        tr.data.ptr.offsets = 0;
    } else {
        return (mLastError = err);
    }

    mOut.writeInt32(cmd);
    mOut.write(&tr, sizeof(tr));

    return NO_ERROR;
}
waitForResponse
waitForResponse 中调用talkWithDriver
// Loop: flush mOut / refill mIn through talkWithDriver() until a response
// (or an error) shows up in mIn.  (Article excerpt; response parsing elided.)
status_t IPCThreadState::waitForResponse(Parcel *reply , status_t *acquireResult )
{
uint32_t cmd;
int32_t err;
while (1 ) {
if ((err=talkWithDriver()) < NO_ERROR) break ;
err = mIn.errorCheck();
if (err < NO_ERROR) break ;
// Nothing returned yet -- go back to the driver.
if (mIn.dataAvail() == 0 ) continue ;
......
}
}
talkWithDriver
talkWithDriver 要和驱动通信了,在这里所有的数据又被一个新的结构体标记了:struct binder_write_read。为什么是标记而不是取代,因为binder_write_read 仅仅描述了IPCThreadState 的两个Parcel 的大小,以及内部内存的地址。ioctl 调用BINDER_WRITE_READ 命令进入内核, 传递的参数是 struct binder_write_read。mIn 用来存放从内核返回的数据。
// Single point of contact with /dev/binder: describe mOut (write side) and
// mIn (read side) in a binder_write_read, issue the BINDER_WRITE_READ ioctl,
// then trim mOut / reset mIn according to what the driver consumed/produced.
status_t IPCThreadState::talkWithDriver (bool doReceive)
{
if (mProcess-> mDriverFD <= 0 ) {
return -EBADF ;
}
binder_write_read bwr;
// Only ask to read once everything previously read has been processed.
const bool needRead = mIn . dataPosition() >= mIn . dataSize();
const size_t outAvail = (! doReceive || needRead) ? mOut. dataSize() : 0 ;
bwr. write_size = outAvail;
bwr. write_buffer = (uintptr_t)mOut. data ();
if (doReceive && needRead) {
bwr. read_size = mIn . dataCapacity();
bwr. read_buffer = (uintptr_t)mIn . data ();
} else {
bwr. read_size = 0 ;
bwr. read_buffer = 0 ;
}
// Nothing to send and nothing to receive: skip the syscall.
if ((bwr. write_size == 0 ) && (bwr. read_size == 0 )) return NO_ERROR;
bwr. write_consumed = 0 ;
bwr. read_consumed = 0 ;
status_t err;
do {
#if defined(__ANDROID__)
if (ioctl(mProcess-> mDriverFD, BINDER_WRITE_READ, & bwr) >= 0 )
err = NO_ERROR;
else
err = -errno ;
#else
err = INVALID_OPERATION;
#endif
if (mProcess-> mDriverFD <= 0 ) {
err = -EBADF ;
}
} while (err == -EINTR );
if (err >= NO_ERROR) {
// Drop the commands the driver consumed from mOut.
if (bwr. write_consumed > 0 ) {
if (bwr. write_consumed < mOut. dataSize())
mOut. remove(0 , bwr. write_consumed);
else
mOut. setDataSize(0 );
}
// Rewind mIn so callers can parse the returned BR_* commands.
if (bwr. read_consumed > 0 ) {
mIn . setDataSize(bwr. read_consumed);
mIn . setDataPosition(0 );
}
return NO_ERROR;
}
return err;
}
数据的转移和封装
在transact 过程中,需要传递的数据由最初的基本数据类型和Binder 类型被层层封装。最后有struct binder_write_read 结构体描述,struct binder_write_read 作为ioctl 的参数传递到内核中。下面的图展示了数据的封装过程。
binder_ioctl
binder_ioctl_write_read
进入内核以后,binder_ioctl 根据cmd 选择binder_ioctl_write_read,binder_ioctl_write_read 调用copy_from_user 从用户空间copy binder_write_read 结构体,第一次copy。 和用户空间封装数据的顺序相反,然后根据写数据的大小和读数据空间的大小进行读写操作。
/* Handle BINDER_WRITE_READ: first copy -- copy_from_user() pulls the
 * binder_write_read descriptor from user space; writes are processed before
 * reads, mirroring the user-space packing order.  (Article excerpt.) */
static int binder_ioctl_write_read(struct file *filp,
unsigned int cmd, unsigned long arg,
struct binder_thread *thread)
{
int ret = 0 ;
struct binder_proc *proc = filp->private_data;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
struct binder_write_read bwr;
if (copy_from_user(&bwr, ubuf, sizeof (bwr))) {
ret = -EFAULT;
goto out ;
}
if (bwr.write_size > 0 ) {
/* Drain the caller's queued BC_* commands (user-space mOut). */
ret = binder_thread_write(proc, thread,
bwr.write_buffer,
bwr.write_size,
&bwr.write_consumed);
trace_binder_write_done(ret);
if (ret < 0 ) {
/* On error, still report how much was consumed so far. */
bwr.read_consumed = 0 ;
if (copy_to_user(ubuf, &bwr, sizeof (bwr)))
ret = -EFAULT;
goto out ;
}
}
if (bwr.read_size > 0 ) {
/* Fill the caller's read buffer (user-space mIn) with BR_* work. */
ret = binder_thread_read(proc, thread, bwr.read_buffer,
bwr.read_size,
&bwr.read_consumed,
filp->f_flags & O_NONBLOCK);
trace_binder_read_done(ret);
if (!list_empty(&proc->todo))
wake_up_interruptible(&proc->wait);
if (ret < 0 ) {
if (copy_to_user(ubuf, &bwr, sizeof (bwr)))
ret = -EFAULT;
goto out ;
}
}
......
}
binder_thread_write
get_user 第二次copy, 读出binder_transaction_data 数据的命令字,这里为BC_TRANSACTION,仅仅一个int 大小。第三次copy_from_user copy 读出binder_transaction_data 结构体
/* Consume BC_* commands from the user-space write buffer.  Second copy:
 * get_user() reads the 4-byte command word; third copy: copy_from_user()
 * reads the binder_transaction_data that follows.  (Article excerpt.) */
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
uint32_t cmd;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
/* The write buffer may carry several commands back to back. */
while (ptr < end && thread->return_error == BR_OK) {
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof (uint32_t);
trace_binder_command(cmd);
/* Per-driver / per-process / per-thread command statistics. */
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
binder_stats.bc[_IOC_NR(cmd)]++;
proc->stats.bc[_IOC_NR(cmd)]++;
thread->stats.bc[_IOC_NR(cmd)]++;
}
switch (cmd) {
......
case BC_TRANSACTION:
case BC_REPLY: {
struct binder_transaction_data tr;
/* Third copy: the fixed-size transaction header only. */
if (copy_from_user(&tr, ptr, sizeof (tr)))
return -EFAULT;
ptr += sizeof (tr);
binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
break ;
}
......
default :
return -EINVAL;
}
/* Tell the caller how much of the write buffer was handled. */
*consumed = ptr - buffer;
}
return 0 ;
}
binder_transaction
binder_transaction 中首先根据handle 查找目标binder 的target_node, 这里handle == 0,对应ServiceManager 的binder_context_mgr_node。
构造binder_transaction 结构体,这个结构体仅仅驱动内部使用,用来在不同的进程中传递数据。调用 binder_alloc_buf,为数据复制准备空间。binder_alloc_buf 除了分配空间,还需要把内核空间地址计算出 target_node 所代表的进程用户空间地址。
copy_from_user, 复制 数据域数据。第四次copy
copy_from_user, 复制 对象域数据。第五次copy
while 循环,将对象域的所有binder 数据解析出来, 还是用 flat_binder_object 表示
前边 writeStrongBinder 的时候binder type == BINDER_TYPE_BINDER。
binder_get_node 获取 自身的binder_node 节点,这个时候还没有,所以new一个。
binder_get_ref_for_node, 找到对target_node 节点的引用 binder_ref * ref
对解析出来的结果处理,我们传递进来的IBinder 为MediaPlayerService, 实质上是一个BnMediaPlayerService, 那 BnMediaPlayerService 的handle, 在这里被赋值 fp->handle = ref->desc,ref 为binder_get_ref_for_node 对target_node 的引用。这个值的大小及意义后边分析。在这里,找到了Binder 对象的handle 的出生之处。
将binder_transaction 加入到target_node 所代表的进程的target_list 队列中,实际上是todo 队列。
唤醒target_node 进程。到这里完成了进程的切换。
/* Core of a binder call (article excerpt, error paths elided): resolve the
 * target node from the handle, allocate a buffer in the target process,
 * copy the payload in (4th/5th copies), translate every flat_binder_object
 * (BINDER_TYPE_BINDER -> BINDER_TYPE_HANDLE), queue the work on the target's
 * todo list and wake it up. */
static void binder_transaction(struct binder_proc * proc,
struct binder_thread * thread ,
struct binder_transaction_data * tr, int reply)
{
struct binder_transaction * t;
struct binder_work * tcomplete;
binder_size_t * offp, * off_end;
binder_size_t off_min;
struct binder_proc * target_proc;
struct binder_thread * target_thread = NULL ;
struct binder_node * target_node = NULL ;
struct list_head * target_list;
wait_queue_head_t * target_wait;
struct binder_transaction * in_reply_to = NULL ;
struct binder_transaction_log_entry * e;
uint32_t return_error;
if (reply) {
} else {
if (tr-> target. handle ) {
} else {
/* handle == 0: the target is the context manager (ServiceManager). */
target_node = binder_context_mgr_node;
if (target_node == NULL ) {
return_error = BR_DEAD_REPLY;
goto err_no_context_mgr_node;
}
}
target_proc = target_node-> proc;
if (! (tr-> flags & TF_ONE_WAY) && thread -> transaction_stack) {
struct binder_transaction * tmp;
tmp = thread -> transaction_stack;
if (tmp-> to_thread != thread ) {
/* NOTE(review): the binder_user_error(...) call that consumed the
 * following arguments was lost when this snippet was formatted --
 * compare against the kernel source. */
proc-> pid, thread -> pid, tmp-> debug_id,
tmp-> to_proc ? tmp-> to_proc-> pid : 0 ,
tmp-> to_thread ?
tmp-> to_thread-> pid : 0 );
return_error = BR_FAILED_REPLY;
goto err_bad_call_stack;
}
/* Reuse the thread we are already in a transaction with, if any. */
while (tmp) {
if (tmp-> from && tmp-> from-> proc == target_proc)
target_thread = tmp-> from;
tmp = tmp-> from_parent;
}
}
}
if (target_thread) {
e-> to_thread = target_thread-> pid;
target_list = & target_thread-> todo;
target_wait = & target_thread-> wait;
} else {
target_list = & target_proc-> todo;
target_wait = & target_proc-> wait;
}
/* binder_transaction is driver-internal: it carries the data between
 * processes. */
t = kzalloc(sizeof(* t), GFP_KERNEL);
if (t == NULL ) {
return_error = BR_FAILED_REPLY;
goto err_alloc_t_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION);
tcomplete = kzalloc(sizeof(* tcomplete), GFP_KERNEL);
if (tcomplete == NULL ) {
return_error = BR_FAILED_REPLY;
goto err_alloc_tcomplete_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
t-> debug_id = ++ binder_last_id;
if (! reply && ! (tr-> flags & TF_ONE_WAY))
t-> from = thread ;
else
t-> from = NULL ;
t-> sender_euid = task_euid(proc-> tsk);
t-> to_proc = target_proc;
t-> to_thread = target_thread;
t-> code = tr-> code;
t-> flags = tr-> flags;
t-> priority = task_nice(current);
trace_binder_transaction(reply, t, target_node);
/* Allocate the receive buffer inside the TARGET process's mapped area. */
t-> buffer = binder_alloc_buf(target_proc, tr-> data_size,
tr-> offsets_size, ! reply && (t-> flags & TF_ONE_WAY));
if (t-> buffer == NULL ) {
return_error = BR_FAILED_REPLY;
goto err_binder_alloc_buf_failed;
}
t-> buffer-> allow_user_free = 0 ;
t-> buffer-> debug_id = t-> debug_id;
t-> buffer-> transaction = t;
t-> buffer-> target_node = target_node;
trace_binder_transaction_alloc_buf(t-> buffer);
if (target_node)
binder_inc_node(target_node, 1 , 0 , NULL );
offp = (binder_size_t * )(t-> buffer-> data +
ALIGN(tr-> data_size, sizeof(void * )));
/* 4th copy: the data area (Parcel mData). */
if (copy_from_user(t-> buffer-> data , (const void __user * )(uintptr_t)
tr-> data . ptr. buffer, tr-> data_size)) {
binder_user_error("%d:%d got transaction with invalid data ptr
" ,
proc-> pid, thread -> pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
/* 5th copy: the object-offset area (Parcel mObjects). */
if (copy_from_user(offp, (const void __user * )(uintptr_t)
tr-> data . ptr. offsets, tr-> offsets_size)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr
" ,
proc-> pid, thread -> pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
off_end = (void * )offp + tr-> offsets_size;
off_min = 0 ;
/* Walk every flat_binder_object recorded in the offsets array. */
for (; offp < off_end; offp++ ) {
struct flat_binder_object * fp;
fp = (struct flat_binder_object * )(t-> buffer-> data + * offp);
off_min = * offp + sizeof(struct flat_binder_object);
switch (fp-> type ) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
struct binder_ref * ref;
/* First time a service is passed, a binder_node is created. */
struct binder_node * node = binder_get_node(proc, fp-> binder);
if (node == NULL ) {
node = binder_new_node(proc, fp-> binder, fp-> cookie);
if (node == NULL ) {
return_error = BR_FAILED_REPLY;
goto err_binder_new_node_failed;
}
node-> min_priority = fp-> flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
node-> accept_fds = !! (fp-> flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
}
ref = binder_get_ref_for_node(target_proc, node);
if (ref == NULL ) {
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
/* Translate the local binder into a handle for the receiver:
 * this is where a handle value is born. */
if (fp-> type == BINDER_TYPE_BINDER)
fp-> type = BINDER_TYPE_HANDLE;
else
fp-> type = BINDER_TYPE_WEAK_HANDLE;
fp-> binder = 0 ;
fp-> handle = ref-> desc;
fp-> cookie = 0 ;
binder_inc_ref(ref, fp-> type == BINDER_TYPE_HANDLE,
& thread -> todo);
trace_binder_transaction_node_to_ref(t, node, ref);
binder_debug(BINDER_DEBUG_TRANSACTION,
" node %d u%016llx -> ref %d desc %d
" ,
node-> debug_id, (u64)node-> ptr,
ref-> debug_id, ref-> desc);
} break;
default:
return_error = BR_FAILED_REPLY;
goto err_bad_object_type;
}
}
if (reply) {
BUG_ON(t-> buffer-> async_transaction != 0 );
binder_pop_transaction(target_thread, in_reply_to);
} else if (! (t-> flags & TF_ONE_WAY)) {
/* Synchronous call: push onto this thread's transaction stack. */
BUG_ON(t-> buffer-> async_transaction != 0 );
t-> need_reply = 1 ;
t-> from_parent = thread -> transaction_stack;
thread -> transaction_stack = t;
} else {
BUG_ON(target_node == NULL );
BUG_ON(t-> buffer-> async_transaction != 1 );
if (target_node-> has_async_transaction) {
target_list = & target_node-> async_todo;
target_wait = NULL ;
} else
target_node-> has_async_transaction = 1 ;
}
/* Queue the work on the target and a TRANSACTION_COMPLETE on ourselves,
 * then wake the target: this is the process switch. */
t-> work. type = BINDER_WORK_TRANSACTION;
list_add_tail(& t-> work. entry, target_list);
tcomplete-> type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(& tcomplete-> entry, & thread -> todo);
if (target_wait)
wake_up_interruptible(target_wait);
return ;
... ...
}
binder_thread_read
binder_thread_write 返回后,binder_thread_read 开始执行,由于暂无返回数据,调用方被wait_event_freezable_exclusive 阻塞。
handler 的身世
在binder 通信中handle 代表了一个Binder 对象,那么这个handle 的真实意义到底是什么呢。上边我们看到了handle 的出处,看下这个handle 到底从哪里来,代表什么含义。fp 为flat_binder_object 对象, handle 被赋值为binder_ref 对象的desc. ServiceManager 服务一定是0,其他的服务在这个基础上+1, 看下这个desc 的计算过程:本进程中已有 binder_ref 的最大desc 值+1, 也就是说按本进程的引用计数递增。所以不同进程中,不同的服务可能有相同的handle。
这个handler 为什么不交给ServiceManager 管理呢,所有的服务统一引用计数
ref = binder_get_ref_for_node(target_proc, node);
fp-> binder = 0 ;
fp-> handle = ref-> desc;
fp-> cookie = 0 ;
/* Look up (or create) |proc|'s binder_ref for |node| (article excerpt).
 * This is where a user-space handle value comes from: desc is 0 for the
 * context manager's node, otherwise one past the largest desc already used
 * in this process -- a per-process counter, so different processes may reuse
 * the same handle value for different services. */
static struct binder_ref * binder_get_ref_for_node(struct binder_proc * proc,
struct binder_node * node)
{
struct rb_node * n;
struct rb_node ** p = & proc-> refs_by_node. rb_node;
struct rb_node * parent = NULL ;
struct binder_ref * ref, * new_ref;
... ...
new_ref-> desc = (node == binder_context_mgr_node) ? 0 : 1 ;
/* Refs are ordered by desc; advance past every taken value. */
for (n = rb_first(& proc-> refs_by_desc); n != NULL ; n = rb_next(n)) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
if (ref-> desc > new_ref-> desc)
break;
new_ref-> desc = ref-> desc + 1 ;
}
... ...
return new_ref;
}
ServiceManger 进程的binder_thread_read
ServiceManager 对应的驱动等待队列被唤醒,binder_thread_read 开始工作。
binder_thread_read 首先从TODO 队列中取出需要完成的事物,注意是两个,write 的时候写添加了两个。首先取出 BINDER_WORK_TRANSACTION, binder_transaction。
然后将binder_transaction 结构体转化为 binder_transaction_data 结构体,这样用户态程序可以访问,cmd 转换。
将buffer 内核的地址转化为用户空间地址。依次调用put_user copy_to_user 将相关数据传递到用户空间。
取出 BINDER_WORK_TRANSACTION_COMPLETE binder_transaction。将数据传递到用户空间
static int binder_thread_read(struct binder_proc * proc,
struct binder_thread * thread ,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t * consumed, int non_block)
{
void __user * buffer = (void __user * )(uintptr_t)binder_buffer;
void __user * ptr = buffer + * consumed;
void __user * end = buffer + size;
int ret = 0 ;
int wait_for_proc_work;
if (* consumed == 0 ) {
if (put_user(BR_NOOP, (uint32_t __user * )ptr))
return -EFAULT ;
ptr += sizeof(uint32_t);
}
retry:
wait_for_proc_work = thread -> transaction_stack == NULL &&
list_empty(& thread -> todo);
thread -> looper |= BINDER_LOOPER_STATE_WAITING;
if (wait_for_proc_work)
proc-> ready_threads++ ;
binder_unlock(__func__);
... ...
binder_lock(__func__