
Part 7: Registering a Service in Binder's C++ Implementation

2024-08-06

int main(int argc __unused, char** argv)
{
    ...
    InitializeIcuOrDie();
    //Obtain the ProcessState instance [Section 2.1]
    sp<ProcessState> proc(ProcessState::self());
    //Obtain the BpServiceManager object
    sp<IServiceManager> sm = defaultServiceManager();
    AudioFlinger::instantiate();
    //Register the multimedia service [Section 3.1]
    MediaPlayerService::instantiate();
    ResourceManagerService::instantiate();
    CameraService::instantiate();
    AudioPolicyService::instantiate();
    SoundTriggerHwService::instantiate();
    RadioService::instantiate();
    registerExtensions();
    //Start the binder thread pool
    ProcessState::self()->startThreadPool();
    //Add the current thread to the thread pool
    IPCThreadState::self()->joinThreadPool();
}

1. ProcessState::self(): open the binder driver.

2. defaultServiceManager(): obtain the service manager proxy (sm).

3. startThreadPool()/joinThreadPool(): start the thread pool that keeps reading data sent by clients.

……

ProcessState::self()

sp<ProcessState> ProcessState::self()
{
    Mutex::Autolock _l(gProcessMutex);
    if (gProcess != NULL) {
        return gProcess;
    }

    //Instantiate ProcessState [Section 2.2]
    gProcess = new ProcessState;
    return gProcess;
}

ProcessState is instantiated as a singleton (note the gProcess check), so each process holds exactly one instance.

ProcessState initialization

ProcessState::ProcessState()
    : mDriverFD(open_driver()) // Open the binder driver [Section 2.3]
    , mVMStart(MAP_FAILED)
    , mThreadCountLock(PTHREAD_MUTEX_INITIALIZER)
    , mThreadCountDecrement(PTHREAD_COND_INITIALIZER)
    , mExecutingThreadsCount(0)
    , mMaxThreads(DEFAULT_MAX_BINDER_THREADS)
    , mManagesContexts(false)
    , mBinderContextCheckFunc(NULL)
    , mBinderContextUserData(NULL)
    , mThreadPoolStarted(false)
    , mThreadPoolSeq(1)
{
    if (mDriverFD >= 0) {
        //Use mmap to map a block of virtual address space for binder [Section 2.4]
        mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
        if (mVMStart == MAP_FAILED) {
            close(mDriverFD); //Not enough address space for /dev/binder, so close the driver
            mDriverFD = -1;
        }
    }
}

The constructor opens the binder driver and uses mmap to map a block of virtual address space for receiving binder data.
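
The open_driver() call in the initializer list ([Section 2.3]) is not shown in this excerpt; the following is a simplified sketch of what it does in AOSP of this era, with error logging and the protocol-version check trimmed:

static int open_driver()
{
    int fd = open("/dev/binder", O_RDWR);   // open the binder driver node
    if (fd >= 0) {
        fcntl(fd, F_SETFD, FD_CLOEXEC);
        int vers = 0;
        // query the driver protocol version (the real code compares it
        // against BINDER_CURRENT_PROTOCOL_VERSION and bails on mismatch)
        ioctl(fd, BINDER_VERSION, &vers);
        // tell the driver how many binder threads this process may spawn
        size_t maxThreads = DEFAULT_MAX_BINDER_THREADS; // 15 in this era
        ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
    }
    return fd;
}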

Service registration

void MediaPlayerService::instantiate() {
    //Register the service [Section 3.2]
    defaultServiceManager()->addService(String16("media.player"), new MediaPlayerService());
}

This calls addService on the service manager proxy, passing the service name and the service object that handles requests. At the same time, defaultServiceManager() (which returns the BpServiceManager) creates the ProcessState object and a BpBinder object, as the following code shows:

sp<IServiceManager> defaultServiceManager()
{
    if (gDefaultServiceManager != NULL) return gDefaultServiceManager;
    {
        AutoMutex _l(gDefaultServiceManagerLock); //acquire the lock
        while (gDefaultServiceManager == NULL) {
            //[see Sections 2, 3 and 4 below]
            gDefaultServiceManager = interface_cast<IServiceManager>(
                ProcessState::self()->getContextObject(NULL));
            if (gDefaultServiceManager == NULL)
                sleep(1);
        }
    }
    return gDefaultServiceManager;
}


const android::String16 IServiceManager::descriptor("android.os.IServiceManager");

const android::String16& IServiceManager::getInterfaceDescriptor() const
{
     return IServiceManager::descriptor;
}

android::sp<IServiceManager> IServiceManager::asInterface(const android::sp<android::IBinder>& obj)
{
    android::sp<IServiceManager> intr;
    if (obj != NULL) {
        intr = static_cast<IServiceManager*>(
            obj->queryLocalInterface(IServiceManager::descriptor).get());
        if (intr == NULL) {
            intr = new BpServiceManager(obj);  //[Section 4.5]
        }
    }
    return intr;
}

IServiceManager::IServiceManager() { }
IServiceManager::~IServiceManager() { }
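
The descriptor/asInterface/constructor definitions above are not written by hand; they are what the DECLARE_META_INTERFACE / IMPLEMENT_META_INTERFACE macros in IInterface.h expand to. In the real source they are written as:

// In IServiceManager.h: declares descriptor, asInterface() and
// getInterfaceDescriptor() for the interface.
DECLARE_META_INTERFACE(ServiceManager);

// In IServiceManager.cpp: expands to the definitions shown above,
// binding the descriptor string "android.os.IServiceManager".
IMPLEMENT_META_INTERFACE(ServiceManager, "android.os.IServiceManager");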

1. ProcessState::self(): creates the ProcessState object, a process-wide singleton; it opens the driver, maps memory for it, and sets the maximum number of binder threads.

2. getContextObject(): obtains the BpBinder object; it is returned directly if it already exists, otherwise it is created.

3. interface_cast: wraps that BpBinder into the BpServiceManager object (see the sketch below).
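
interface_cast itself is only a thin template in IInterface.h; it forwards to the interface's asInterface(), which is what wraps the BpBinder into a BpServiceManager:

template<typename INTERFACE>
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj)
{
    // For IServiceManager this calls IServiceManager::asInterface(obj),
    // shown earlier, which creates new BpServiceManager(obj).
    return INTERFACE::asInterface(obj);
}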

Obtaining the BpBinder object

1. getContextObject

sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& /*caller*/)
{
    return getStrongProxyForHandle(0);  //[Section 3.2]
}

This returns the IBinder object for handle = 0, which refers to the service manager.

2. getStrongProxyForHandle

sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
    sp<IBinder> result;

    AutoMutex _l(mLock);
    //Look up the handle_entry for this handle [Section 3.3]
    handle_entry* e = lookupHandleLocked(handle);

    if (e != NULL) {
        IBinder* b = e->binder;
        if (b == NULL || !e->refs->attemptIncWeak(this)) {
            if (handle == 0) {
                Parcel data;
                //Use a PING transaction to test whether the binder context manager is ready
                status_t status = IPCThreadState::self()->transact(
                        0, IBinder::PING_TRANSACTION, data, NULL, 0);
                if (status == DEAD_OBJECT)
                   return NULL;
            }
            //If no IBinder exists for this handle, or its weak reference is invalid, create a BpBinder [Section 3.4]
            b = new BpBinder(handle);
            e->binder = b;
            if (b) e->refs = b->getWeakRefs();
            result = b;
        } else {
            result.force_set(b);
            e->refs->decWeak(this);
        }
    }
    return result;
}

It first calls lookupHandleLocked to check whether a BpBinder already exists for the given handle; if not, one is created, otherwise the existing one is returned directly.
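
For reference, lookupHandleLocked ([Section 3.3]) keeps a per-process vector mHandleToObject indexed by handle and grows it on demand; roughly:

ProcessState::handle_entry* ProcessState::lookupHandleLocked(int32_t handle)
{
    const size_t N = mHandleToObject.size();
    if (N <= (size_t)handle) {
        // No entry yet for this handle: grow the vector and fill the new
        // slots with empty entries (binder == NULL, refs == NULL).
        handle_entry e;
        e.binder = NULL;
        e.refs = NULL;
        status_t err = mHandleToObject.insertAt(e, N, handle + 1 - N);
        if (err < NO_ERROR) return NULL;
    }
    return &mHandleToObject.editItemAt(handle);
}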

Creating the BpBinder

BpBinder::BpBinder(int32_t handle)
    : mHandle(handle)
    , mAlive(1)
    , mObitsSent(0)
    , mObituaries(NULL)
{
    extendObjectLifetime(OBJECT_LIFETIME_WEAK); //let weak references control the object's lifetime
    IPCThreadState::self()->incWeakHandle(handle); //increment the weak reference count for this handle
}

BpServiceManager::addService

virtual status_t addService(const String16& name, const sp<IBinder>& service, bool allowIsolated) {
    Parcel data, reply; //Parcel is the data container for the transaction
    //Write the interface token header "android.os.IServiceManager"
    data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
    data.writeString16(name);        // name is "media.player"
    data.writeStrongBinder(service); // the MediaPlayerService object [Section 3.2.1]
    data.writeInt32(allowIsolated ? 1 : 0); // allowIsolated = false
    //remote() points to the BpBinder object [Section 3.3]
    status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
    return err == NO_ERROR ? reply.readExceptionCode() : err;
}

This registers a service named "media.player" with the service manager.
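
For context, a client later retrieves the service by the same name; a minimal sketch of the usual lookup pattern (the IMediaPlayerService cast is only illustrative):

// Client side: ask the service manager for the binder registered as
// "media.player" and wrap it in the corresponding proxy class.
sp<IServiceManager> sm = defaultServiceManager();
sp<IBinder> binder = sm->getService(String16("media.player"));
sp<IMediaPlayerService> player = interface_cast<IMediaPlayerService>(binder);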

writeStrongBinder

This packs the binder entity (the local BnXxx object, here MediaPlayerService) into the Parcel.

status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
    return flatten_binder(ProcessState::self(), val, this);
}

status_t flatten_binder(const sp<ProcessState>& /*proc*/,
    const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;

    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    if (binder != NULL) {
        IBinder *local = binder->localBinder(); //non-null for a local binder entity
        if (!local) {
            BpBinder *proxy = binder->remoteBinder();
            const int32_t handle = proxy ? proxy->handle() : 0;
            obj.type = BINDER_TYPE_HANDLE;
            obj.binder = 0;
            obj.handle = handle;
            obj.cookie = 0;
        } else { //this branch is taken here (local entity)
            obj.type = BINDER_TYPE_BINDER;
            obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
            obj.cookie = reinterpret_cast<uintptr_t>(local);
        }
    } else {
        ...
    }
    //[Section 3.2.3]
    return finish_flatten_binder(binder, obj, out);
}

flatten_binder flattens the binder entity into a flat_binder_object.
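
For reference, flat_binder_object is defined in the binder UAPI header; in the era of this code it looks roughly like this:

struct flat_binder_object {
    __u32 type;   // BINDER_TYPE_BINDER for a local entity, BINDER_TYPE_HANDLE for a proxy
    __u32 flags;  // e.g. FLAT_BINDER_FLAG_ACCEPTS_FDS plus the priority bits
    union {
        binder_uintptr_t binder; // local weak-ref pointer (BINDER_TYPE_BINDER case)
        __u32 handle;            // remote handle (BINDER_TYPE_HANDLE case)
    };
    binder_uintptr_t cookie;     // pointer to the local BBinder (BINDER_TYPE_BINDER case)
};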

finish_flatten_binder

inline static status_t finish_flatten_binder(
    const sp<IBinder>& , const flat_binder_object& flat, Parcel* out)
{
    return out->writeObject(flat, false);
}

finish_flatten_binder writes the flat_binder_object into the Parcel; writeObject also records its offset in the Parcel's object array, which is later sent to the driver as the offsets buffer.

BpBinder::transact

Calling transact() on remote() means calling BpBinder::transact().

status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    if (mAlive) {
        // code = ADD_SERVICE_TRANSACTION [Section 3.4]
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }
    return DEAD_OBJECT;
}

The real work is delegated to IPCThreadState::self()->transact().

IPCThreadState::self()

IPCThreadState* IPCThreadState::self()
{
    if (gHaveTLS) {
restart:
        const pthread_key_t k = gTLS;
        IPCThreadState* st = (IPCThreadState*)pthread_getspecific(k);
        if (st) return st;
        return new IPCThreadState;  //construct an IPCThreadState [Section 3.3.2]
    }

    if (gShutdown) return NULL;

    pthread_mutex_lock(&gTLSMutex);
    if (!gHaveTLS) { //on first entry gHaveTLS is false
        if (pthread_key_create(&gTLS, threadDestructor) != 0) { //create the thread-local storage key
            pthread_mutex_unlock(&gTLSMutex);
            return NULL;
        }
        gHaveTLS = true;
    }
    pthread_mutex_unlock(&gTLSMutex);
    goto restart;
}

IPCThreadState::self() fetches the IPCThreadState from the thread's TLS: if one already exists it is returned from TLS, otherwise a new one is created.

IPCThreadState initialization

IPCThreadState::IPCThreadState()
    : mProcess(ProcessState::self()),
      mMyThreadId(gettid()),
      mStrictModePolicy(0),
      mLastTransactionBinderFlags(0)
{
    pthread_setspecific(gTLS, this);
    clearCaller();
    mIn.setDataCapacity(256);
    mOut.setDataCapacity(256);
}

Each thread has its own IPCThreadState, and each IPCThreadState holds an mIn and an mOut Parcel (data read from and written to the driver). The mProcess member holds the ProcessState, of which there is only one per process.

IPCThreadState::transact

status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    status_t err = data.errorCheck(); //check the data for errors
    flags |= TF_ACCEPT_FDS;
    ....
    if (err == NO_ERROR) { // send the transaction data [Section 3.5]
        err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
    }
    ...

    if ((flags & TF_ONE_WAY) == 0) {
        if (reply) {
            //wait for the response [Section 3.6]
            err = waitForResponse(reply);
        } else {
            Parcel fakeReply;
            err = waitForResponse(&fakeReply);
        }

    } else {
        //one-way call: no need to wait for a reply
        err = waitForResponse(NULL, NULL);
    }
    return err;
}

In short: check the data for errors, write the transaction data, then wait for the response.

IPCThreadState::writeTransactionData

status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    binder_transaction_data tr;
    tr.target.ptr = 0;
    tr.target.handle = handle; // handle = 0
    tr.code = code;            // code = ADD_SERVICE_TRANSACTION
    tr.flags = binderFlags;    // binderFlags = 0
    tr.cookie = 0;
    tr.sender_pid = 0;
    tr.sender_euid = 0;

    // data is the Parcel carrying the Media service information
    const status_t err = data.errorCheck();
    if (err == NO_ERROR) {
        tr.data_size = data.ipcDataSize();  // mDataSize
        tr.data.ptr.buffer = data.ipcData(); //mData
        tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t); //mObjectsSize
        tr.data.ptr.offsets = data.ipcObjects(); //mObjects
    } else if (statusBuffer) {
        ...
    } else {
        return (mLastError = err);
    }

    mOut.writeInt32(cmd);         //cmd = BC_TRANSACTION
    mOut.write(&tr, sizeof(tr));  //write the binder_transaction_data payload
    return NO_ERROR;
}

The handle identifies the destination of the transaction. For service registration the destination is the service manager: handle = 0 corresponds to the binder_context_mgr_node object in the driver, which is exactly the binder node of the service manager. binder_transaction_data is the data structure used to communicate with the binder driver. This step ultimately writes the binder request code BC_TRANSACTION plus the binder_transaction_data structure into mOut.
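
For reference, binder_transaction_data comes from the binder UAPI header; the fields filled in above map onto it roughly as follows (layout from that era's kernel header):

struct binder_transaction_data {
    union {
        __u32 handle;          // target handle (0 = service manager here)
        binder_uintptr_t ptr;  // target binder pointer (used for BR_TRANSACTION)
    } target;
    binder_uintptr_t cookie;
    __u32 code;                // ADD_SERVICE_TRANSACTION in this flow
    __u32 flags;
    pid_t sender_pid;          // filled in by the driver
    uid_t sender_euid;         // filled in by the driver
    binder_size_t data_size;   // size of the Parcel data (mDataSize)
    binder_size_t offsets_size;// size of the object-offset array (mObjectsSize)
    union {
        struct {
            binder_uintptr_t buffer;  // -> Parcel mData
            binder_uintptr_t offsets; // -> Parcel mObjects
        } ptr;
        __u8 buf[8];
    } data;
};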

waitForResponse

status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    int32_t cmd;
    int32_t err;
    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break; // [Section 3.7]
        ...
        if (mIn.dataAvail() == 0) continue;

        cmd = mIn.readInt32();
        switch (cmd) {
            case BR_TRANSACTION_COMPLETE: ...
            case BR_DEAD_REPLY: ...
            case BR_FAILED_REPLY: ...
            case BR_ACQUIRE_RESULT: ...
            case BR_REPLY: ...
                goto finish;

            default:
                err = executeCommand(cmd);  //[Section 3.x]
                if (err != NO_ERROR) goto finish;
                break;
        }
    }
    ...
    return err;
}

During waitForResponse, the current process first receives BR_TRANSACTION_COMPLETE. Meanwhile the target process receives the transaction and handles it as BR_TRANSACTION; it then sends its reply back to the current process, which finally handles the BR_REPLY command.
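
The BR_REPLY branch is elided in the code above; in AOSP of this era it roughly does the following, which is what finally fills the caller's reply Parcel (simplified sketch, buffer freeing and assertions trimmed):

case BR_REPLY:
    {
        binder_transaction_data tr;
        err = mIn.read(&tr, sizeof(tr));   // read the reply descriptor from mIn
        if (err != NO_ERROR) goto finish;
        if (reply) {
            if ((tr.flags & TF_STATUS_CODE) == 0) {
                // Normal reply: point the reply Parcel at the buffer the
                // driver mapped into this process.
                reply->ipcSetDataReference(
                    reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                    tr.data_size,
                    reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                    tr.offsets_size / sizeof(binder_size_t),
                    freeBuffer, this);
            } else {
                // Error reply: the buffer only carries a status code.
                err = *reinterpret_cast<const status_t*>(tr.data.ptr.buffer);
            }
        }
    }
    goto finish;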

IPCThreadState::talkWithDriver

status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    ...
    binder_write_read bwr;
    const bool needRead = mIn.dataPosition() >= mIn.dataSize();
    const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;

    bwr.write_size = outAvail;
    bwr.write_buffer = (uintptr_t)mOut.data();

    if (doReceive && needRead) {
        //Fill in the receive buffer info; any data received later goes directly into mIn.
        bwr.read_size = mIn.dataCapacity();
        bwr.read_buffer = (uintptr_t)mIn.data();
    } else {
        bwr.read_size = 0;
        bwr.read_buffer = 0;
    }
    //If both the read buffer and the write buffer are empty, return immediately
    if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;

    bwr.write_consumed = 0;
    bwr.read_consumed = 0;
    status_t err;
    do {
        //Communicate with the binder driver via repeated ioctl read/write calls
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
            err = NO_ERROR;
        ...
    } while (err == -EINTR); //retry if the call was interrupted
    ...
    return err;
}

The data is repackaged into a binder_write_read (bwr) structure, and ioctl(BINDER_WRITE_READ) is finally called to send it to the driver.
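
For reference, bwr is a binder_write_read structure as defined in the binder UAPI header, roughly:

struct binder_write_read {
    binder_size_t write_size;      // bytes available in write_buffer (mOut)
    binder_size_t write_consumed;  // bytes the driver actually consumed
    binder_uintptr_t write_buffer; // commands from user space to the driver
    binder_size_t read_size;       // capacity of read_buffer (mIn)
    binder_size_t read_consumed;   // bytes the driver filled in
    binder_uintptr_t read_buffer;  // commands from the driver back to user space
};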

From here on the flow matches the earlier C-language binder driver analysis. IHelloService.h defines two functions, sayhello and sayhello_to.

Server-side structure:

IHelloService.h -> BnHelloService.cpp (onTransact, sayhello, sayhello_to);

test_server: 1. receives the data, 2. calls onTransact

Client-side structure:

IHelloService.h -> BpHelloService.cpp;

test_client: calls sayhello or sayhello_to;

Shared send/receive functions: Binder.c

For BpServiceManager, handle = 0 identifies the service manager in the kernel.

The handle used by BpHelloService is the one obtained from BpServiceManager->getService("Hello"). Class relationship: BpInterface -> BpServiceManager (BpServiceManager derives from BpInterface).
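
To make this client/server structure concrete, here is a minimal hypothetical sketch of the proxy and stub sides of the hello service; the names (IHelloService, BpHelloService, HELLO_SAYHELLO) and the transaction codes are illustrative, not the original tutorial code:

// Hypothetical interface with the sayhello() call described above.
// In the .cpp one would add: IMPLEMENT_META_INTERFACE(HelloService, "android.test.IHelloService");
class IHelloService : public IInterface {
public:
    DECLARE_META_INTERFACE(HelloService);
    virtual void sayhello() = 0;
};

// Transaction code shared by proxy and stub (illustrative value).
enum { HELLO_SAYHELLO = IBinder::FIRST_CALL_TRANSACTION };

// Client side: BpHelloService packs the arguments and sends them through
// the handle obtained via BpServiceManager->getService("hello").
class BpHelloService : public BpInterface<IHelloService> {
public:
    BpHelloService(const sp<IBinder>& impl) : BpInterface<IHelloService>(impl) {}
    virtual void sayhello() {
        Parcel data, reply;
        data.writeInterfaceToken(IHelloService::getInterfaceDescriptor());
        remote()->transact(HELLO_SAYHELLO, data, &reply);  // same path as addService above
    }
};

// Server side: BnHelloService unpacks the transaction; test_server's loop
// (talkWithDriver/executeCommand) ends up calling this onTransact.
class BnHelloService : public BnInterface<IHelloService> {
public:
    virtual status_t onTransact(uint32_t code, const Parcel& data,
                                Parcel* reply, uint32_t flags = 0) {
        switch (code) {
        case HELLO_SAYHELLO:
            CHECK_INTERFACE(IHelloService, data, reply);
            sayhello();  // implemented by the concrete service class
            return NO_ERROR;
        default:
            return BBinder::onTransact(code, data, reply, flags);
        }
    }
};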

