vsomeip源码梳理 -- Event订阅流程
本文基于vsomeip 3.1.20.3總結(jié)而成
源碼地址:https://github.com/GENIVI/vsomeip.git
本文主要涉及vsomeip庫中的如下代碼:
在vsomeip中,提供了一個(gè)event類來實(shí)現(xiàn)SOME/IP協(xié)議中的事件所包含的信息與功能,在之前寫的demo篇中有介紹過事件的基礎(chǔ)使用,包括事件的注冊,訂閱以及發(fā)送三個(gè)功能,那這一篇文章繼續(xù)來看看event的具體處理流程。
事件訂閱(request_event & subscribe )
在跟蹤源碼之前,先看下一個(gè)客戶端訂閱某個(gè)事件組的API介紹,主要是兩個(gè)函數(shù),request_event與subscribe, 函數(shù)定義在application.hpp中
//注冊該應(yīng)用模塊為純事件或者屬性事件的使用者
virtual void request_event(service_t _service, instance_t _instance,
        event_t _event, const std::set<eventgroup_t> &_eventgroups,
        event_type_e _type = event_type_e::ET_EVENT,
        reliability_type_e _reliability = reliability_type_e::RT_UNKNOWN) = 0;
//訂閱事件組,該函數(shù)需在request_event後調(diào)用。
virtual void subscribe(service_t _service, instance_t _instance,
        eventgroup_t _eventgroup, major_version_t _major = DEFAULT_MAJOR,
        event_t _event = ANY_EVENT) = 0;
這兩個(gè)函數(shù)就是這個(gè)模塊跟蹤的入口了,看代碼,兩個(gè)函數(shù)的實(shí)現(xiàn)在application_impl.cpp中,按照注釋的順序,我們先從request_event函數(shù)開始看
void application_impl::request_event(service_t _service, instance_t _instance,event_t _event, const std::set<eventgroup_t> &_eventgroups,event_type_e _type, reliability_type_e _reliability) {//這里的routing_在之前的init流程分析過,可能指向host模式的rtm_impl,也//可能指向proxy模式的rtm_proxyif (routing_)routing_->register_event(client_,_service, _instance,_event,_eventgroups, _type, _reliability,std::chrono::milliseconds::zero(), false, true,nullptr,false);
}
還是先跟蹤host模式的路由實(shí)現(xiàn),因?yàn)閜roxy的實(shí)現(xiàn)相對來說比較簡單。app_模塊中的request_event啥也沒干,直接交給路由模塊的register_event函數(shù)處理了。
host路由中的register_event實(shí)現(xiàn)
/** app調(diào)用register_event傳入的參數(shù):* _change_resets_cycle= false* _update_on_change = true* _epsilon_change_func = null* _is_provided = false* _is_shadow = false* _is_cache_placeholder = false*/
void routing_manager_impl::register_event(client_t _client,service_t _service, instance_t _instance,event_t _notifier,const std::set<eventgroup_t> &_eventgroups, const event_type_e _type,reliability_type_e _reliability,std::chrono::milliseconds _cycle, bool _change_resets_cycle,bool _update_on_change,epsilon_change_func_t _epsilon_change_func,bool _is_provided, bool _is_shadow, bool _is_cache_placeholder) {//從APP中的event緩存map中查找是否已經(jīng)存在對應(yīng)的event實(shí)例auto its_event = find_event(_service, _instance, _notifier);bool is_first(false);//判斷是否為首次注冊if (its_event) {if (!its_event->has_ref(_client, _is_provided)) {is_first = true;}} else {is_first = true;}if (is_first) {//首次注冊的情況下,調(diào)用父類的register_event方法routing_manager_base::register_event(_client,_service, _instance,_notifier,_eventgroups, _type, _reliability,_cycle, _change_resets_cycle, _update_on_change,_epsilon_change_func, _is_provided, _is_shadow,_is_cache_placeholder);}//忽略日志打印代碼
}
上面的流程也比較簡單,就是根據(jù)服務(wù)實(shí)例以及事件ID來判斷事件是否已經(jīng)注冊過,已經(jīng)注冊過的情況下,就不處理該次注冊動(dòng)作了,接著看rtm_base中的流程,因?yàn)閞tm_base ::register_event中源碼挺多的,而且host與proxy共用邏輯,這里先對整個(gè)流程畫了一個(gè)流程圖簡述一下
void routing_manager_base::register_event(client_t _client,service_t _service, instance_t _instance,event_t _notifier,const std::set<eventgroup_t> &_eventgroups,const event_type_e _type,reliability_type_e _reliability,std::chrono::milliseconds _cycle, bool _change_resets_cycle,bool _update_on_change,epsilon_change_func_t _epsilon_change_func,bool _is_provided, bool _is_shadow, bool _is_cache_placeholder) {std::lock_guard<std::mutex> its_registration_lock(event_registration_mutex_);auto determine_event_reliability = [this, &_service, &_instance,&_notifier, &_reliability]() {reliability_type_e its_reliability =configuration_->get_event_reliability(_service, _instance, _notifier);if (its_reliability != reliability_type_e::RT_UNKNOWN) {// event was explicitly configured -> overwrite value passed via APIreturn its_reliability;} else if (_reliability != reliability_type_e::RT_UNKNOWN) {// use value provided via APIreturn _reliability;} else { // automatic mode, user service' reliabilityreturn configuration_->get_service_reliability(_service, _instance);}};//從已注冊的事件表中查找當(dāng)前事件是否已經(jīng)存在std::shared_ptr<event> its_event = find_event(_service, _instance, _notifier);bool transfer_subscriptions_from_any_event(false);if (its_event) {//事件已經(jīng)注冊過,判斷已注冊的事件是否是占位事件if (!its_event->is_cache_placeholder()) {if (_type == its_event->get_type()|| its_event->get_type() == event_type_e::ET_UNKNOWN
#ifdef VSOMEIP_ENABLE_COMPAT|| (its_event->get_type() == event_type_e::ET_EVENT&& _type == event_type_e::ET_SELECTIVE_EVENT)|| (its_event->get_type() == event_type_e::ET_SELECTIVE_EVENT&& _type == event_type_e::ET_EVENT && _is_provided)
#endif) {//非占位事件,且事件類型一致則根據(jù)傳入的參數(shù)更新事件信息
#ifdef VSOMEIP_ENABLE_COMPATif (its_event->get_type() == event_type_e::ET_EVENT&& _type == event_type_e::ET_SELECTIVE_EVENT) {its_event->set_type(_type);}
#endifif (_is_provided) {its_event->set_provided(true);its_event->set_reliability(determine_event_reliability());}if (_is_shadow && _is_provided) {its_event->set_shadow(_is_shadow);}//注冊該事件的客戶端為host路由應(yīng)用,強(qiáng)制將事件標(biāo)記為非影子事件if (_client == host_->get_client() && _is_provided) {its_event->set_shadow(false);its_event->set_update_on_change(_update_on_change);}//更新事件的事件組信息for (auto eg : _eventgroups) {its_event->add_eventgroup(eg);}transfer_subscriptions_from_any_event = true;} else {
#ifdef VSOMEIP_ENABLE_COMPATif (!(its_event->get_type() == event_type_e::ET_SELECTIVE_EVENT&& _type == event_type_e::ET_EVENT))
#endifVSOMEIP_ERROR << "Event registration update failed. ""Specified arguments do not match existing registration.";}} else {//該事件之前已經(jīng)作為占位事件注冊過,這里將占位事件變?yōu)檎鎸?shí)注冊事件//并更新事件信息if (_type != event_type_e::ET_FIELD) {// don't cache payload for non-fieldsits_event->unset_payload(true);}if (_is_shadow && _is_provided) {its_event->set_shadow(_is_shadow);}if (_client == host_->get_client() && _is_provided) {its_event->set_shadow(false);its_event->set_update_on_change(_update_on_change);}its_event->set_type(_type);its_event->set_reliability(determine_event_reliability());its_event->set_provided(_is_provided);its_event->set_cache_placeholder(false);std::shared_ptr<serviceinfo> its_service = find_service(_service, _instance);if (its_service) {its_event->set_version(its_service->get_major());}if (_eventgroups.size() == 0) { // No eventgroup specifiedstd::set<eventgroup_t> its_eventgroups;its_eventgroups.insert(_notifier);its_event->set_eventgroups(its_eventgroups);} else {for (auto eg : _eventgroups) {its_event->add_eventgroup(eg);}}its_event->set_epsilon_change_function(_epsilon_change_func);its_event->set_change_resets_cycle(_change_resets_cycle);its_event->set_update_cycle(_cycle);}} else {//該事件之前沒有注冊過,則創(chuàng)建新的event對象its_event = std::make_shared<event>(this, _is_shadow);its_event->set_service(_service);its_event->set_instance(_instance);its_event->set_event(_notifier);its_event->set_type(_type);its_event->set_reliability(determine_event_reliability());its_event->set_provided(_is_provided);its_event->set_cache_placeholder(_is_cache_placeholder);std::shared_ptr<serviceinfo> its_service = find_service(_service, _instance);if (its_service) {its_event->set_version(its_service->get_major());}if (_eventgroups.size() == 0) { // No eventgroup specifiedstd::set<eventgroup_t> its_eventgroups;its_eventgroups.insert(_notifier);its_event->set_eventgroups(its_eventgroups);} else 
{its_event->set_eventgroups(_eventgroups);}//當(dāng)前注冊的是影子事件且epsilon變化事件處理函數(shù)為空,epsilon變化的意思是:僅當(dāng)與最后一個(gè)值的差異大于某個(gè)閾值時(shí)才發(fā)送更新。if (_is_shadow && !_epsilon_change_func) {std::shared_ptr<cfg::debounce> its_debounce= configuration_->get_debounce(_service, _instance, _notifier);if (its_debounce) {//省略了部分日志代碼//根據(jù)配置文件中的debounce配置信息構(gòu)建新的_epsilon_change_func函數(shù)_epsilon_change_func = [its_debounce](const std::shared_ptr<payload> &_old,const std::shared_ptr<payload> &_new) {bool is_changed(false), is_elapsed(false);// Check whether we should forward because of changed dataif (its_debounce->on_change_) {length_t its_min_length, its_max_length;if (_old->get_length() < _new->get_length()) {its_min_length = _old->get_length();its_max_length = _new->get_length();} else {its_min_length = _new->get_length();its_max_length = _old->get_length();}// Check whether all additional bytes (if any) are excludedfor (length_t i = its_min_length; i < its_max_length; i++) {auto j = its_debounce->ignore_.find(i);// A change is detected when an additional byte is not// excluded at all or if its exclusion does not cover// all its bits.if (j == its_debounce->ignore_.end() || j->second != 0xFF) {is_changed = true;break;}}if (!is_changed) {const byte_t *its_old = _old->get_data();const byte_t *its_new = _new->get_data();for (length_t i = 0; i < its_min_length; i++) {auto j = its_debounce->ignore_.find(i);if (j == its_debounce->ignore_.end()) {if (its_old[i] != its_new[i]) {is_changed = true;break;}} else if (j->second != 0xFF) {if ((its_old[i] & ~(j->second)) != (its_new[i] & ~(j->second))) {is_changed = true;break;}}}}}if (its_debounce->interval_ > -1) {// Check whether we should forward because of the elapsed time since// we did last timestd::chrono::steady_clock::time_point its_current= std::chrono::steady_clock::now();long elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(its_current - its_debounce->last_forwarded_).count();is_elapsed = 
(its_debounce->last_forwarded_ == (std::chrono::steady_clock::time_point::max)()|| elapsed >= its_debounce->interval_);if (is_elapsed || (is_changed && its_debounce->on_change_resets_interval_))its_debounce->last_forwarded_ = its_current;}return (is_changed || is_elapsed);};} else {//構(gòu)造一個(gè)空的函數(shù)_epsilon_change_func = [](const std::shared_ptr<payload> &_old,const std::shared_ptr<payload> &_new) {(void)_old;(void)_new;return true;};}}//設(shè)置觸發(fā)事件信息its_event->set_epsilon_change_function(_epsilon_change_func);its_event->set_change_resets_cycle(_change_resets_cycle);its_event->set_update_cycle(_cycle);its_event->set_update_on_change(_update_on_change);if (_is_provided) {transfer_subscriptions_from_any_event = true;}}if (transfer_subscriptions_from_any_event) {// check if someone subscribed to ANY_EVENT and the subscription// was stored in the cache placeholder. Move the subscribers// into new event//獲取指定服務(wù)實(shí)例中的任意事件的event對象std::shared_ptr<event> its_any_event =find_event(_service, _instance, ANY_EVENT);if (its_any_event) {//當(dāng)前緩存中存在任意事件的event,獲取該event所在的事件組std::set<eventgroup_t> any_events_eventgroups =its_any_event->get_eventgroups();//遍歷當(dāng)前注冊事件的所在事件組for (eventgroup_t eventgroup : _eventgroups) {//任意事件的事件組集中包含了當(dāng)前注冊事件的事件組auto found_eg = any_events_eventgroups.find(eventgroup);if (found_eg != any_events_eventgroups.end()) {//獲取訂閱任意事件所在事件組的客戶端ID集std::set<client_t> its_any_event_subscribers =its_any_event->get_subscribers(eventgroup);//更新當(dāng)前事件的訂閱器信息for (const client_t subscriber : its_any_event_subscribers) {its_event->add_subscriber(eventgroup, subscriber, true);}}}}}//事件為真實(shí)事件,添加該客戶端的引用if (!its_event->is_cache_placeholder()) {its_event->add_ref(_client, _is_provided);}//更新事件組信息for (auto eg : _eventgroups) {std::shared_ptr<eventgroupinfo> its_eventgroupinfo= find_eventgroup(_service, _instance, eg);if (!its_eventgroupinfo) {its_eventgroupinfo = 
std::make_shared<eventgroupinfo>();its_eventgroupinfo->set_service(_service);its_eventgroupinfo->set_instance(_instance);its_eventgroupinfo->set_eventgroup(eg);std::lock_guard<std::mutex> its_lock(eventgroups_mutex_);eventgroups_[_service][_instance][eg] = its_eventgroupinfo;}its_eventgroupinfo->add_event(its_event);}std::lock_guard<std::mutex> its_lock(events_mutex_);//更新已注冊事件信息events_[_service][_instance][_notifier] = its_event;
}
到這里,我們基本就清楚了host模式的vsomeip app中的request_event的主要作用是創(chuàng)建了eventgroupinfo, event兩個(gè)實(shí)例的共享指針 ,并將其添加到模塊中的eventgroups_ 以及events_表中
proxy路由中的request_event實(shí)現(xiàn)
proxy中的request_event流程比較簡單,首先是app_模塊中的request_event什麼也沒做,直接調(diào)用了rtm_proxy中的register_event函數(shù):
void routing_manager_proxy::register_event(client_t _client,service_t _service, instance_t _instance,event_t _notifier,const std::set<eventgroup_t> &_eventgroups, const event_type_e _type,reliability_type_e _reliability,std::chrono::milliseconds _cycle, bool _change_resets_cycle,bool _update_on_change, epsilon_change_func_t _epsilon_change_func,bool _is_provided, bool _is_shadow, bool _is_cache_placeholder) {(void)_is_shadow;(void)_is_cache_placeholder;//創(chuàng)建一個(gè)事件注冊器const event_data_t registration = {_service,_instance,_notifier,_type,_reliability,_is_provided,_eventgroups};bool is_first(false);{//從當(dāng)前已經(jīng)pending的事件注冊器中年查找對應(yīng)的事件注冊器是否已經(jīng)存在,如果不存在的情況下,就是該事件的首次注冊std::lock_guard<std::mutex> its_lock(state_mutex_);is_first = pending_event_registrations_.find(registration)== pending_event_registrations_.end();
#ifndef VSOMEIP_ENABLE_COMPATif (is_first) {pending_event_registrations_.insert(registration);}
#else......
#endif}//首次注冊的情況下,調(diào)用rtm_base創(chuàng)建event實(shí)例與eventgroup實(shí)例,并將其加入到緩存map中,rtm_base::register_event中的邏輯參考host章節(jié)中的流程。if (is_first || _is_provided) {routing_manager_base::register_event(_client,_service, _instance,_notifier,_eventgroups, _type, _reliability,_cycle, _change_resets_cycle, _update_on_change,_epsilon_change_func,_is_provided);}{std::lock_guard<std::mutex> its_lock(state_mutex_);//如果當(dāng)前應(yīng)用狀態(tài)已注冊,且事件為第一次注冊,則發(fā)送命令到host端實(shí)現(xiàn)register_event流程if (state_ == inner_state_type_e::ST_REGISTERED && is_first) {send_register_event(client_, _service, _instance,_notifier, _eventgroups, _type, _reliability, _is_provided);}}
}
繼續(xù)跟蹤send_register_event函數(shù),發(fā)現(xiàn)其中的邏輯比較簡單,就是根據(jù)傳入的事件信息拼包,然後通過unix域socket的方式發(fā)送類型為VSOMEIP_REGISTER_EVENT的命令到host端:
void routing_manager_proxy::send_register_event(client_t _client,service_t _service, instance_t _instance,event_t _notifier,const std::set<eventgroup_t> &_eventgroups, const event_type_e _type,reliability_type_e _reliability,bool _is_provided) {......byte_t *its_command = new byte_t[its_eventgroups_size];uint32_t its_size = static_cast<std::uint32_t>(its_eventgroups_size)- VSOMEIP_COMMAND_HEADER_SIZE;its_command[VSOMEIP_COMMAND_TYPE_POS] = VSOMEIP_REGISTER_EVENT;//省略拼包邏輯std::size_t i = 9;for (auto eg : _eventgroups) {std::memcpy(&its_command[VSOMEIP_COMMAND_PAYLOAD_POS + i], &eg,sizeof(eventgroup_t));i += sizeof(eventgroup_t);}{std::lock_guard<std::mutex> its_lock(sender_mutex_);//這個(gè)sender_在init流程的分析中跟過,它就是一個(gè)類型為local_client_endpoint的對象if (sender_) {sender_->send(its_command, static_cast<std::uint32_t>(its_eventgroups_size));}}if (_is_provided) {//打印日志}delete[] its_command;
}
總結(jié)一下:vsomeip的應(yīng)用如果需要在某個(gè)事件觸發(fā)的時(shí)候得到消息的回調(diào),就必須通過其提供的request_event方法來請求事件,請求事件的邏輯又會(huì)根據(jù)路由的host角色與proxy角色的不同來處理邏輯,兩種角色的相同部分邏輯就是會(huì)在自己的模塊中創(chuàng)建需要關(guān)聯(lián)的event與eventgroup的信息,不同的部分在于proxy端還需要將它所訂閱的事件信息發(fā)送給host路由,當(dāng)host路由收到網(wǎng)段內(nèi)其他端的事件通知時(shí),它就會(huì)將其通知給對應(yīng)的proxy端。
走完request_event的流程,接下來看subscribe,客戶端訂閱事件需要request_event與subscribe搭配才能收到事件。
Host路由的subscribe實(shí)現(xiàn)
host路由的subscribe函數(shù)實(shí)現(xiàn)在routing_manager_impl.cpp文件中,函數(shù)定義如下:
void routing_manager_impl::subscribe(client_t _client, uid_t _uid, gid_t _gid,service_t _service, instance_t _instance, eventgroup_t _eventgroup,major_version_t _major, event_t _event)
該函數(shù)實(shí)現(xiàn)中,首先根據(jù)服務(wù)實(shí)例信息查看本地服務(wù)中是否存在提供該服務(wù)實(shí)例的客戶端ID
const client_t its_local_client = find_local_client(_service, _instance);
這里有幾個(gè)判斷條件,匯總?cè)缦?#xff1a;
- 根據(jù)訂閱的事件信息,從本地服務(wù)的緩存中查找是否存在提供該事件的客戶端
- 提供事件信息的客戶端是當(dāng)前的app模塊(get_client() == its_local_client),那么直接通過app中的on_subscription觸發(fā)回調(diào),告知訂閱成功,app模塊的回調(diào)函數(shù)如果允許訂閱,則通過rtm_stub發(fā)送subscribe_ack命令,否則發(fā)送subscribe_nack
- 如果提供事件信息的客戶端不是當(dāng)前app模塊,且本地服務(wù)中沒有找到有提供該事件的實(shí)例,那么就通過SD模塊去發(fā)起遠(yuǎn)程訂閱(網(wǎng)段內(nèi)廣播訂閱)
- 如果提供事件信息的客戶端不是當(dāng)前app模塊,但是在本地服務(wù)中找到存在了事件的服務(wù)實(shí)例,通過rtm_stub告知對應(yīng)的app端有其他的客戶端向它的服務(wù)中包含的事件發(fā)起了訂閱(unix域通信)。
下面是代碼
//簡化后的代碼:訂閱的服務(wù)實(shí)例類型為本地服務(wù)
void routing_manager_impl::subscribe(client_t _client, uid_t _uid, gid_t _gid,service_t _service, instance_t _instance, eventgroup_t _eventgroup,major_version_t _major, event_t _event) {const client_t its_local_client = find_local_client(_service, _instance);if (get_client() == its_local_client) {auto self = shared_from_this();//調(diào)用application模塊的on_subscription方法host_->on_subscription(_service, _instance, _eventgroup, _client, _uid, _gid, true,[this, self, _client, _uid, _gid, _service, _instance, _eventgroup,_event, _major](const bool _subscription_accepted) {(void) ep_mgr_->find_or_create_local(_client);//如果當(dāng)前app拒絕訂閱,則通過stub發(fā)送nack給到訂閱事件請求的客戶端,并返回if (!_subscription_accepted) {stub_->send_subscribe_nack(_client, _service, _instance, _eventgroup, _event);return;} else {//如果application中接受訂閱,則發(fā)送ackstub_->send_subscribe_ack(_client, _service, _instance, _eventgroup, _event);}//如果application中接受訂閱,調(diào)用rtm_base的subscribe,創(chuàng)建事件的訂閱器routing_manager_base::subscribe(_client, _uid, _gid, _service, _instance, _eventgroup, _major, _event);});}
提供事件信息的客戶端不是當(dāng)前app模塊
if (discovery_) {std::set<event_t> its_already_subscribed_events;std::unique_lock<std::mutex> its_critical(remote_subscription_state_mutex_);//添加訂閱器bool inserted = insert_subscription(_service, _instance, _eventgroup,_event, _client, &its_already_subscribed_events);if (inserted) {//當(dāng)前為路由模塊if (0 == its_local_client) {handle_subscription_state(_client, _service, _instance, _eventgroup, _event);its_critical.unlock();static const ttl_t configured_ttl(configuration_->get_sd_ttl());//觸發(fā)一次事件通知notify_one_current_value(_client, _service, _instance,_eventgroup, _event, its_already_subscribed_events);//通過SD模塊,廣播遠(yuǎn)程訂閱信息auto its_info = find_eventgroup(_service, _instance, _eventgroup);if (its_info) {discovery_->subscribe(_service, _instance, _eventgroup,_major, configured_ttl,its_info->is_selective() ? _client : VSOMEIP_ROUTING_CLIENT,its_info);}} else {//非路由模塊its_critical.unlock();if (is_available(_service, _instance, _major)) {//發(fā)送命令類型為VSOMEIP_SUBSCRIBE的報(bào)文給到代理端,rtm_proxy端的on_message函數(shù)最終//會(huì)收到該報(bào)文stub_->send_subscribe(ep_mgr_->find_local(_service, _instance),_client, _service, _instance, _eventgroup, _major, _event,PENDING_SUBSCRIPTION_ID);}}}if (get_client() == _client) {std::lock_guard<std::mutex> ist_lock(pending_subscription_mutex_);subscription_data_t subscription = {_service, _instance, _eventgroup, _major, _event, _uid, _gid};pending_subscriptions_.insert(subscription);}} else {VSOMEIP_ERROR<< "SOME/IP eventgroups require SD to be enabled!";}}
上面的代碼中,我們看到幾個(gè)子流程:
- 給對應(yīng)客戶端發(fā)subscribe nack命令:routing_manager_stub::send_subscribe_nack
- 給對應(yīng)客戶端發(fā)subscribe ack命令:routing_manager_stub::send_subscribe_ack
- 給對應(yīng)客戶端發(fā)訂閱命令:routing_manager_stub::send_subscribe
- 創(chuàng)建訂閱器:routing_manager_base::subscribe
- 調(diào)用sd模塊的subscribe方法
我們摘出來一個(gè)個(gè)的單個(gè)看
routing_manager_stub::send_subscribe_nack 這個(gè)函數(shù)調(diào)用的場景在訂閱時(shí)提供事件信息的客戶端是當(dāng)前的app模塊,且app模塊中通過register_subscription_handler注冊了訂閱操作函數(shù),此時(shí)在訂閱回調(diào)觸發(fā)時(shí),如果app模塊拒絕客戶端訂閱,則會(huì)發(fā)送一個(gè)nack給到訂閱的客戶端。該函數(shù)定義如下:
void routing_manager_stub::send_subscribe_nack(client_t _client, service_t _service,instance_t _instance, eventgroup_t _eventgroup, event_t _event) {//找到目標(biāo)client的endpoint對象std::shared_ptr<endpoint> its_endpoint = host_->find_local(_client);if (its_endpoint) {byte_t its_command[VSOMEIP_SUBSCRIBE_NACK_COMMAND_SIZE];uint32_t its_size = VSOMEIP_SUBSCRIBE_NACK_COMMAND_SIZE- VSOMEIP_COMMAND_HEADER_SIZE;client_t this_client = get_client();its_command[VSOMEIP_COMMAND_TYPE_POS] = VSOMEIP_SUBSCRIBE_NACK;//省略了拼包邏輯//將該命令報(bào)文發(fā)送給到對應(yīng)的client端its_endpoint->send(&its_command[0], sizeof(its_command));}
}
這個(gè)消息會(huì)在客戶端的routing_manager_proxy中被處理:
void routing_manager_proxy::on_message(const byte_t *_data, length_t _size,endpoint *_receiver, const boost::asio::ip::address &_destination,client_t _bound_client,credentials_t _credentials,const boost::asio::ip::address &_remote_address,std::uint16_t _remote_port) {//省略代碼case VSOMEIP_SUBSCRIBE_NACK://省略代碼//直接轉(zhuǎn)到了該函數(shù)進(jìn)行處理on_subscribe_nack(its_subscriber, its_service, its_instance, its_eventgroup, its_event);
}void routing_manager_proxy::on_subscribe_nack(client_t _client,service_t _service, instance_t _instance, eventgroup_t _eventgroup, event_t _event) {(void)_client;//如果是任意類型的事件訂閱被拒絕,則通知對應(yīng)的事件組中所有事件的服務(wù)模塊該事件訂閱被拒絕if (_event == ANY_EVENT) {auto its_eventgroup = find_eventgroup(_service, _instance, _eventgroup);if (its_eventgroup) {for (const auto& its_event : its_eventgroup->get_events()) {host_->on_subscription_status(_service, _instance, _eventgroup, its_event->get_event(), 0x7 /*Rejected*/);}}} else {//通知client端的app模塊,該事件被拒絕host_->on_subscription_status(_service, _instance, _eventgroup, _event, 0x7 /*Rejected*/);}
}
最終,如果client的app有注冊訂閱狀態(tài)的操作函數(shù),則能夠監(jiān)聽到訂閱狀態(tài)的回調(diào)
app_->register_subscription_status_handler
routing_manager_stub::send_subscribe_ack的流程與nack的流程基本大同小異,這里不再贅述了。
routing_manager_stub::send_subscribe : 這個(gè)給客戶端發(fā)送訂閱命令的調(diào)用場景是:客戶端訂閱時(shí),提供訂閱事件的服務(wù)實(shí)例由另外一個(gè)代理客戶端提供,所以這里拼了一個(gè)VSOMEIP_SUBSCRIBE的包發(fā)給rtm_proxy去處理
調(diào)用sd模塊的subscribe方法:SD模塊的業(yè)務(wù)實(shí)現(xiàn)在service_discovery_impl中,刨去細(xì)枝末節(jié),來跟一下整個(gè)subscribe的流程如下:
void
service_discovery_impl::subscribe(service_t _service, instance_t _instance,eventgroup_t _eventgroup, major_version_t _major,ttl_t _ttl, client_t _client,const std::shared_ptr<eventgroupinfo> &_info) {......send_subscription(its_subscription,_service, _instance, _eventgroup,_client);
}void
service_discovery_impl::send_subscription(const std::shared_ptr<subscription> &_subscription,const service_t _service, const instance_t _instance,const eventgroup_t _eventgroup,const client_t _client) {auto its_reliable = _subscription->get_endpoint(true);auto its_unreliable = _subscription->get_endpoint(false);boost::asio::ip::address its_address;get_subscription_address(its_reliable, its_unreliable, its_address);if (!its_address.is_unspecified()) {......if (its_data.entry_) {auto its_current_message = std::make_shared<message_impl>();std::vector<std::shared_ptr<message_impl> > its_messages;its_messages.push_back(its_current_message);add_entry_data(its_messages, its_data);//序列化數(shù)據(jù),然后發(fā)送報(bào)文serialize_and_send(its_messages, its_address);} }
}//該函數(shù)中省略了部分代碼
bool
service_discovery_impl::serialize_and_send(const std::vector<std::shared_ptr<message_impl> > &_messages,const boost::asio::ip::address &_address) {if (!_address.is_unspecified()) {std::lock_guard<std::mutex> its_lock(serialize_mutex_);for (const auto &m : _messages) {if (m->has_entry()) {......//序列化報(bào)文數(shù)據(jù)if (serializer_->serialize(m.get())) {if (host_->send_via_sd(endpoint_definition::get(_address, port_,reliable_, m->get_service(), m->get_instance()),serializer_->get_data(), serializer_->get_size(),port_)) {//新增session idincrement_session(_address);}} }}}
}
//上面函數(shù)走完了,業(yè)務(wù)層面的流程就走完了,后面是網(wǎng)絡(luò)發(fā)送相關(guān)的邏輯,host_指針類型是routing_manager_impl, send_via_sd實(shí)現(xiàn)在rtm_impl中。
bool routing_manager_impl::send_via_sd(const std::shared_ptr<endpoint_definition> &_target,const byte_t *_data, uint32_t _size, uint16_t _sd_port) {std::shared_ptr<endpoint> its_endpoint =ep_mgr_impl_->find_server_endpoint(_sd_port,_target->is_reliable());return its_endpoint->send_to(_target, _data, _size);
}//上面的its_endpoint實(shí)際是udp_server_endpoint_impl類型的共享指針
bool udp_server_endpoint_impl::send_to(const std::shared_ptr<endpoint_definition> _target,const byte_t *_data, uint32_t _size) {std::lock_guard<std::mutex> its_lock(mutex_);endpoint_type its_target(_target->get_address(), _target->get_port());return send_intern(its_target, _data, _size);
}//send_intern實(shí)現(xiàn)在父類server_endpoint_impl中,是一個(gè)模板函數(shù)
template<typename Protocol>
bool server_endpoint_impl<Protocol>::send_intern(endpoint_type _target, const byte_t *_data, uint32_t _size) {......// STEP 10: restart timer with current departure timetarget_train->departure_timer_->expires_from_now(target_train->departure_);target_train->departure_timer_->async_wait(std::bind(&server_endpoint_impl<Protocol>::flush_cbk,this->shared_from_this(), _target,target_train, std::placeholders::_1));
}//接下來flush_cbk觸發(fā)執(zhí)行
template<typename Protocol>
void server_endpoint_impl<Protocol>::flush_cbk(endpoint_type _target,const std::shared_ptr<train>& _train, const boost::system::error_code &_error_code) {if (!_error_code) {(void) flush(_target, _train);}
}//調(diào)用了flush函數(shù)template<typename Protocol>
bool server_endpoint_impl<Protocol>::flush(endpoint_type _target,const std::shared_ptr<train>& _train){//buffer不為空的情況下,循環(huán)取出數(shù)據(jù)加入隊(duì)列if (!_train->buffer_->empty()){const queue_iterator_type target_queue_iterator = queues_.find(_target);if (target_queue_iterator != queues_.end()) {const bool queue_size_zero_on_entry(target_queue_iterator->second.second.empty());queue_train(target_queue_iterator, _train, queue_size_zero_on_entry);is_flushed = true;} }
}//將數(shù)據(jù)不斷發(fā)送到報(bào)文
template<typename Protocol>
void server_endpoint_impl<Protocol>::queue_train(const queue_iterator_type _queue_iterator,const std::shared_ptr<train>& _train,bool _queue_size_zero_on_entry) {...send_queued(_queue_iterator);
}//send_queued是一個(gè)虛函數(shù),由子類自行實(shí)現(xiàn),這里又走到了udp_server_endpoint_impl中, 發(fā)送流程基本就走完了。
void udp_server_endpoint_impl::send_queued(const queue_iterator_type _queue_iterator) {message_buffer_ptr_t its_buffer = _queue_iterator->second.second.front();......std::lock_guard<std::mutex> its_lock(unicast_mutex_);unicast_socket_.async_send_to(boost::asio::buffer(*its_buffer),_queue_iterator->first,std::bind(&udp_server_endpoint_base_impl::send_cbk,shared_from_this(),_queue_iterator,std::placeholders::_1,std::placeholders::_2));
}
proxy路由的subscribe實(shí)現(xiàn)
跟以往所有流程一樣,proxy的實(shí)現(xiàn)相對host來說要簡單很多了,proxy中的subscribe實(shí)現(xiàn)在routing_manager_proxy中
void routing_manager_proxy::subscribe(client_t _client, uid_t _uid, gid_t _gid, service_t _service,instance_t _instance, eventgroup_t _eventgroup, major_version_t _major,event_t _event) {......std::lock_guard<std::mutex> its_lock(state_mutex_);//服務(wù)可用,且當(dāng)前app狀態(tài)為已注冊if (state_ == inner_state_type_e::ST_REGISTERED && is_available(_service, _instance, _major)) {send_subscribe(client_, _service, _instance, _eventgroup, _major, _event );}subscription_data_t subscription = { _service, _instance, _eventgroup, _major, _event, _uid, _gid};pending_subscriptions_.insert(subscription);
}void routing_manager_proxy::send_subscribe(client_t _client, service_t _service,instance_t _instance, eventgroup_t _eventgroup, major_version_t _major,event_t _event) {...its_command[VSOMEIP_COMMAND_TYPE_POS] = VSOMEIP_SUBSCRIBE;...client_t target_client = find_local_client(_service, _instance);if (target_client != VSOMEIP_ROUTING_CLIENT) {//發(fā)送命令包給該訂閱事件所在服務(wù)所屬的客戶端,存在于這種情況,同一個(gè)進(jìn)程內(nèi)存在//一個(gè)路由app與多個(gè)proxy app, 其中某個(gè)proxy app提供了該訂閱所需的事件。//那么這個(gè)報(bào)文就發(fā)送到了另外一個(gè)app模塊的routing_manager_proxy中的on_message方法//處理auto its_target = ep_mgr_->find_or_create_local(target_client);its_target->send(its_command, sizeof(its_command));} else {//發(fā)送命令給到路由app,該命令由routing_manager_stub處理。std::lock_guard<std::mutex> its_lock(sender_mutex_);if (sender_) {sender_->send(its_command, sizeof(its_command));}}
}//第一種情況,proxy->proxy
void routing_manager_proxy::on_message(const byte_t *_data, length_t _size,endpoint *_receiver, const boost::asio::ip::address &_destination,client_t _bound_client,credentials_t _credentials,const boost::asio::ip::address &_remote_address,std::uint16_t _remote_port) {.......case VSOMEIP_SUBSCRIBE://1.觸發(fā)app中的subscription status回調(diào),如果app模塊允許訂閱,則回復(fù)ack, 否則回復(fù)nack//2.創(chuàng)建訂閱器
}//第二種情況,proxy->stub
void routing_manager_stub::on_message(const byte_t *_data, length_t _size,endpoint *_receiver, const boost::asio::ip::address &_destination,client_t _bound_client,credentials_t _credentials,const boost::asio::ip::address &_remote_address,std::uint16_t _remote_port) {case VSOMEIP_SUBSCRIBE://調(diào)用rtm_impl中的subscribe流程 host_->subscribe(its_client, its_sender_uid, its_sender_gid, its_service, its_instance,its_eventgroup, its_major, its_notifier);
}
總結(jié)
客戶端要訂閱事件,需要先調(diào)用request_event在本地創(chuàng)建event與eventgroup信息,再調(diào)用subscribe發(fā)起訂閱;host路由根據(jù)事件提供方的位置,分別走本地回調(diào)、通過stub轉(zhuǎn)發(fā)或通過SD模塊發(fā)起遠(yuǎn)程訂閱,而proxy路由則把訂閱命令發(fā)送給提供事件的代理端或host路由處理。
以上是生活随笔為你收集整理的vsomeip源码梳理 -- Event订阅流程的全部內(nèi)容,希望文章能夠幫你解決所遇到的問題。
- 上一篇: spring smtp_使用Spring
- 下一篇: 程咬金的老婆是谁 程咬金的老婆是裴翠云