As noted earlier, the class ICPProcessorExt exposes two external interfaces: start() and ICPQuery(). start() initializes and starts the ICP module and has already been analyzed above; that leaves ICPQuery(), which performs a single ICP query.
In ICPProcessor.cc:
Action *
ICPProcessorExt::ICPQuery(Continuation * c, URL * url)
{
// Delegate directly to ICPProcessor::ICPQuery()
return _ICPpr->ICPQuery(c, url);
}
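Seen from the caller's side, HttpSM drives this interface roughly as follows (a minimal sketch of the calling convention, not verbatim HttpSM code; pending_action and icp_query_url stand in for the real member variables):
// Sketch: issuing an ICP lookup from an upper-layer continuation such as HttpSM.
Action *a = icpProcessor.ICPQuery(this, &icp_query_url);
if (a != ACTION_RESULT_DONE) {
  // The query is in flight; keep the Action so the transaction can
  // cancel() it if it aborts before the ICP verdict arrives as an event.
  pending_action = a;
}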
ICPProcessorExt::ICPQuery()--->ICPProcessor::ICPQuery()
This function creates and initializes ICPRequestCont, the continuation that represents the ICP query request, sets its handler, and immediately schedules it for execution.
Action *
ICPProcessor::ICPQuery(Continuation * c, URL * url)
{
// The incoming continuation c is the upper-layer state machine (HttpSM)
EThread *thread = this_ethread();
ProxyMutex *mutex = thread->mutex;
// Create and initialize the continuation representing this ICP query: ICPRequestCont
ICPRequestCont *rc = new(ICPRequestCont_allocator.alloc()) ICPRequestCont(this, c, url);
ICP_INCREMENT_DYN_STAT(icp_query_requests_stat);
// Record the query start time
rc->SetRequestStartTime();
// Set the ICPRequestCont handler to ICPRequestCont::ICPRequestEvent and schedule it immediately
SET_CONTINUATION_HANDLER(rc, (ICPRequestContHandler) & ICPRequestCont::ICPRequestEvent);
eventProcessor.schedule_imm(rc, ET_ICP);
// Return the Action associated with this request, so the caller can cancel it
return rc->GetActionPtr();
}
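Two details deserve emphasis. First, the return value is not the caller's continuation but the Action embedded inside ICPRequestCont; the caller keeps it only so it can cancel() a query it no longer cares about. Second, nothing runs inline: the work is deferred onto an ET_ICP event thread via schedule_imm(). Reduced to its core, the pattern looks like this (a sketch with illustrative names such as QueryCont, assuming the ATS event-system headers; this is not the real ICPRequestCont definition):
// Deferred-work pattern: allocate a continuation, bind an Action to the
// caller, schedule it, and hand the Action back as a cancellation handle.
class QueryCont : public Continuation
{
public:
  QueryCont(Continuation *caller) : Continuation(new_ProxyMutex())
  {
    _act = caller;                        // Action remembers the caller and shares its mutex
    SET_HANDLER(&QueryCont::mainEvent);
  }
  Action *GetActionPtr() { return &_act; }
  int mainEvent(int event, Event *e)
  {
    (void) event; (void) e;
    if (_act.cancelled) {                 // the caller gave up; just clean up
      delete this;
      return EVENT_DONE;
    }
    // ... do the work, then _act.continuation->handleEvent(status, data) ...
    delete this;
    return EVENT_DONE;
  }
private:
  Action _act;                            // cancellable handle returned to the caller
};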
ICPProcessorExt::ICPQuery()--->ICPProcessor::ICPQuery()--->ICPRequestCont::ICPRequestEvent()
This function feeds events into the ICP state machine and then acts on the outcome: if the state machine could not make progress (EVENT_CONT, typically because a lock could not be acquired), the continuation is rescheduled to retry; if the state machine has reached ICP_DONE, the continuation is freed.
int
ICPRequestCont::ICPRequestEvent(int event, Event * e)
{
ink_assert(event == NET_EVENT_DATAGRAM_WRITE_COMPLETE ||
event == NET_EVENT_DATAGRAM_WRITE_ERROR ||
event == EVENT_IMMEDIATE || event == EVENT_INTERVAL || event == ICP_RESPONSE_MESSAGE);
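// Datagram write completions are pure bookkeeping: drop the finished send
// Action from pendingActions (which decrements npending_actions) and return.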
if ((event == NET_EVENT_DATAGRAM_WRITE_COMPLETE)
|| (event == NET_EVENT_DATAGRAM_WRITE_ERROR)) {
ink_assert(npending_actions > 0);
remove_from_pendingActions((Action *) e);
return EVENT_DONE;
}
switch (_next_state) {
case ICP_START:
case ICP_OFF_TERMINATE:
case ICP_QUEUE_REQUEST:
case ICP_AWAITING_RESPONSE:
case ICP_DEQUEUE_REQUEST:
case ICP_POST_COMPLETION:
case ICP_REQUEST_NOT_ACTIVE:
{
// Enter the ICP state machine to advance the query
if (ICPStateMachine(event, (void *) e) == EVENT_CONT) {
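// EVENT_CONT: the state machine could not take a lock it needed; retry shortly.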
eventProcessor.schedule_in(this, HRTIME_MSECONDS(RETRY_INTERVAL), ET_ICP);
return EVENT_CONT;
} else if (_next_state == ICP_DONE) {
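// The query has run to completion; free this continuation.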
delete this;
break;
} else {
break;
}
}
case ICP_DONE:
default:
ink_release_assert(0);
}
return EVENT_DONE;
}
ICPProcessorExt::ICPQuery()--->ICPProcessor::ICPQuery()--->ICPRequestCont::ICPRequestEvent()--->ICPRequestCont::ICPStateMachine()
This function implements the complete handling of one ICP query, structured as a state machine. The states and what each one does are listed below; a compact transition summary follows the list.
ICP_START: check whether sending ICP queries is allowed (and that the URL is not localhost), build the ICP query request message, and switch to ICP_QUEUE_REQUEST.
ICP_OFF_TERMINATE: terminate the query: notify the upper-layer continuation, then switch to ICP_DONE.
ICP_QUEUE_REQUEST: add this continuation to the pending ICP request queue, then send the ICP query message to every peer on the send list (starting at the current send-peer bias, multicast included), arm the query timeout, and switch to ICP_AWAITING_RESPONSE.
ICP_AWAITING_RESPONSE: handle an incoming response or the timeout; on timeout the parent peer list is scanned for a peer that replied. Once the query is resolved, switch to ICP_DEQUEUE_REQUEST.
ICP_DEQUEUE_REQUEST: remove this continuation from the pending ICP request queue and switch to ICP_POST_COMPLETION.
ICP_POST_COMPLETION: post the result to the upper-layer continuation and switch to ICP_WAIT_SEND_COMPLETE.
ICP_WAIT_SEND_COMPLETE: wait until all outstanding sends have completed, then switch to ICP_REQUEST_NOT_ACTIVE.
ICP_REQUEST_NOT_ACTIVE: release the associated resources (decrement the processor's pending-query count) and switch to ICP_DONE.
ICP_DONE: terminal state; nothing to do.
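Pulled out of the handler below, the transitions are:
ICP_START → ICP_QUEUE_REQUEST → ICP_AWAITING_RESPONSE → ICP_DEQUEUE_REQUEST → ICP_POST_COMPLETION → ICP_WAIT_SEND_COMPLETE → ICP_REQUEST_NOT_ACTIVE → ICP_DONE
with two early exits: ICP_START → ICP_OFF_TERMINATE → ICP_DONE when ICP is off or the URL is localhost, and ICP_QUEUE_REQUEST → ICP_DEQUEUE_REQUEST when no replies can be expected.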
int
ICPRequestCont::ICPStateMachine(int event, void *d)
{
ICPConfiguration *ICPcf = _ICPpr->GetConfig();
ip_port_text_buffer ipb;
while (1) {
switch (_next_state) {
case ICP_START:
{
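// Stop at once if the caller has already cancelled the Action returned by ICPQuery().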
if (_act.cancelled) {
_next_state = ICP_DONE;
return EVENT_DONE;
}
if (!_ICPpr->Lock())
return EVENT_CONT;
if (_ICPpr->AllowICPQueries() && (ICPcf->globalConfig()->ICPconfigured() == ICP_MODE_SEND_RECEIVE)) {
if (_url->valid()) {
int host_len;
const char *host = _url->host_get(&host_len);
if (ptr_len_casecmp(host, host_len, "127.0.0.1") == 0 || ptr_len_casecmp(host, host_len, "localhost") == 0) {
_ICPpr->Unlock();
_next_state = ICP_OFF_TERMINATE;
Debug("icp", "[ICP_START] NULL/localhost URL ignored Id=%d", _sequence_number);
break;
}
}
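// Count this query as pending on the processor, then build the ICP_OP_QUERY packet from the URL string.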
_ICPpr->IncPendingQuery();
_ICPpr->Unlock();
char *urlstr = _url->string_get(NULL);
int urlstr_len = strlen(urlstr) + 1;
int status = BuildICPMsg(ICP_OP_QUERY,
_sequence_number = ICPReqSeqNumber(),
0 /* optflags */ , 0 /* optdata */ ,
0 /* shostid */ ,
(void *) urlstr, urlstr_len,
&_sendMsgHdr, _sendMsgIOV,
&_ICPmsg);
ink_assert(status == 0);
Debug("icp", "[ICP_START] ICP_OP_QUERY for [%s], Id=%d", urlstr, _sequence_number);
_next_state = ICP_QUEUE_REQUEST;
break;
} else {
ICP_INCREMENT_DYN_STAT(icp_start_icpoff_stat);
_ICPpr->Unlock();
_next_state = ICP_OFF_TERMINATE;
break;
}
}
case ICP_OFF_TERMINATE:
{
if (!MUTEX_TAKE_TRY_LOCK_FOR(mutex, this_ethread(), _cont)) {
return EVENT_CONT;
}
Debug("icp", "[ICP_OFF_TERMINATE] Id=%d", _sequence_number);
if (!_act.cancelled) {
_cont->handleEvent(_ret_status, (void *) &_ret_sockaddr);
}
MUTEX_UNTAKE_LOCK(mutex, this_ethread());
_next_state = ICP_DONE;
return EVENT_DONE;
}
case ICP_QUEUE_REQUEST:
{
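// Insert this continuation into the pending ICP request queue, keyed by
// sequence number, so incoming responses can be routed back to it.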
int ret = AddICPRequest(_sequence_number, this);
ink_assert(ret == 0);
int bias = _ICPpr->GetStartingSendPeerBias();
int SendPeers = _ICPpr->GetSendPeers();
npending_actions = 0;
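// Walk the send-peer list starting at the round-robin bias and fire the query at every online peer.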
while (SendPeers > 0) {
Peer *P = _ICPpr->GetNthSendPeer(SendPeers, bias);
if (!P->IsOnline()) {
SendPeers--;
continue;
}
int was_expected = P->ExpectedReplies(&_expected_replies_list);
_expected_replies += was_expected;
npending_actions++;
Action *a = P->SendMsg_re(this, P, &_sendMsgHdr, NULL);
if (!a) {
a = ACTION_IO_ERROR;
}
if (a != ACTION_IO_ERROR) {
if (a != ACTION_RESULT_DONE) {
if (!pendingActions) {
pendingActions = NEW(new DynArray<Action *>(&default_action));
}
(*pendingActions) (npending_actions) = a;
}
P->LogSendMsg(&_ICPmsg, NULL); // log as send query
Debug("icp", "[ICP_QUEUE_REQUEST] Id=%d send query to [%s]",
_sequence_number, ats_ip_nptop(P->GetIP(), ipb, sizeof(ipb)));
} else {
_expected_replies_list.ClearBit(P->GetPeerID());
_expected_replies -= was_expected;
ICP_INCREMENT_DYN_STAT(send_query_partial_write_stat);
Debug("icp_warn",
"ICP query send, res=%d, ip=%s", ntohs(_ICPmsg.h.msglen),
ats_ip_ntop(P->GetIP(), ipb, sizeof(ipb)));
}
SendPeers--;
}
Debug("icp", "[ICP_QUEUE_REQUEST] Id=%d expected replies=%d", _sequence_number, _expected_replies);
if (!_expected_replies) {
ICP_INCREMENT_DYN_STAT(icp_queries_no_expected_replies_stat);
_next_state = ICP_DEQUEUE_REQUEST;
break;
}
ICP_SUM_DYN_STAT(total_udp_send_queries_stat, _expected_replies);
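// Arm the configured ICP query timeout before waiting for replies.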
int tval = _ICPpr->GetConfig()->globalConfig()->ICPqueryTimeout();
_timeout = eventProcessor.schedule_in(this, HRTIME_SECONDS(tval), ET_ICP);
_next_state = ICP_AWAITING_RESPONSE;
return EVENT_DONE;
}
case ICP_AWAITING_RESPONSE:
{
Debug("icp", "[ICP_AWAITING_RESPONSE] Id=%d", _sequence_number);
ink_assert(d);
ICPRequestEventArgs_t dummyArgs;
ICPRequestEventArgs_t *args = 0;
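// EVENT_INTERVAL here means the query timeout fired; synthesize empty args for it.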
if (event == ICP_RESPONSE_MESSAGE) {
args = (ICPRequestEventArgs_t *) d;
} else if (event == EVENT_INTERVAL) {
memset((void *) &dummyArgs, 0, sizeof(dummyArgs));
args = &dummyArgs;
} else {
ink_release_assert(0);
}
if (ICPResponseMessage(event, args->rICPmsg, args->peer) == EVENT_DONE) {
_next_state = ICP_DEQUEUE_REQUEST;
break;
} else {
return EVENT_DONE;
}
}
case ICP_DEQUEUE_REQUEST:
{
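// Remove the query from the pending request queue; late replies will simply be dropped.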
int ret = RemoveICPRequest(_sequence_number);
Debug("icp", "[ICP_DEQUEUE_REQUEST] Id=%d", _sequence_number);
ink_assert(ret == 0);
_next_state = ICP_POST_COMPLETION;
break;
}
case ICP_POST_COMPLETION:
{
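// Take the caller's mutex and deliver the final verdict (_ret_status plus the
// chosen peer's address) unless the Action was cancelled.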
if (!MUTEX_TAKE_TRY_LOCK_FOR(mutex, this_ethread(), _cont)) {
return EVENT_CONT;
}
Debug("icp", "[ICP_POST_COMPLETION] Id=%d", _sequence_number);
if (!_act.cancelled) {
_cont->handleEvent(_ret_status, (void *) &_ret_sockaddr);
}
MUTEX_UNTAKE_LOCK(mutex, this_ethread());
ICP_SUM_DYN_STAT(total_icp_request_time_stat, (ink_get_hrtime() - _start_time));
_next_state = ICP_WAIT_SEND_COMPLETE;
break;
}
case ICP_WAIT_SEND_COMPLETE:
{
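// npending_actions is decremented by ICPRequestEvent() as datagram
// write-complete events arrive; stay in this state until every send has finished.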
if (npending_actions > 0) {
Debug("icp", "[ICP_WAIT_SEND_COMPLETE] Id=%d active=%d", _sequence_number, npending_actions);
} else {
_next_state = ICP_REQUEST_NOT_ACTIVE;
break;
}
}
break;
case ICP_REQUEST_NOT_ACTIVE:
{
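// All sends done and the caller notified; drop the processor's pending-query count and finish.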
Debug("icp", "[ICP_REQUEST_NOT_ACTIVE] Id=%d", _sequence_number);
_sequence_number = 0;
if (!_ICPpr->Lock())
return EVENT_CONT;
_ICPpr->DecPendingQuery();
_ICPpr->Unlock();
_next_state = ICP_DONE;
return EVENT_DONE;
}
case ICP_DONE:
default:
ink_release_assert(0);
}
}
}
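One last note on the packet itself. BuildICPMsg(), called in ICP_START above, assembles a standard ICPv2 message: per RFC 2186 the header is 20 bytes in network byte order, and for ICP_OP_QUERY the payload is a 4-byte requester host address followed by the NUL-terminated URL (hence urlstr_len = strlen(urlstr) + 1). As a descriptive sketch (field names are mine, not ATS's actual message struct):
#include <cstdint>
// ICPv2 header per RFC 2186 (20 bytes, all fields in network byte order).
struct IcpHeaderSketch {
  uint8_t  opcode;       // ICP_OP_QUERY == 1, ICP_OP_HIT == 2, ICP_OP_MISS == 3, ...
  uint8_t  version;      // protocol version, 2
  uint16_t msglen;       // total message length, header plus payload
  uint32_t requestno;    // matches _sequence_number from ICPReqSeqNumber()
  uint32_t optionflags;  // optional feature bits
  uint32_t optiondata;   // data for the option bits
  uint32_t shostid;      // sender host id (the shostid argument above, passed as 0)
};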