Understanding libnice through janus

An analysis of nice_agent_set_remote_candidates

After a trickle message is received, the candidates are parsed and nice_agent_set_remote_candidates() is called, which in turn calls _set_remote_candidates_locked().
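
For context, here is a hedged caller-side sketch (not the actual janus source; the helper name and the address/priority values are illustrative) of how a trickled candidate typically ends up in this call. The remote ufrag/password are usually installed separately via nice_agent_set_remote_credentials().

#include <agent.h>   /* libnice */

static void
sketch_add_trickled_candidate (NiceAgent *agent, guint stream_id, guint component_id)
{
  NiceCandidate *cand = nice_candidate_new (NICE_CANDIDATE_TYPE_HOST);
  GSList *list = NULL;

  cand->stream_id = stream_id;
  cand->component_id = component_id;
  cand->transport = NICE_CANDIDATE_TRANSPORT_UDP;
  cand->priority = 2130706431;                      /* taken from the a=candidate SDP line */
  g_strlcpy (cand->foundation, "1", NICE_CANDIDATE_MAX_FOUNDATION);
  nice_address_set_from_string (&cand->addr, "192.0.2.10");
  nice_address_set_port (&cand->addr, 50000);

  list = g_slist_append (list, cand);
  /* the agent copies what it needs; the return value is the number of candidates added */
  nice_agent_set_remote_candidates (agent, stream_id, component_id, list);

  g_slist_free (list);
  nice_candidate_free (cand);
}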

static int
_set_remote_candidates_locked (NiceAgent *agent, Stream *stream,
    Component *component, const GSList *candidates)
{
  const GSList *i;
  int added = 0;

  for (i = candidates; i && added >= 0; i = i->next) {
    NiceCandidate *d = (NiceCandidate*) i->data;

    if (nice_address_is_valid (&d->addr) == TRUE) {
      gboolean res =
          priv_add_remote_candidate (agent,
              stream->id,
              component->id,
              d->type,
              &d->addr,
              &d->base_addr,
              d->transport,
              d->priority,
              d->username,
              d->password,
              d->foundation);
      if (res)
        ++added;
    }
  }

  conn_check_remote_candidates_set(agent);

  if (added > 0) {
    gboolean res = conn_check_schedule_next (agent);
    if (res != TRUE)
      nice_debug ("Agent %p : Warning: unable to schedule any conn checks!", agent);
  }

  return added;
}

As we can see, this function iterates over the candidates and calls priv_add_remote_candidate() for each one, so let's look into priv_add_remote_candidate first.

static gboolean priv_add_remote_candidate (
  NiceAgent *agent,
  guint stream_id,
  guint component_id,
  NiceCandidateType type,
  const NiceAddress *addr,
  const NiceAddress *base_addr,
  NiceCandidateTransport transport,
  guint32 priority,
  const gchar *username,
  const gchar *password,
  const gchar *foundation)
{
  Component *component;
  NiceCandidate *candidate;

  if (!agent_find_component (agent, stream_id, component_id, NULL, &component))
    return FALSE;

  /* step: check whether the candidate already exists */
  candidate = component_find_remote_candidate(component, addr, transport);
  if (candidate) {
    if (nice_debug_is_enabled ()) {
      gchar tmpbuf[INET6_ADDRSTRLEN];
      nice_address_to_string (addr, tmpbuf);
      nice_debug ("Agent %p : Updating existing remote candidate with addr [%s]:%u"
          " for s%d/c%d. U/P '%s'/'%s' prio: %u", agent, tmpbuf,
          nice_address_get_port (addr), stream_id, component_id,
          username, password, priority);
    }
    /* case 1: an existing candidate, update the attributes */
    candidate->type = type;
    if (base_addr)
      candidate->base_addr = *base_addr;
    candidate->priority = priority;
    if (foundation)
      g_strlcpy(candidate->foundation, foundation,
          NICE_CANDIDATE_MAX_FOUNDATION);
    /* note: username and password must remain the same during
     *       a session; see sect 9.1.2 in ICE ID-19 */

    /* note: however, the user/pass in ID-19 is global, if the user/pass
     * are set in the candidate here, it means they need to be updated...
     * this is essential to overcome a race condition where we might receive
     * a valid binding request from a valid candidate that wasn't yet added to
     * our list of candidates.. this 'update' will make the peer-rflx a
     * server-rflx/host candidate again and restore that user/pass it needed
     * to have in the first place */
    if (username) {
      g_free (candidate->username);
      candidate->username = g_strdup (username);
    }
    if (password) {
      g_free (candidate->password);
      candidate->password = g_strdup (password);
    }
  }

From priv_add_remote_candidate above we can see that it first checks whether the candidate already exists in that component; the check simply compares addr and transport. If the candidate does not exist and its type is not PEER_REFLEXIVE, it is added to remote_candidates and conn_check_add_for_candidate is called. That function iterates over local_candidates and calls conn_check_add_for_candidate_pair() for each one, preparing connectivity checks against all local_candidates.
conn_check_add_for_candidate_pair merely filters out transports that do not need checking and skips local_candidates whose address family or transport protocol differs from the remote candidate's; for each remaining match it calls priv_conn_check_add_for_candidate_pair_matched() to set up the pair.
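
As a rough illustration of that filtering (a simplified sketch, not the libnice source; transports_are_compatible() is a hypothetical helper standing in for the real transport checks):

static void
sketch_add_pairs_for_remote (NiceAgent *agent, guint stream_id,
    Component *component, NiceCandidate *remote)
{
  GSList *i;

  for (i = component->local_candidates; i; i = i->next) {
    NiceCandidate *local = i->data;

    /* skip pairs whose address families differ (IPv4 vs IPv6) */
    if (nice_address_ip_version (&local->addr) !=
        nice_address_ip_version (&remote->addr))
      continue;

    /* skip pairs whose transports cannot interoperate (hypothetical helper) */
    if (!transports_are_compatible (local->transport, remote->transport))
      continue;

    priv_conn_check_add_for_candidate_pair_matched (agent, stream_id,
        component, local, remote, NICE_CHECK_FROZEN);
  }
}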

static void priv_conn_check_add_for_candidate_pair_matched (NiceAgent *agent,
    guint stream_id, Component *component, NiceCandidate *local,
    NiceCandidate *remote, NiceCheckState initial_state)
{
  nice_debug ("Agent %p, Adding check pair between %s and %s", agent,
      local->foundation, remote->foundation);
  priv_add_new_check_pair (agent, stream_id, component, local, remote,
      initial_state, FALSE);
  if (component->state == NICE_COMPONENT_STATE_CONNECTED ||
      component->state == NICE_COMPONENT_STATE_READY) {
    agent_signal_component_state_change (agent,
        stream_id,
        component->id,
        NICE_COMPONENT_STATE_CONNECTED);
  } else {
    agent_signal_component_state_change (agent,
        stream_id,
        component->id,
        NICE_COMPONENT_STATE_CONNECTING);
  }
}

priv_conn_check_add_for_candidate_pair_matched mainly does two things: it calls priv_add_new_check_pair and then emits a component state change via agent_signal_component_state_change. So the interesting part is priv_add_new_check_pair().

static void priv_add_new_check_pair (NiceAgent *agent, guint stream_id, Component *component, NiceCandidate *local, NiceCandidate *remote, NiceCheckState initial_state, gboolean use_candidate)
{
  Stream *stream;
  CandidateCheckPair *pair;

  g_assert (local != NULL);
  g_assert (remote != NULL);
  stream = agent_find_stream (agent, stream_id);
  pair = g_slice_new0 (CandidateCheckPair);

  pair->agent = agent;
  pair->stream_id = stream_id;
  pair->component_id = component->id;
  pair->local = local;
  pair->remote = remote;
  if (remote->type == NICE_CANDIDATE_TYPE_PEER_REFLEXIVE)
    pair->sockptr = (NiceSocket *) remote->sockptr;
  else
    pair->sockptr = (NiceSocket *) local->sockptr;
  g_snprintf (pair->foundation, NICE_CANDIDATE_PAIR_MAX_FOUNDATION, "%s:%s", local->foundation, remote->foundation);

  pair->priority = agent_candidate_pair_priority (agent, local, remote);
  pair->state = initial_state;
  nice_debug ("Agent %p : creating new pair %p state %d,local(%s:%u),remote(%s:%u)", agent, pair, initial_state,inet_ntoa(local->addr.s.ip4.sin_addr),ntohs(local->addr.s.ip4.sin_port),inet_ntoa(remote->addr.s.ip4.sin_addr),ntohs(remote->addr.s.ip4.sin_port));
  pair->nominated = use_candidate;
  pair->controlling = agent->controlling_mode;

  stream->conncheck_list = g_slist_insert_sorted (stream->conncheck_list, pair,
      (GCompareFunc)conn_check_compare);

  nice_debug ("Agent %p : added a new conncheck %p with foundation of '%s' to list %u.", agent, pair, pair->foundation, stream_id);

  /* implement the hard upper limit for number of
     checks (see sect 5.7.3 ICE ID-19): */
  if (agent->compatibility == NICE_COMPATIBILITY_RFC5245) {
    priv_limit_conn_check_list_size (stream->conncheck_list, agent->max_conn_checks);
  }
}

priv_add_new_check_pair() creates a new CandidateCheckPair, initializes its local and remote NiceCandidate pointers and other fields, computes the pair's priority from the local and remote priorities, and inserts the pair into conncheck_list sorted by that priority. Finally, for NICE_COMPATIBILITY_RFC5245 there is a hard upper limit on the size of the check list, which we can ignore here. At this point a usable remote candidate has been added to conncheck_list (the connectivity-check list), and we return to _set_remote_candidates_locked.
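
For reference, a minimal sketch of the pair-priority formula from RFC 5245 §5.7.2 that agent_candidate_pair_priority() computes, after deciding from the agent's controlling role which side contributes G (controlling) and D (controlled):

static guint64
sketch_pair_priority (guint32 g, guint32 d)
{
  guint64 min = MIN (g, d);
  guint64 max = MAX (g, d);

  /* pair priority = 2^32 * MIN(G,D) + 2 * MAX(G,D) + (G > D ? 1 : 0) */
  return (min << 32) + 2 * max + (g > d ? 1 : 0);
}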

After the candidates have been added to conncheck_list, the function also calls conn_check_remote_candidates_set() to do some processing of the remote candidates, mainly for the case where the peer's connectivity checks reached us before we sent out our own (from the source comment: "This function handles the special case where answerer has sent us connectivity checks before the answer (containing candidate information) reaches us.").
Then, if the number of candidates added to conncheck_list is greater than zero, conn_check_schedule_next is called to schedule the next check on conncheck_list.

/*
 * Initiates the next pending connectivity check.
 * 
 * @return TRUE if a pending check was scheduled
 */
gboolean conn_check_schedule_next (NiceAgent *agent)
{
  gboolean res = priv_conn_check_unfreeze_next (agent);
  nice_debug ("Agent %p : priv_conn_check_unfreeze_next returned %d", agent, res);

  if (agent->discovery_unsched_items > 0)
    nice_debug ("Agent %p : WARN: starting conn checks before local candidate gathering is finished.", agent);

  /* step: call once imediately */
  res = priv_conn_check_tick_unlocked (agent);
  nice_debug ("Agent %p : priv_conn_check_tick_unlocked returned %d", agent, res);

  /* step: schedule timer if not running yet */
  if (res && agent->conncheck_timer_source == NULL) {
    agent_timeout_add_with_context (agent, &agent->conncheck_timer_source,
        "Connectivity check schedule", agent->timer_ta,
        priv_conn_check_tick, agent);
  }

  /* step: also start the keepalive timer */
  if (agent->keepalive_timer_source == NULL) {
    agent_timeout_add_with_context (agent, &agent->keepalive_timer_source,
        "Connectivity keepalive timeout", NICE_AGENT_TIMER_TR_DEFAULT,
        priv_conn_keepalive_tick, agent);
  }

  nice_debug ("Agent %p : conn_check_schedule_next returning %d", agent, res);
  return res;
}

We know that when a pair is added to the connectivity-check list its state is NICE_CHECK_FROZEN, so this function first calls priv_conn_check_unfreeze_next, which unfreezes the highest-priority pair and moves it into the NICE_CHECK_WAITING state.
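
Conceptually (a simplified sketch, not the exact libnice code, which searches across all streams), unfreezing the next pair just means promoting the first FROZEN entry of the priority-sorted conncheck_list to WAITING:

static gboolean
sketch_unfreeze_next (GSList *conncheck_list)
{
  GSList *i;

  for (i = conncheck_list; i; i = i->next) {
    CandidateCheckPair *p = i->data;

    if (p->state == NICE_CHECK_FROZEN) {
      p->state = NICE_CHECK_WAITING;
      return TRUE;
    }
  }
  return FALSE;
}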

It then calls priv_conn_check_tick_unlocked(). This function is fairly important, so let's step into it first:

/*
 * Timer callback that handles initiating and managing connectivity
 * checks (paced by the Ta timer).
 *
 * This function is designed for the g_timeout_add() interface.
 *
 * @return will return FALSE when no more pending timers.
 */
static gboolean priv_conn_check_tick_unlocked (NiceAgent *agent)
{
  CandidateCheckPair *pair = NULL;
  gboolean keep_timer_going = FALSE;
  GSList *i, *j;
  GTimeVal now;

  /* step: process ongoing STUN transactions */
  g_get_current_time (&now);

  /* step: find the highest priority waiting check and send it */
  for (i = agent->streams; i ; i = i->next) {
    Stream *stream = i->data;

    pair = priv_conn_check_find_next_waiting (stream->conncheck_list);
    if (pair)
      break;
  }

  if (pair) {
    priv_conn_check_initiate (agent, pair);
    keep_timer_going = TRUE;
  } else {
    keep_timer_going = priv_conn_check_unfreeze_next (agent);
  }

  for (j = agent->streams; j; j = j->next) {
    Stream *stream = j->data;
    gboolean res =
      priv_conn_check_tick_stream (stream, agent, &now);
    if (res)
      keep_timer_going = res;
  }

  /* step: stop timer if no work left */
  if (keep_timer_going != TRUE) {
    nice_debug ("Agent %p : %s: stopping conncheck timer", agent, G_STRFUNC);
    for (i = agent->streams; i; i = i->next) {
      Stream *stream = i->data;
      priv_update_check_list_failed_components (agent, stream);
      for (j = stream->components; j; j = j->next) {
        Component *component = j->data;
        priv_update_check_list_state_for_ready (agent, stream, component);
      }
    }

    /* Stopping the timer so destroy the source.. this will allow
       the timer to be reset if we get a set_remote_candidates after this
       point */
    if (agent->conncheck_timer_source != NULL) {
      g_source_destroy (agent->conncheck_timer_source);
      g_source_unref (agent->conncheck_timer_source);
      agent->conncheck_timer_source = NULL;
    }

    /* XXX: what to signal, is all processing now really done? */
    nice_debug ("Agent %p : changing conncheck state to COMPLETED.", agent);
  }

  return keep_timer_going;
}

It first calls priv_conn_check_find_next_waiting() to fetch a pair in the NICE_CHECK_WAITING state, then calls priv_conn_check_initiate() to configure the pair and initiate the check: it sets the timing for this check (timer_ta in _NiceAgent), sets the pair's state to NICE_CHECK_IN_PROGRESS, and finally calls conn_check_send() to send the connectivity-check data to the peer. The check mainly carries the following information (a small sketch of how the USERNAME is built follows the list):

    • username (for USERNAME attribute)
    • password (for MESSAGE-INTEGRITY)
    • priority (for PRIORITY)
    • ICE-CONTROLLED/ICE-CONTROLLING (for role conflicts)
    • USE-CANDIDATE (if sent by the controlling agent)
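
As a small sketch of how the first two items are used (an illustrative helper, not a libnice function): per RFC 5245 §7.1.2.3 the USERNAME of an outgoing check is the peer's ufrag, a colon, and our own ufrag, and MESSAGE-INTEGRITY is keyed with the peer's password.

static gchar *
sketch_conncheck_username (const gchar *remote_ufrag, const gchar *local_ufrag)
{
  /* "remote:local" when we are the side sending the Binding request */
  return g_strdup_printf ("%s:%s", remote_ufrag, local_ufrag);
}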

After conn_check_send(), the function iterates over the streams and calls priv_conn_check_tick_stream() on each one. That function mainly handles the state of each pair, for example retransmitting checks that have timed out or destroying pairs that have failed.
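
Purely as an illustration of that timeout handling (the real priv_conn_check_tick_stream drives retransmission through libnice's STUN timer helpers; the fields and the retry/RTO constants below are hypothetical):

typedef struct {
  NiceCheckState state;      /* FROZEN / WAITING / IN_PROGRESS / ... */
  guint retransmissions;     /* hypothetical retry counter */
  gint64 next_deadline_us;   /* hypothetical retransmission deadline */
} SketchPair;

static void
sketch_handle_in_progress (SketchPair *p, gint64 now_us, guint max_retries)
{
  if (p->state != NICE_CHECK_IN_PROGRESS || now_us < p->next_deadline_us)
    return;

  if (p->retransmissions < max_retries) {
    /* resend the STUN Binding request and push the deadline out */
    p->retransmissions++;
    p->next_deadline_us = now_us + 200 * 1000;   /* illustrative RTO */
  } else {
    p->state = NICE_CHECK_FAILED;
  }
}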

Based on the return value of priv_conn_check_tick_stream(), it then decides whether any pairs still have unfinished checks that require the timer to keep running. If everything is done, it iterates over the streams and calls priv_update_check_list_failed_components() to handle the failed components, then walks the components that succeeded and cancels pairs in the check list with lower priority than the succeeded pair, which avoids many unnecessary checks; finally the timer is destroyed.

This completes one round of sending a connectivity check and handling the pair states.

Back in conn_check_schedule_next(): after the send has been handled, it checks whether the timer needs to keep running; if so and the timer does not exist yet, a new one is created with priv_conn_check_tick() as the timer callback (this normally only happens the first time). The keepalive timer (keepalive_timer_source) is started as well if it is not running yet.

This completes the process of adding new remote_candidates.
