The NAT44-ED flow table is built on a bihash of type 16_8, i.e. a 16-byte key and an 8-byte value. By default each thread holds 63K sessions, and the bihash is created with 32768 buckets.
int nat44_plugin_enable (nat44_config_t c)
{
snat_main_t *sm = &snat_main;
if (!c.sessions)
c.sessions = 63 * 1024;
sm->max_translations_per_thread = c.sessions;
vlib_stats_set_gauge (sm->max_cfg_sessions_gauge, sm->max_translations_per_thread);
sm->translation_buckets = nat_calc_bihash_buckets (c.sessions);
vec_add1 (sm->max_translations_per_fib, sm->max_translations_per_thread);
nat44_ed_db_init ();
The number of bihash buckets, translation_buckets, must be a power of two; the function below derives it from the 63K flow entries. For a 16_8 bihash, bihash_16_8.h defines four KV pairs per page (#define BIHASH_KVP_PER_PAGE 4), so in theory the bucket count could be set to 63K / 4 = 16128: the 63K entries would then spread evenly over 16128 buckets, one page per bucket, four KV pairs per page. In a bihash, a page is a clib_bihash_value structure.
Instead, the function scales the element count down by 2.5 and returns the power of two closest to that value, which for 63K entries is 32768. If the hash function spreads the entries evenly across these 32768 buckets, each page holds roughly two KV pairs, leaving headroom in every 4-slot page and making lookups more efficient.
static_always_inline u32
nat_calc_bihash_buckets (u32 n_elts)
{
n_elts = n_elts / 2.5;
u64 lower_pow2 = 1;
while (lower_pow2 * 2 < n_elts) {
lower_pow2 = 2 * lower_pow2;
}
u64 upper_pow2 = 2 * lower_pow2;
if ((upper_pow2 - n_elts) < (n_elts - lower_pow2))
{
if (upper_pow2 <= UINT32_MAX)
return upper_pow2;
}
return lower_pow2;
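As a quick sanity check, here is a small standalone sketch (not part of the plugin) that mirrors the calculation above for the default 63K sessions:
#include <stdio.h>
#include <stdint.h>
/* standalone mirror of nat_calc_bihash_buckets(), for illustration only */
static uint32_t calc_buckets (uint32_t n_elts)
{
  n_elts = n_elts / 2.5;                /* 63 * 1024 = 64512 -> 25804 */
  uint64_t lower_pow2 = 1;
  while (lower_pow2 * 2 < n_elts)
    lower_pow2 = 2 * lower_pow2;        /* -> 16384 */
  uint64_t upper_pow2 = 2 * lower_pow2; /* -> 32768 */
  if ((upper_pow2 - n_elts) < (n_elts - lower_pow2) && upper_pow2 <= UINT32_MAX)
    return upper_pow2;
  return lower_pow2;
}
int main (void)
{
  printf ("%u\n", calc_buckets (63 * 1024)); /* prints 32768 */
  return 0;
}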
The 8-byte value of a NAT flow entry stores the index of the worker thread owning the session in its high 4 bytes, and the session index, i.e. the index into the session pool, in its low 4 bytes. A given session is always handled by the same worker thread, which avoids locking between threads; see the handoff processing below.
always_inline u32
ed_value_get_thread_index (clib_bihash_kv_16_8_t *value)
{
return value->value >> 32;
}
always_inline u32
ed_value_get_session_index (clib_bihash_kv_16_8_t *value)
{
return value->value & ~(u32) 0;
}
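The value is packed the opposite way when a session is inserted (see nat_6t_flow_to_ed_kv further below). A minimal sketch of the packing, assuming the layout described above; ed_value_make is a hypothetical helper, not a function in the plugin:
always_inline u64
ed_value_make (u32 thread_index, u32 session_index)
{
  /* high 32 bits: worker thread index; low 32 bits: session pool index */
  return ((u64) thread_index << 32) | (u64) session_index;
}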
Initialize the global flow_hash structure and, for every worker thread, the per-thread session pool.
static void
nat44_ed_db_init ()
{
snat_main_t *sm = &snat_main;
snat_main_per_thread_data_t *tsm;
nat44_ed_flow_hash_init ();
vec_foreach (tsm, sm->per_thread_data) {
nat44_ed_worker_db_init (tsm, sm->max_translations_per_thread);
}
Initialize the flow hash table flow_hash, a clib_bihash_16_8_t shared by all worker threads. Each session consists of two flows, one per direction, so the number of flow_hash buckets is the product of the worker count, 2, and translation_buckets.
One might ask: could a symmetric hash over the two directions of a flow be used here, halving the number of buckets needed?
static void
nat44_ed_flow_hash_init ()
{
snat_main_t *sm = &snat_main;
// we expect 2 flows per session, so multiply translation_buckets by 2
clib_bihash_init_16_8 (
&sm->flow_hash, "ed-flow-hash",
clib_max (1, sm->num_workers) * 2 * sm->translation_buckets, 0);
clib_bihash_set_kvp_format_fn_16_8 (&sm->flow_hash, format_ed_session_kvp);
}
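For example, assuming a hypothetical deployment with 4 worker threads and the default 32768 translation buckets, flow_hash would be created with 4 * 2 * 32768 = 262144 buckets.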
For each worker thread the whole flow table is allocated up front; pool_alloc reserves all of the required memory at this point.
static void
nat44_ed_worker_db_init (snat_main_per_thread_data_t *tsm, u32 translations)
{
dlist_elt_t *head;
pool_alloc (tsm->per_vrf_sessions_pool, translations);
pool_alloc (tsm->sessions, translations);
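A session is later taken from this preallocated pool on the fast path. A minimal sketch of the idea, assuming the vppinfra pool macros (the exact body of nat_ed_session_alloc is not shown here):
/* illustration: grab a zeroed session from the preallocated per-thread pool */
snat_session_t *s;
pool_get_zero (tsm->sessions, s);
/* the pool index is what ends up in the low 32 bits of the bihash value */
u32 session_index = s - tsm->sessions;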
The function nat44_ed_get_in2out_worker_index builds the 16-byte bihash key from a 6-tuple: the packet's 5-tuple (source/destination IP address, source/destination port and protocol) plus the FIB index (VRF). It then looks up the 8-byte value in the global flow table flow_hash; the value holds the session index and the index of the worker thread that owns the session.
A non-NULL buffer b indicates the data forwarding path. Note that for fragmented packets, non-first fragments carry no source or destination ports, so the nat44 plugin requires packet reassembly to be enabled.
u32 nat44_ed_get_in2out_worker_index (vlib_buffer_t *b, ip4_header_t *ip, u32 rx_fib_index, u8 is_output)
{
snat_main_t *sm = &snat_main;
u32 next_worker_index = sm->first_worker_index;
clib_bihash_kv_16_8_t kv16, value16;
u32 fib_index = rx_fib_index;
if (b) {
init_ed_k (&kv16, ip->src_address.as_u32, vnet_buffer (b)->ip.reass.l4_src_port, ip->dst_address.as_u32,
vnet_buffer (b)->ip.reass.l4_dst_port, fib_index, ip->protocol);
if (!clib_bihash_search_16_8 (&sm->flow_hash, &kv16, &value16)) {
next_worker_index = ed_value_get_thread_index (&value16);
vnet_buffer2 (b)->nat.cached_session_index = ed_value_get_session_index (&value16);
goto out;
Compared with SNAT, for DNAT the source and destination IP addresses and ports are swapped as shown below to build a new key, and flow_hash is queried again. On a hit, the worker thread index is saved as the function's return value, and the session index is cached in the vnet_buffer structure for use by later nodes.
// dst NAT
init_ed_k (&kv16, ip->dst_address.as_u32,
vnet_buffer (b)->ip.reass.l4_dst_port, ip->src_address.as_u32,
vnet_buffer (b)->ip.reass.l4_src_port, rx_fib_index, ip->protocol);
if (!clib_bihash_search_16_8 (&sm->flow_hash, &kv16, &value16)) {
next_worker_index = ed_value_get_thread_index (&value16);
vnet_buffer2 (b)->nat.cached_dst_nat_session_index = ed_value_get_session_index (&value16);
goto out;
}
}
If no value is found in flow_hash, or if the buffer b is NULL (control plane), the following computes a hash over the flow and uses it to pick one of the worker threads.
hash = ip->src_address.as_u32 + (ip->src_address.as_u32 >> 8) +
(ip->src_address.as_u32 >> 16) + (ip->src_address.as_u32 >> 24) +
rx_fib_index + (rx_fib_index >> 8) + (rx_fib_index >> 16) + (rx_fib_index >> 24);
if (PREDICT_TRUE (is_pow2 (_vec_len (sm->workers))))
next_worker_index += sm->workers[hash & (_vec_len (sm->workers) - 1)];
else
next_worker_index += sm->workers[hash % _vec_len (sm->workers)];
out:
return next_worker_index;
The node snat_in2out_worker_handoff guarantees that packets of the same session are processed on the same worker thread.
VLIB_NODE_FN (snat_in2out_worker_handoff_node) (vlib_main_t * vm,
vlib_node_runtime_t * node, vlib_frame_t * frame)
{
return nat44_worker_handoff_fn_inline (vm, node, frame, 0, 1);
}
VLIB_REGISTER_NODE (snat_in2out_worker_handoff_node) = {
.name = "nat44-in2out-worker-handoff",
.vector_size = sizeof (u32),
.sibling_of = "nat-default",
.format_trace = format_nat44_handoff_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
When processing in2out packets, i.e. packets that have not yet been translated, the next node after nat44-in2out-worker-handoff is nat44-ed-in2out. If the worker thread that received the packet is not the thread owning the packet's session, the packet is handed over to that thread through a frame queue; fq_in2out_index is the index of this frame queue.
static inline uword
nat44_worker_handoff_fn_inline (vlib_main_t * vm,
vlib_node_runtime_t * node, vlib_frame_t * frame, u8 is_output, u8 is_in2out)
{
u16 thread_indices[VLIB_FRAME_SIZE], *ti = thread_indices;
snat_main_t *sm = &snat_main;
u32 fq_index, thread_index = vm->thread_index;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
vlib_get_buffers (vm, from, b, n_left_from);
if (is_in2out)
fq_index = is_output ? sm->fq_in2out_output_index : sm->fq_in2out_index;
The worker thread index for each packet is obtained by the nat44_ed_get_in2out_worker_index function above, and vlib_buffer_enqueue_to_thread then dispatches the packets to their target worker threads.
while (n_left_from > 0) {
ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[0]) + iph_offset0);
sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);
if (is_in2out)
ti[0] = nat44_ed_get_in2out_worker_index (b[0], ip0, rx_fib_index0, is_output);
else
ti[0] = nat44_ed_get_out2in_worker_index (b[0], ip0, rx_fib_index0, is_output);
if (ti[0] == thread_index) same_worker++;
else do_handoff++;
b += 1; ti += 1; n_left_from -= 1;
}
n_enq = vlib_buffer_enqueue_to_thread (vm, node, fq_index, from,
thread_indices, frame->n_vectors, 1);
The fq_in2out_index queue is created below when the nat44 plugin is enabled; the number of queue elements is given by frame_queue_nelts.
int
nat44_plugin_enable (nat44_config_t c)
{
if (sm->num_workers > 1) {
vlib_main_t *vm = vlib_get_main ();
vlib_node_t *node;
if (sm->fq_in2out_index == ~0) {
node = vlib_get_node_by_name (vm, (u8 *) "nat44-ed-in2out");
sm->fq_in2out_index = vlib_frame_queue_main_init (node->index, sm->frame_queue_nelts);
}
In the nat44-ed-in2out node function, the 6-tuple structure lookup is first initialized from the packet.
static inline uword
nat44_ed_in2out_fast_path_node_fn_inline (vlib_main_t *vm,
vlib_node_runtime_t *node, vlib_frame_t *frame,
int is_output_feature, int is_multi_worker)
{
while (n_left_from > 0) {
next[0] = vnet_buffer2 (b0)->nat.arc_next;
ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b0) + iph_offset0);
rx_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
tx_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
cntr_sw_if_index0 = is_output_feature ? tx_sw_if_index0 : rx_sw_if_index0;
rx_fib_index0 = fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4, rx_sw_if_index0);
lookup.fib_index = rx_fib_index0;
lookup.proto = ip0->protocol;
lookup.saddr.as_u32 = ip0->src_address.as_u32;
lookup.daddr.as_u32 = ip0->dst_address.as_u32;
lookup.sport = vnet_buffer (b0)->ip.reass.l4_src_port;
lookup.dport = vnet_buffer (b0)->ip.reass.l4_dst_port;
Next, using the session index found by the previous node and stashed in cached_session_index, the session structure s0 is fetched from the worker's session pool. Finally, the 6-tuple in lookup is compared against the 6-tuple stored in the session.
/* there might be a stashed index in vnet_buffer2 from handoff or classify node, see if it can be used */
if (is_multi_worker &&
!pool_is_free_index (tsm->sessions, vnet_buffer2 (b0)->nat.cached_session_index))
{
s0 = pool_elt_at_index (tsm->sessions, vnet_buffer2 (b0)->nat.cached_session_index);
if (PREDICT_TRUE (nat_6t_t_eq (&s0->i2o.match, &lookup)
// for some hairpinning cases there are two "i2i" flows instead of i2o and o2i as both hosts are on inside
|| (s0->flags & SNAT_SESSION_FLAG_HAIRPINNING && nat_6t_t_eq (&s0->o2i.match, &lookup)))) {
/* yes, this is the droid we're looking for */
lookup_skipped = 1;
goto skip_lookup;
}
s0 = NULL;
}
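nat_6t_t_eq here logically amounts to a full 6-tuple comparison. A field-by-field sketch, using the same member names as the lookup structure above (the real helper may well compare the tuple as wider words for speed):
/* illustrative equivalent of the 6-tuple comparison */
static inline int
nat_6t_eq_sketch (nat_6t_t *a, nat_6t_t *b)
{
  return a->saddr.as_u32 == b->saddr.as_u32 && a->daddr.as_u32 == b->daddr.as_u32
    && a->sport == b->sport && a->dport == b->dport
    && a->fib_index == b->fib_index && a->proto == b->proto;
}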
The slow path differs from the above: a new flow, not yet present in flow_hash, is handled in the nat44-ed-in2out-slowpath node, whose core function is slow_path_ed. Taking static-mapping NAT as an example, a session structure is allocated first and the tuples of both directions, in2out and out2in, are initialized; note that no packet has been received in the out2in direction yet.
static u32 slow_path_ed (vlib_main_t *vm, snat_main_t *sm, vlib_buffer_t *b, ...)
{
snat_session_t *s = NULL;
s = nat_ed_session_alloc (sm, thread_index, now, proto);
tx_fib_index = get_tx_fib_index (rx_fib_index, r_addr);
// static mapping
s->out2in.addr = outside_addr = sm_addr;
s->out2in.port = outside_port = sm_port;
s->in2out.addr = l_addr;
s->in2out.port = l_port;
s->proto = proto;
s->in2out.fib_index = rx_fib_index;
s->out2in.fib_index = tx_fib_index;
s->flags |= SNAT_SESSION_FLAG_STATIC_MAPPING;
Initialize the session's i2o structure (nat_6t_flow_t); its match member is what session lookups compare against. The function nat_ed_ses_i2o_flow_hash_add_del then adds the session to the global flow_hash table.
nat_6t_i2o_flow_init (sm, thread_index, s, l_addr, l_port, r_addr, r_port,
rx_fib_index, proto);
if (nat_ed_ses_i2o_flow_hash_add_del (sm, thread_index, s, 1)) {
nat_elog_notice (sm, "in2out key add failed");
goto error;
}
The key is generated from the 6-tuple in the i2o structure; the value holds the worker thread index together with the session's index in the session pool. The resulting KV pair is added to the global flow table flow_hash.
static_always_inline int
nat_ed_ses_i2o_flow_hash_add_del (snat_main_t *sm, u32 thread_idx,
snat_session_t *s, int is_add)
{
snat_main_per_thread_data_t *tsm = vec_elt_at_index (sm->per_thread_data, thread_idx);
clib_bihash_kv_16_8_t kv;
{
nat_6t_flow_to_ed_kv (&kv, &s->i2o, thread_idx, s - tsm->sessions);
nat_6t_l3_l4_csum_calc (&s->i2o);
}
ASSERT (thread_idx == s->thread_index);
return clib_bihash_add_del_16_8 (&sm->flow_hash, &kv, is_add);
The following command shows the global flow table ed-flow-hash.
DBGvpp# show nat44 hash tables detail
Hash table 'ed-flow-hash'
0 active elements 0 active buckets
0 free lists
0 linear search buckets
heap: 1 chunk(s) allocated
bytes: used 6.50m, scrap 0
-------- thread 0 vpp_main --------
Hash table 'ed-flow-hash'
0 active elements 0 active buckets
0 free lists
0 linear search buckets
heap: 1 chunk(s) allocated
bytes: used 6.50m, scrap 0
-------- hash table parameters --------
translation buckets: 32768