1. Introduction
Memcached is a high-performance, open-source, distributed memory object caching system. The words "high performance" and "cache" immediately raise the question of how memcached manages its memory, which is the core of the system. The analysis is split into the hashtable module, the slab allocation mechanism, and the LRU eviction policy; this chapter covers the hashtable module. memcached's hash table is an array of buckets that stores pointers to item structures. The points analyzed here are the table's segment locks, its insert/lookup/delete operations, and its dynamic expansion.
2. Hash table segment locks
memcached is multithreaded, so locks are needed to keep concurrent hash table operations safe. What is a segment lock? Rather than one lock per key, several keys may share a single lock: different keys can hash into the same lock bucket, and all of them then contend for that one mutex.
The locks are initialized in memcached_thread_init:
void memcached_thread_init(int nthreads, void *arg) {
//....
if (nthreads < 3) {
power = 10;
} else if (nthreads < 4) {
power = 11;
} else if (nthreads < 5) {
power = 12;
} else if (nthreads <= 10) {
power = 13;
} else if (nthreads <= 20) {
power = 14;
} else {
/* 32k buckets. just under the hashpower default. */
power = 15;
}
//the item-lock table must have fewer buckets than the hash table
//(my take: power == hashpower would also be acceptable and would reduce lock contention; power > hashpower would simply waste memory)
if (power >= hashpower) {
fprintf(stderr, "Hash table power size (%d) cannot be equal to or less than item lock table (%d)\n", hashpower, power);
fprintf(stderr, "Item lock table grows with `-t N` (worker threadcount)\n");
fprintf(stderr, "Hash table grows with `-o hashpower=N` \n");
exit(1);
}
//number of item locks
item_lock_count = hashsize(power);
item_lock_hashpower = power;
//allocate the lock table
//unlike the hash table, the lock table never grows; after a later hash expansion, more hash buckets map onto the same lock
item_locks = calloc(item_lock_count, sizeof(pthread_mutex_t));
if (! item_locks) {
perror("Can't allocate item locks");
exit(1);
}
//initialize each mutex
for (i = 0; i < item_lock_count; i++) {
pthread_mutex_init(&item_locks[i], NULL);
}
}
An example of the lock in use:
//using the segment lock
enum store_item_type store_item(item *item, int comm, conn* c) {
enum store_item_type ret;
uint32_t hv;
//compute the hash value for this item's key
hv = hash(ITEM_key(item), item->nkey);
//acquire the segment lock covering this hash value (locks the bucket)
item_lock(hv);
//store the item into the bucket's chain
ret = do_store_item(item, comm, c, hv);
//release the lock
item_unlock(hv);
return ret;
}
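For reference, item_lock and item_unlock used above simply mask the hash value down to item_lock_hashpower bits and index the lock array, which is why many hash buckets end up sharing one mutex. A simplified sketch of the thread.c code (details vary slightly across versions):
//thread.c, simplified: the lock index is only the low item_lock_hashpower bits of hv,
//so every key whose hash shares those low bits contends for the same mutex
void item_lock(uint32_t hv) {
    pthread_mutex_lock(&item_locks[hv & hashmask(item_lock_hashpower)]);
}

void item_unlock(uint32_t hv) {
    pthread_mutex_unlock(&item_locks[hv & hashmask(item_lock_hashpower)]);
}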
3. Hash table insert, lookup and delete
Before looking at the hash table operations we should introduce an important memcached structure, item. An item is the node that stores one cached entry; its memory is allocated from a slab (analyzed in detail later). The hash table's job is to index these item nodes so that cached data can later be retrieved quickly by key.
The item structure:
/**
* Structure for storing items within memcached.
*/
typedef struct _stritem {
/* Protected by LRU locks */
struct _stritem *next;
struct _stritem *prev;
/* Rest are protected by an item lock */
//h_next points to the next item in the same hash bucket's chain
//concurrent access to a bucket's chain is protected by the item lock
struct _stritem *h_next; /* hash chain next */
rel_time_t time; /* least recent access */
rel_time_t exptime; /* expire time */
int nbytes; /* size of data */
unsigned short refcount;
uint8_t nsuffix; /* length of flags-and-length string */
uint8_t it_flags; /* ITEM_* above */
uint8_t slabs_clsid;/* which slab class we're in */
uint8_t nkey; /* key length, w/terminating null and padding */
/* this odd type prevents type-punning issues when we do
* the little shuffle to save space when not using CAS. */
union {
uint64_t cas;
char end;
} data[];
/* if it_flags & ITEM_CAS we have 8 bytes CAS */
/* then null-terminated key */
/* then " flags length\r\n" (no terminating null) */
/* then data with terminating \r\n (no terminating null; it's binary!) */
} item;
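Note that the key, the "flags length" suffix and the value are not named fields; they all live in the flexible data[] member at the end of the struct, which is why the code above reads the key through the ITEM_key macro. Roughly, from memcached.h (the exact form varies by version):
//memcached.h, simplified: the key starts right after data[],
//shifted by 8 bytes when the item carries a CAS value (it_flags & ITEM_CAS)
#define ITEM_key(item) (((char*)&((item)->data)) \
    + (((item)->it_flags & ITEM_CAS) ? sizeof(uint64_t) : 0))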
Hash table initialization
main calls assoc_init to set up the hashtable:
void assoc_init(const int hashtable_init) {
//if an initial hashpower was requested (via -o hashpower=N), use it
if (hashtable_init) {
hashpower = hashtable_init;
}
//hashsize(hashpower) gives the number of buckets in the table
//e.g. with hashpower = 16, hashsize yields 1<<16 = 65536 buckets
primary_hashtable = calloc(hashsize(hashpower), sizeof(void *));
if (! primary_hashtable) {
fprintf(stderr, "Failed to init hashtable.\n");
exit(EXIT_FAILURE);
}
STATS_LOCK();
stats_state.hash_power_level = hashpower;
stats_state.hash_bytes = hashsize(hashpower) * sizeof(void *);
STATS_UNLOCK();
}
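The hashsize and hashmask helpers used here (and throughout assoc.c) are plain bit-shift macros; roughly (the integer type differs between versions):
//assoc.c, simplified: hashpower N means 2^N buckets;
//hashmask(N) keeps only the low N bits of a hash value
#define hashsize(n) ((uint64_t)1 << (n))
#define hashmask(n) (hashsize(n) - 1)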
Hash table lookup
The lookup proceeds as follows:
1. Compute a hash value hv from the key.
2. Acquire the segment lock for hv (thread-safe access).
3. Use hv to locate the bucket in the hash table.
4. Walk the bucket's singly linked chain and compare keys until the matching item is found.
item *item_get(const char *key, const size_t nkey, conn *c, const bool do_update) {
item *it;
uint32_t hv;
hv = hash(key, nkey);
//acquire the lock so that concurrent access to the hash table is safe
item_lock(hv);
//look the item up in the hash table by key
it = do_item_get(key, nkey, hv, c, do_update);
//release the lock
item_unlock(hv);
return it;
}
item *do_item_get(const char *key, const size_t nkey, const uint32_t hv, conn *c, const bool do_update) {
//assoc_find does the actual hash table lookup
item *it = assoc_find(key, nkey, hv);
//...
}
item *assoc_find(const char *key, const size_t nkey, const uint32_t hv) {
item *it;
unsigned int oldbucket;
//expanding tells whether the hash table is currently being expanded
//expanding == true means an expansion is in progress
//briefly: expansion allocates a bigger table, then copies the old table's contents bucket by bucket into the new one
//expand_bucket records how far that migration has progressed
//e.g. if the old table old_hashtable has 65536 buckets and 10000 of them have already been copied into the new table primary_hashtable,
//then expand_bucket == 10000
if (expanding &&
(oldbucket = (hv & hashmask(hashpower - 1))) >= expand_bucket)
{
//1. the table is being expanded, and
//2. oldbucket, computed with the old table's mask hashmask(hashpower - 1) (hashpower was already bumped by assoc_expand),
//   is >= expand_bucket, i.e. that bucket has not been migrated yet, so the item is still in old_hashtable
it = old_hashtable[oldbucket];
} else {
it = primary_hashtable[hv & hashmask(hashpower)];
}
item *ret = NULL;
int depth = 0;
//walk the bucket's singly linked chain
while (it) {
//compare the key bytes
if ((nkey == it->nkey) && (memcmp(key, ITEM_key(it), nkey) == 0)) {
ret = it;
break;
}
it = it->h_next;
++depth;
}
MEMCACHED_ASSOC_FIND(key, nkey, depth);
return ret;
}
Inserting a new item into the hash table:
int assoc_insert(item *it, const uint32_t hv) {
unsigned int oldbucket;
//...
//see the assoc_find analysis above for this expansion check
if (expanding &&
(oldbucket = (hv & hashmask(hashpower - 1))) >= expand_bucket)
{
it->h_next = old_hashtable[oldbucket];
old_hashtable[oldbucket] = it;
} else {
it->h_next = primary_hashtable[hv & hashmask(hashpower)];
primary_hashtable[hv & hashmask(hashpower)] = it;
}
//...
return 1;
}
Deleting an item from the hash table:
void assoc_delete(const char *key, const size_t nkey, const uint32_t hv) {
//find the node that matches key, nkey and hv
//call the node we are looking for pnode; its predecessor prev has prev->h_next pointing at pnode
//before is the address of that prev->h_next slot (or of the bucket head pointer when pnode is first in the chain),
//i.e. the address of the pointer that currently points at pnode
item **before = _hashitem_before(key, nkey, hv);
if (*before) {
item *nxt;
/* The DTrace probe cannot be triggered as the last instruction
* due to possible tail-optimization by the compiler
*/
MEMCACHED_ASSOC_DELETE(key, nkey);
nxt = (*before)->h_next;
(*before)->h_next = 0; /* probably pointless, but whatever. */
*before = nxt;
return;
}
/* Note: we never actually get here. the callers don't delete things
they can't find. */
assert(*before != 0);
}
/* returns the address of the item pointer before the key. if *item == 0,
the item wasn't found */
static item** _hashitem_before (const char *key, const size_t nkey, const uint32_t hv) {
item **pos;
unsigned int oldbucket;
//see the assoc_find analysis above for this expansion check
if (expanding &&
(oldbucket = (hv & hashmask(hashpower - 1))) >= expand_bucket)
{
pos = &old_hashtable[oldbucket];
} else {
pos = &primary_hashtable[hv & hashmask(hashpower)];
}
//walk the chain until the key matches
while (*pos && ((nkey != (*pos)->nkey) || memcmp(key, ITEM_key(*pos), nkey))) {
pos = &(*pos)->h_next;
}
return pos;
}
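The item** idiom in _hashitem_before is the classic pointer-to-pointer deletion trick: by carrying the address of the pointer slot that refers to the current node (either the bucket head or some predecessor's h_next), assoc_delete can unlink a node without treating the head of the chain as a special case. A standalone sketch of the same trick on a minimal singly linked list (illustration only, not memcached code):

#include <stdio.h>
#include <stdlib.h>

struct node { int key; struct node *next; };

//remove the first node whose key matches, via a pointer to the pointer slot
static void list_delete(struct node **head, int key) {
    struct node **pos = head;
    while (*pos && (*pos)->key != key)
        pos = &(*pos)->next;        //advance to the next pointer slot
    if (*pos) {
        struct node *victim = *pos;
        *pos = victim->next;        //works for the head and the middle alike
        free(victim);
    }
}

int main(void) {
    struct node *head = NULL;
    for (int i = 3; i >= 1; i--) {  //build the list 1 -> 2 -> 3
        struct node *n = malloc(sizeof *n);
        n->key = i; n->next = head; head = n;
    }
    list_delete(&head, 1);          //deleting the head needs no special case
    for (struct node *p = head; p; p = p->next)
        printf("%d ", p->key);      //prints: 2 3
    return 0;
}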
4. Hash table expansion
When does the hash table expand? memcached registers a clock callback with libevent that periodically checks whether the table needs to grow; the expansion itself is carried out by a dedicated thread that memcached starts just for this purpose.
The expansion-check callback:
int main(){
//...
/* initialise clock event */
clock_handler(0, 0, 0);
//....
}
/* libevent uses a monotonic clock when available for event scheduling. Aside
* from jitter, simply ticking our internal timer here is accurate enough.
* Note that users who are setting explicit dates for expiration times *must*
* ensure their clocks are correct before starting memcached. */
static void clock_handler(const int fd, const short which, void *arg) {
//...
// While we're here, check for hash table expansion.
// This function should be quick to avoid delaying the timer.
//check whether the hash table needs to expand
assoc_start_expand(stats_state.curr_items);
evtimer_set(&clockevent, clock_handler, 0);
event_base_set(main_base, &clockevent);
evtimer_add(&clockevent, &t);
//...
}
void assoc_start_expand(uint64_t curr_items) {
//an expansion has already been requested, nothing to do
if (started_expanding)
return;
//curr_items is the number of items currently held in the hash table
//expand once curr_items exceeds 1.5x the number of buckets,
//e.g. with hashpower = 16 (65536 buckets) expansion starts when curr_items > 98304
if (curr_items > (hashsize(hashpower) * 3) / 2 &&
hashpower < HASHPOWER_MAX) {
started_expanding = true;
//wake the maintenance thread via the condition variable
pthread_cond_signal(&maintenance_cond);
}
}
The expansion thread:
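The thread itself is created once at startup by start_assoc_maintenance_thread. A simplified sketch (assoc.c, details vary by version); note that hash_bulk_move, the number of buckets migrated per lock acquisition in the loop below, defaults to 1 and can be overridden through the MEMCACHED_HASH_BULK_MOVE environment variable:

//assoc.c, simplified: spawn the expansion thread at startup
int start_assoc_maintenance_thread() {
    int ret;
    char *env = getenv("MEMCACHED_HASH_BULK_MOVE");
    if (env != NULL) {
        hash_bulk_move = atoi(env);
        if (hash_bulk_move == 0) {
            hash_bulk_move = DEFAULT_HASH_BULK_MOVE;
        }
    }
    pthread_mutex_init(&maintenance_lock, NULL);
    if ((ret = pthread_create(&maintenance_tid, NULL,
                              assoc_maintenance_thread, NULL)) != 0) {
        fprintf(stderr, "Can't create thread: %s\n", strerror(ret));
        return -1;
    }
    return 0;
}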
//memcached runs hash table expansion in a dedicated thread
static void *assoc_maintenance_thread(void *arg) {
mutex_lock(&maintenance_lock);
while (do_run_maintenance_thread) {
int ii = 0;
/* There is only one expansion thread, so no need to global lock. */
//expanding starts out false; once assoc_start_expand signals the condition variable,
//assoc_expand runs (see below) and sets expanding to true
for (ii = 0; ii < hash_bulk_move && expanding; ++ii) {
item *it, *next;
unsigned int bucket;
void *item_lock = NULL;
/* bucket = hv & hashmask(hashpower) =>the bucket of hash table
* is the lowest N bits of the hv, and the bucket of item_locks is
* also the lowest M bits of hv, and N is greater than M.
* So we can process expanding with only one item_lock. cool! */
//see the assoc_find analysis for what expand_bucket means
//try to lock the bucket currently being migrated
if ((item_lock = item_trylock(expand_bucket))) {
//walk the bucket's chain, moving every node from old_hashtable into primary_hashtable
for (it = old_hashtable[expand_bucket]; NULL != it; it = next) {
next = it->h_next;
bucket = hash(ITEM_key(it), it->nkey) & hashmask(hashpower);
it->h_next = primary_hashtable[bucket];
primary_hashtable[bucket] = it;
}
old_hashtable[expand_bucket] = NULL;
//move on to the next bucket
expand_bucket++;
//once expand_bucket reaches the old table's size (hashsize(hashpower - 1)), the migration is done
if (expand_bucket == hashsize(hashpower - 1)) {
//expansion finished
expanding = false;
free(old_hashtable);
STATS_LOCK();
stats_state.hash_bytes -= hashsize(hashpower - 1) * sizeof(void *);
stats_state.hash_is_expanding = false;
STATS_UNLOCK();
if (settings.verbose > 1)
fprintf(stderr, "Hash table expansion done\n");
}
} else {
usleep(10*1000);
}
if (item_lock) {
item_trylock_unlock(item_lock);
item_lock = NULL;
}
}
if (!expanding) {
/* We are done expanding.. just wait for next invocation */
started_expanding = false;
//the thread blocks here on the condition variable
//and wakes up once assoc_start_expand calls pthread_cond_signal
pthread_cond_wait(&maintenance_cond, &maintenance_lock);
/* assoc_expand() swaps out the hash table entirely, so we need
* all threads to not hold any references related to the hash
* table while this happens.
* This is instead of a more complex, possibly slower algorithm to
* allow dynamic hash table expansion without causing significant
* wait times.
*/
pause_threads(PAUSE_ALL_THREADS);
//prepare the expansion (allocate the new, larger table)
assoc_expand();
pause_threads(RESUME_ALL_THREADS);
}
}
return NULL;
}
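item_trylock used in the loop above goes through the same lock table as item_lock, but with pthread_mutex_trylock, so the maintenance thread backs off (the usleep branch) instead of blocking worker threads. Roughly (thread.c, simplified):

void *item_trylock(uint32_t hv) {
    pthread_mutex_t *lock = &item_locks[hv & hashmask(item_lock_hashpower)];
    if (pthread_mutex_trylock(lock) == 0) {
        return lock;    //caller keeps the pointer and releases it later
    }
    return NULL;        //lock is busy; caller sleeps briefly and retries
}

void item_trylock_unlock(void *lock) {
    pthread_mutex_unlock((pthread_mutex_t *) lock);
}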
/* grows the hashtable to the next power of 2. */
static void assoc_expand(void) {
//keep the old table around so its contents can be migrated
old_hashtable = primary_hashtable;
//allocate the new table, twice as large
primary_hashtable = calloc(hashsize(hashpower + 1), sizeof(void *));
if (primary_hashtable) {
if (settings.verbose > 1)
fprintf(stderr, "Hash table expansion starting\n");
hashpower++;
//assoc_maintenance_thread watches this flag and starts migrating buckets
expanding = true;
expand_bucket = 0;
STATS_LOCK();
stats_state.hash_power_level = hashpower;
stats_state.hash_bytes += hashsize(hashpower) * sizeof(void *);
stats_state.hash_is_expanding = true;
STATS_UNLOCK();
} else {
primary_hashtable = old_hashtable;
/* Bad news, but we can keep running. */
}
}