Linux内存管理之slab机制(释放对象)

 Linux内核中将对象释放到slab中,上层所用函数为kfree()和kmem_cache_free()。两个函数最终都会调用__cache_free()函数。

代码执行流程:

1,当本地CPU cache中空闲对象数小于规定上限时,只需将对象放入本地CPU cache中;

2,当local cache中对象过多(大于等于规定上限),需要释放一批对象到slab三链中。由函数cache_flusharray()实现。

1)如果三链中存在共享本地cache,那么首先选择释放到共享本地cache中,能释放多少是多少;

2)如果没有shared local cache,则释放对象到slab三链中,实现函数为free_block()。在free_block()函数中,当三链中的空闲对象数过多时,直接销毁对象所在的整个slab;否则,将此slab添加到空闲链表。因为在分配的时候我们看到slab结构已从cache链表中脱离,所以这里根据page描述符的lru找到slab,并将它重新添加到三链的相应链表中。

主实现

/*
 * Release an obj back to its cache. If the obj has a constructed state, it must
 * be in this state _before_ it is released.  Called with disabled ints.
 */
static inline void __cache_free(struct kmem_cache *cachep, void *objp)
{
	/* Per-CPU local array cache of this kmem_cache. */
	struct array_cache *ac = cpu_cache_get(cachep);

	check_irq_off();
	kmemleak_free_recursive(objp, cachep->flags);
	/* Debug builds verify/poison the object; returns the real objp. */
	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));

	kmemcheck_slab_free(cachep, objp, obj_size(cachep));

	/*
	 * Skip calling cache_free_alien() when the platform is not numa.
	 * This will avoid cache misses that happen while accessing slabp (which
	 * is per page memory  reference) to get nodeid. Instead use a global
	 * variable to skip the call, which is mostly likely to be present in
	 * the cache.
	 */
	/* NUMA: if the object belongs to a remote node, cache_free_alien()
	 * queues it there and we are done. */
	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
		return;

	if (likely(ac->avail < ac->limit)) {
		/* Fast path: the local cache still has room below its limit,
		 * so simply push the object onto the entry[] array. */
		STATS_INC_FREEHIT(cachep);
		ac->entry[ac->avail++] = objp;
		return;
	} else {
		/* Local cache is full (avail >= limit). */
		STATS_INC_FREEMISS(cachep);
		/* Flush a batch of objects back to the per-node slab lists,
		 * which frees up room at the end of entry[]. */
		cache_flusharray(cachep, ac);
		ac->entry[ac->avail++] = objp;
	}
}

释放对象到三链中

/*local cache中对象过多,需要释放一批对象到slab三链中。*/
/*
 * Flush a batch of objects from an overfull per-CPU array cache back to
 * the local node's slab lists (the full/partial/free "three lists").
 * Called from __cache_free() when ac->avail has reached ac->limit.
 */
static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
{
	int batchcount;
	struct kmem_list3 *l3;
	int node = numa_node_id();
	/* How many objects to move out in one go. */
	batchcount = ac->batchcount;
#if DEBUG
	BUG_ON(!batchcount || batchcount > ac->avail);
#endif
	check_irq_off();
	/* Per-node slab lists (kmem_list3) of this cache. */
	l3 = cachep->nodelists[node];
	spin_lock(&l3->list_lock);
	if (l3->shared) {
		/* A per-node shared array cache exists: prefer dumping
		 * objects there, as many as will fit. */
		struct array_cache *shared_array = l3->shared;
		/* Room left in the shared array cache. */
		int max = shared_array->limit - shared_array->avail;
		if (max) {
			/* Cap the batch at the available room. */
			if (batchcount > max)
				batchcount = max;
			/* Copy the oldest (front-most) entries of the local
			 * cache into the shared cache; the front entries are
			 * the ones least recently used. */
			memcpy(&(shared_array->entry[shared_array->avail]),
			       ac->entry, sizeof(void *) * batchcount);
			/* Account for the newly stored objects. */
			shared_array->avail += batchcount;
			goto free_done;
		}
	}
	/* No shared cache (or no room in it): return the objects to the
	 * slab lists themselves. */
	free_block(cachep, ac->entry, batchcount, node);
free_done:
#if STATS
	{
		/* Statistics: count the slabs sitting on the free list. */
		int i = 0;
		struct list_head *p;

		p = l3->slabs_free.next;
		while (p != &(l3->slabs_free)) {
			struct slab *slabp;

			slabp = list_entry(p, struct slab, list);
			BUG_ON(slabp->inuse);

			i++;
			p = p->next;
		}
		STATS_SET_FREEABLE(cachep, i);
	}
#endif
	spin_unlock(&l3->list_lock);
	/* The first batchcount entries have been flushed. */
	ac->avail -= batchcount;
	/* Close the gap: slide the surviving pointers to the front of
	 * entry[] (regions overlap, hence memmove). */
	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
}

无shared local cache(或其已满)时,由free_block()将对象释放到slab三链中

/*
 * Caller needs to acquire correct kmem_list's list_lock
 */
 /*释放一定数目的对象*/
/*
 * Caller needs to acquire correct kmem_list's list_lock
 */
/* Release nr_objects objects from objpp[] back to the slabs they came
 * from, on the given node's slab lists. */
static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
		       int node)
{
	int i;
	struct kmem_list3 *l3;
	/* Return the objects one by one to the node's slab lists. */
	for (i = 0; i < nr_objects; i++) {
		void *objp = objpp[i];
		struct slab *slabp;
		/* Virtual address -> page -> owning slab descriptor. */
		slabp = virt_to_slab(objp);
		/* Per-node slab lists of this cache. */
		l3 = cachep->nodelists[node];
		/* Unlink the slab first; it is re-inserted on the
		 * appropriate list (free/partial) below. */
		list_del(&slabp->list);
		check_spinlock_acquired_node(cachep, node);
		check_slabp(cachep, slabp);
		/* Push the object back onto the slab's free list. */
		slab_put_obj(cachep, slabp, objp, node);
		STATS_DEC_ACTIVE(cachep);
		/* One more free object on this node. */
		l3->free_objects++;
		check_slabp(cachep, slabp);

		/* fixup slab chains */
		if (slabp->inuse == 0) {
			/* The slab is now completely free. */
			if (l3->free_objects > l3->free_limit) {
				/* The node holds too many free objects:
				 * give the whole slab back to the page
				 * allocator, subtracting its per-slab
				 * object count from the free total. */
				l3->free_objects -= cachep->num;
				/* No need to drop any previously held
				 * lock here, even if we have a off-slab slab
				 * descriptor it is guaranteed to come from
				 * a different cache, refer to comments before
				 * alloc_slabmgmt.
				 */
				/* Destroy the slab (descriptor and pages). */
				slab_destroy(cachep, slabp);
			} else {
				/* Keep it around on the free-slab list. */
				list_add(&slabp->list, &l3->slabs_free);
			}
		} else {
			/* Unconditionally move a slab to the end of the
			 * partial list on free - maximum time for the
			 * other objects to be freed, too.
			 */
			/* Some objects still in use: tail of partial list. */
			list_add_tail(&slabp->list, &l3->slabs_partial);
		}
	}
}

将对象释放到其slab

/*
 * Put an object back onto its slab's free list.
 *
 * The slab threads its free objects through the kmem_bufctl_t array as
 * a static linked list: slabp->free holds the index of the first free
 * object, and slab_bufctl(slabp)[i] holds the index of the free object
 * that follows object i.  Releasing an object is therefore a push onto
 * the head of that list.
 */
static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
				void *objp, int nodeid)
{
	/* Index of this object within the slab / bufctl array. */
	unsigned int obj_idx = obj_to_index(cachep, slabp, objp);

#if DEBUG
	/* Verify that the slab belongs to the intended node */
	WARN_ON(slabp->nodeid != nodeid);

	/* A bufctl entry that already looks like a list link means the
	 * object was never handed out (or freed twice). */
	if (slab_bufctl(slabp)[obj_idx] + 1 <= SLAB_LIMIT + 1) {
		printk(KERN_ERR "slab: double free detected in cache "
				"'%s', objp %p\n", cachep->name, objp);
		BUG();
	}
#endif
	/* Head insertion into the static linked list: the old head
	 * becomes this object's successor... */
	slab_bufctl(slabp)[obj_idx] = slabp->free;
	/* ...and the freed object becomes the new head. */
	slabp->free = obj_idx;
	/* One fewer object in use on this slab. */
	slabp->inuse--;
}

辅助函数

/* 通过虚拟地址得到page,再通过page得到slab */
/* Map an object's virtual address to the slab that owns it:
 * address -> head page -> slab descriptor. */
static inline struct slab *virt_to_slab(const void *obj)
{
	return page_get_slab(virt_to_head_page(obj));
}
/* Return the slab descriptor stashed in page->lru.prev when the page
 * was allocated to a slab (set at slab creation time); the page must
 * actually belong to a slab. */
static inline struct slab *page_get_slab(struct page *page)
{
	struct slab *slabp;

	BUG_ON(!PageSlab(page));
	slabp = (struct slab *)page->lru.prev;
	return slabp;
}

可见,用page->lru.prev得到slab,和创建slab时相呼应。

你可能感兴趣的:(linux内存管理,Linux内核学习笔记)