My understanding is limited, so please point out anything that is described inaccurately. For reposts, please credit the source: http://blog.csdn.net/vanbreaker/article/details/7673372
The slab allocator creates a new slab for a cache when both of the following conditions hold:
1. An object allocation is requested, but the local (per-CPU) array cache has no free objects left and needs to be refilled.
2. The lists maintained by kmem_list3 contain no slabs at all, or every slab sits on the full list.
When this happens, cache_grow() is called to create a new slab and enlarge the cache; the abridged refill path below shows where it is invoked.
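For context, here is a heavily abridged sketch of cache_alloc_refill(), based on the mm/slab.c of this kernel generation (locking, statistics and NUMA details are simplified, so treat it as an illustration rather than the verbatim source). It shows the point at which the allocator gives up on the existing slabs and calls cache_grow():

static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
{
    struct array_cache *ac = cpu_cache_get(cachep);
    int node = numa_node_id();

retry:
    /* 1. Try to move objects from the node's slabs_partial and
     *    slabs_free lists into the per-CPU array cache ... */

    if (unlikely(!ac->avail)) {
        /* 2. Nothing available anywhere: grow the cache. */
        int x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);

        /* cache_grow() may re-enable interrupts, so the array
         * cache must be looked up again before retrying. */
        ac = cpu_cache_get(cachep);
        if (!x && ac->avail == 0)  /* no memory in sight? abort */
            return NULL;

        if (!ac->avail)            /* retry with the new slab */
            goto retry;
    }
    ac->touched = 1;
    return ac->entry[--ac->avail];
}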
The code flow of cache_grow() is given below:
static int cache_grow(struct kmem_cache *cachep,
        gfp_t flags, int nodeid, void *objp)
{
    struct slab *slabp;
    size_t offset;
    gfp_t local_flags;
    struct kmem_list3 *l3;

    /*
     * Be lazy and only check for valid flags here, keeping it out of the
     * critical path in kmem_cache_alloc().
     */
    BUG_ON(flags & GFP_SLAB_BUG_MASK);
    local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);

    /* Take the l3 list lock to change the colour_next on this node */
    check_irq_off();
    l3 = cachep->nodelists[nodeid];
    spin_lock(&l3->list_lock);

    /* Get colour for the slab, and cal the next value. */
    /* determine the colour index of the slab about to be created */
    offset = l3->colour_next;
    /* advance the colour index for the next slab */
    l3->colour_next++;
    /* the colour index must stay below the number of colours */
    if (l3->colour_next >= cachep->colour)
        l3->colour_next = 0;
    spin_unlock(&l3->list_lock);

    /* convert the colour index of this slab into a byte offset */
    offset *= cachep->colour_off;

    if (local_flags & __GFP_WAIT)
        local_irq_enable();

    /*
     * The test for missing atomic flag is performed here, rather than
     * the more obvious place, simply to reduce the critical path length
     * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
     * will eventually be caught here (where it matters).
     */
    kmem_flagcheck(cachep, flags);

    /*
     * Get mem for the objs. Attempt to allocate a physical page from
     * 'nodeid'.
     */
    if (!objp)
        /* allocate page frames from the buddy system; this is the
         * interface between the slab allocator and the buddy system */
        objp = kmem_getpages(cachep, local_flags, nodeid);
    if (!objp)
        goto failed;

    /* Get slab management. */
    /* allocate the slab management area */
    slabp = alloc_slabmgmt(cachep, objp, offset,
            local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
    if (!slabp)
        goto opps1;

    /* map the pages to the slab and the cache, so that the slab and
     * cache descriptors can be located quickly from an object address */
    slab_map_pages(cachep, slabp, objp);

    /* initialize the objects */
    cache_init_objs(cachep, slabp);

    if (local_flags & __GFP_WAIT)
        local_irq_disable();
    check_irq_off();
    spin_lock(&l3->list_lock);

    /* Make slab active. */
    /* add the newly created slab to the free list */
    list_add_tail(&slabp->list, &(l3->slabs_free));
    STATS_INC_GROWN(cachep);
    l3->free_objects += cachep->num;
    spin_unlock(&l3->list_lock);
    return 1;
opps1:
    kmem_freepages(cachep, objp);
failed:
    if (local_flags & __GFP_WAIT)
        local_irq_disable();
    return 0;
}
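To make the colouring arithmetic concrete, here is a small stand-alone model of the rotation above (the values for the number of colours and the colour unit are made up for illustration; in the kernel they come from cachep->colour and cachep->colour_off). Successive slabs start their objects at offsets 0, 32, 64, 96 and then wrap back to 0, so objects with the same index in different slabs land on different cache lines:

#include <stdio.h>

#define COLOUR      4   /* hypothetical cachep->colour */
#define COLOUR_OFF  32  /* hypothetical cachep->colour_off */

int main(void)
{
    unsigned int colour_next = 0;  /* models l3->colour_next */
    int i;

    for (i = 0; i < 6; i++) {
        size_t offset = colour_next;   /* colour index of this slab */

        colour_next++;                 /* advance for the next slab */
        if (colour_next >= COLOUR)     /* stay below the colour count */
            colour_next = 0;

        offset *= COLOUR_OFF;          /* index -> byte offset */
        printf("slab %d: colour offset = %zu bytes\n", i, offset);
    }
    return 0;
}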
Helper functions:
Allocating page frames for the slab:
static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
    struct page *page;
    int nr_pages;
    int i;

#ifndef CONFIG_MMU
    /*
     * Nommu uses slab's for process anonymous memory allocations, and thus
     * requires __GFP_COMP to properly refcount higher order allocations
     */
    flags |= __GFP_COMP;
#endif

    flags |= cachep->gfpflags;
    if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
        flags |= __GFP_RECLAIMABLE;

    /* allocate 2^gfporder contiguous pages from the given node */
    page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
    if (!page)
        return NULL;

    nr_pages = (1 << cachep->gfporder);
    if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
        add_zone_page_state(page_zone(page),
            NR_SLAB_RECLAIMABLE, nr_pages);
    else
        add_zone_page_state(page_zone(page),
            NR_SLAB_UNRECLAIMABLE, nr_pages);
    for (i = 0; i < nr_pages; i++)
        __SetPageSlab(page + i);

    if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
        kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);

        if (cachep->ctor)
            kmemcheck_mark_uninitialized_pages(page, nr_pages);
        else
            kmemcheck_mark_unallocated_pages(page, nr_pages);
    }

    /* return the virtual address of the first page */
    return page_address(page);
}
Allocating space for the slab management area:
static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
                   int colour_off, gfp_t local_flags,
                   int nodeid)
{
    struct slab *slabp;

    /* if the management area is off-slab, allocate it from the
     * designated slabp_cache */
    if (OFF_SLAB(cachep)) {
        /* Slab management obj is off-slab. */
        slabp = kmem_cache_alloc_node(cachep->slabp_cache,
                          local_flags, nodeid);
        /*
         * If the first object in the slab is leaked (it's allocated
         * but no one has a reference to it), we want to make sure
         * kmemleak does not treat the ->s_mem pointer as a reference
         * to the object. Otherwise we will not report the leak.
         */
        kmemleak_scan_area(slabp, offsetof(struct slab, list),
                   sizeof(struct list_head), local_flags);
        if (!slabp)
            return NULL;
    } else {
        /* the management area lives inside the slab itself, starting
         * at the colour offset from the beginning of the slab */
        slabp = objp + colour_off;
        colour_off += cachep->slab_size;
    }
    slabp->inuse = 0;                 /* all objects are free */
    slabp->colouroff = colour_off;    /* update the offset of the first object */
    slabp->s_mem = objp + colour_off; /* locate the first object */
    slabp->nodeid = nodeid;           /* record the node */
    slabp->free = 0;                  /* the next free object is the one at s_mem */
    return slabp;
}
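To visualize what alloc_slabmgmt() computes in the on-slab case, here is a schematic of the resulting layout within the slab's pages (widths not to scale): the management area (struct slab plus the kmem_bufctl_t array) sits at the colour offset from the start of the pages, and the first object begins right after it, at slabp->s_mem:

objp                                     s_mem = objp + colouroff
|                                        |
v                                        v
+----------------+----------------------+-------+-------+-----+---------+
| colour padding | struct slab followed | obj 0 | obj 1 | ... | obj n-1 |
| (colour_off)   | by kmem_bufctl_t[num]|       |       |     |         |
+----------------+----------------------+-------+-------+-----+---------+
                 |<----- slab_size ---->|

In the off-slab case the management structure lives in a separate cache (cachep->slabp_cache) and the objects start directly at the colour offset.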
slab_map_pages() uses the lru field of the page descriptor to map each page frame to its slab descriptor and cache descriptor; concretely, lru.next is pointed at the cache descriptor and lru.prev at the slab descriptor. (The lru field is otherwise unused while a page belongs to the slab allocator, so it can be repurposed.)
static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
               void *addr)
{
    int nr_pages;
    struct page *page;

    page = virt_to_page(addr);

    nr_pages = 1;
    if (likely(!PageCompound(page)))
        nr_pages <<= cache->gfporder;  /* number of page frames given to the slab */

    do {
        page_set_cache(page, cache);   /* map the page to the cache */
        page_set_slab(page, slab);     /* map the page to the slab */
        page++;
    } while (--nr_pages);
}
static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
{
    page->lru.next = (struct list_head *)cache;
}

static inline void page_set_slab(struct page *page, struct slab *slab)
{
    page->lru.prev = (struct list_head *)slab;
}
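The point of this mapping is the reverse lookup: given an arbitrary object address, the allocator recovers the owning cache and slab in constant time. The matching accessors in the same era's mm/slab.c look roughly like this (quoted from memory, so treat as a sketch):

static inline struct kmem_cache *page_get_cache(struct page *page)
{
    page = compound_head(page);
    BUG_ON(!PageSlab(page));
    return (struct kmem_cache *)page->lru.next;  /* set by page_set_cache() */
}

static inline struct slab *page_get_slab(struct page *page)
{
    BUG_ON(!PageSlab(page));
    return (struct slab *)page->lru.prev;        /* set by page_set_slab() */
}

Helpers such as virt_to_cache()/virt_to_slab() then simply combine these with a virt_to_page() lookup on the object address.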
Initializing the objects:
static void cache_init_objs(struct kmem_cache *cachep,
                struct slab *slabp)
{
    int i;

    for (i = 0; i < cachep->num; i++) {
        /* get the i-th object */
        void *objp = index_to_obj(cachep, slabp, i);
#if DEBUG
        /* debug-related handling */
        /* need to poison the objs? */
        if (cachep->flags & SLAB_POISON)
            poison_obj(cachep, objp, POISON_FREE);
        if (cachep->flags & SLAB_STORE_USER)
            *dbg_userword(cachep, objp) = NULL;

        if (cachep->flags & SLAB_RED_ZONE) {
            *dbg_redzone1(cachep, objp) = RED_INACTIVE;
            *dbg_redzone2(cachep, objp) = RED_INACTIVE;
        }
        /*
         * Constructors are not allowed to allocate memory from the same
         * cache which they are a constructor for. Otherwise, deadlock.
         * They must also be threaded.
         */
        if (cachep->ctor && !(cachep->flags & SLAB_POISON))
            cachep->ctor(objp + obj_offset(cachep));

        if (cachep->flags & SLAB_RED_ZONE) {
            if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
                slab_error(cachep, "constructor overwrote the"
                       " end of an object");
            if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
                slab_error(cachep, "constructor overwrote the"
                       " start of an object");
        }
        if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
                OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
            kernel_map_pages(virt_to_page(objp),
                     cachep->buffer_size / PAGE_SIZE, 0);
#else
        if (cachep->ctor)  /* initialize the object with its constructor */
            cachep->ctor(objp);
#endif
        slab_bufctl(slabp)[i] = i + 1;  /* the next free object is the adjacent one */
    }
    slab_bufctl(slabp)[i - 1] = BUFCTL_END;
}
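After this loop, the bufctl array implements an index-linked free list: slabp->free holds the index of the first free object, and slab_bufctl(slabp)[i] holds the index of the free object that follows object i. The stand-alone model below (with a made-up count of four objects per slab) replays how slab_get_obj() later walks this chain on each allocation:

#include <stdio.h>

#define NUM         4                   /* hypothetical objects per slab */
#define BUFCTL_END  ((unsigned int)~0U) /* end-of-chain marker */

int main(void)
{
    unsigned int bufctl[NUM];
    unsigned int slab_free;             /* models slabp->free */
    int i;

    /* what cache_init_objs() sets up: 0 -> 1 -> 2 -> 3 -> END */
    for (i = 0; i < NUM; i++)
        bufctl[i] = i + 1;
    bufctl[NUM - 1] = BUFCTL_END;
    slab_free = 0;

    /* each allocation takes the object at index slab_free and then
     * advances slab_free along the chain, as slab_get_obj() does */
    while (slab_free != BUFCTL_END) {
        unsigned int idx = slab_free;

        slab_free = bufctl[idx];
        printf("allocated object %u, next free index = %u\n",
               idx, slab_free);
    }
    return 0;
}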
Destroying a slab means freeing the space occupied by the slab management area and the objects:
static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
{
    /* subtract the colour offset from the first object's address to
     * get the start address of the slab */
    void *addr = slabp->s_mem - slabp->colouroff;

    slab_destroy_debugcheck(cachep, slabp);
    /* if RCU-deferred destruction was selected, destroy the slab via
     * RCU (I don't fully understand this part yet) */
    if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
        struct slab_rcu *slab_rcu;

        slab_rcu = (struct slab_rcu *)slabp;
        slab_rcu->cachep = cachep;
        slab_rcu->addr = addr;
        call_rcu(&slab_rcu->head, kmem_rcu_free);
    } else {
        /* release the slab's page frames back to the buddy system */
        kmem_freepages(cachep, addr);
        /* if the slab's management area is off-slab, the management
         * object must be freed back to its own cache */
        if (OFF_SLAB(cachep))
            kmem_cache_free(cachep->slabp_cache, slabp);
    }
}
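Regarding the RCU branch: SLAB_DESTROY_BY_RCU defers the actual freeing until an RCU grace period has elapsed, so a reader that looked up an object under rcu_read_lock() can still safely dereference the memory even while the slab is being torn down. The RCU callback then performs the same two frees as the direct path; in the same era's mm/slab.c it looks roughly like this (sketch from memory):

static void kmem_rcu_free(struct rcu_head *head)
{
    /* struct slab_rcu overlays the slab descriptor, with the
     * rcu_head as its first member, so this cast is valid */
    struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
    struct kmem_cache *cachep = slab_rcu->cachep;

    /* same work as the non-RCU branch of slab_destroy(), just
     * deferred past a grace period */
    kmem_freepages(cachep, slab_rcu->addr);
    if (OFF_SLAB(cachep))
        kmem_cache_free(cachep->slabp_cache, slab_rcu);
}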