Linux memory management: kmalloc

   This post covers only physical memory management in the Linux kernel. I have read plenty of material explaining memory management, but whenever I tried to summarize it myself I never knew where to start, so let's begin from the actual physical-memory allocation interfaces.

kmalloc allocates physically contiguous memory and does not zero the memory it hands out. How large a request can it satisfy? That is bounded by KMALLOC_MAX_SIZE (see the size table below). By default it allocates from low memory, i.e. ZONE_NORMAL; it never returns ZONE_HIGHMEM pages, and it only dips into ZONE_DMA when the caller passes GFP_DMA. In general, physical memory is divided into three zones: DMA, NORMAL and HIGHMEM.
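Before diving into the implementation, a minimal usage sketch (not part of the original walkthrough; my_buf_init/my_buf_exit are hypothetical helpers, and the error handling is only illustrative):

#include <linux/slab.h>
#include <linux/string.h>

static void *my_buf_init(size_t len)
{
    void *buf = kmalloc(len, GFP_KERNEL);   /* physically contiguous, may sleep */

    if (!buf)
        return NULL;
    memset(buf, 0, len);    /* kmalloc does not zero; kzalloc() would do this for us */
    return buf;
}

static void my_buf_exit(void *buf)
{
    kfree(buf);
}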

This function is built on top of the slab allocator: the request is routed to a kmem_cache, and that cache in turn hands out objects (objs) from its slabs.
Before analysing the kmalloc function itself, we should mention the Linux kernel's physical-memory allocation APIs:
__get_free_pages calls alloc_pages underneath. Its particularity is that it cannot allocate from HIGHMEM (it returns a kernel logical address), and it allocates 2^order physically contiguous pages. It has a simplified single-page form, __get_free_page, while get_zeroed_page returns a page whose contents are filled with zeros. To allocate DMA-capable memory you can call __get_dma_pages (which is essentially __get_free_pages with GFP_DMA). The lowest-level interface, alloc_pages, can allocate from any zone, provided the corresponding flags are set.
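A quick sketch of these page-level interfaces (illustrative only; the order value 2 is arbitrary):

#include <linux/gfp.h>

static void page_api_sketch(void)
{
    unsigned long addr, zpage;

    addr = __get_free_pages(GFP_KERNEL, 2);   /* 2^2 = 4 contiguous pages, low memory */
    if (addr)
        free_pages(addr, 2);

    zpage = get_zeroed_page(GFP_KERNEL);      /* one page, contents cleared to zero */
    if (zpage)
        free_page(zpage);
}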
Reference kernel: linux 3.18.13
Reference books: Linux Kernel Development, Linux Device Drivers, 《深入理解linux设备驱动内核机制》
Now let's walk through kmalloc (the GFP flags passed at allocation time are not discussed here; see the references for details).
Let's start with the header:

#include <linux/slab.h>

Which implementation backs it depends on the configured allocator; see slab.h:



#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#elif defined(CONFIG_SLOB)
#include <linux/slob_def.h>
#else
#include <linux/slab_def.h>
#endif


In this article we follow the classic SLAB case, i.e. #include <linux/slab_def.h>.


static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
    struct kmem_cache *cachep;
    void *ret;

    if (__builtin_constant_p(size)) {
        int i = 0;

        if (!size)
            return ZERO_SIZE_PTR;

#define CACHE(x) \
        if (size <= x) \
            goto found; \
        else \
            i++;
#include <linux/kmalloc_sizes.h>   /* find the size class the request falls in;
                                      sizes start at 32 and roughly double, and i is
                                      incremented for every class that is too small */
#undef CACHE
        return NULL;
found:
#ifdef CONFIG_ZONE_DMA
        if (flags & GFP_DMA)
            cachep = malloc_sizes[i].cs_dmacachep;   /* if ZONE_DMA is configured and GFP_DMA
                                                        is set, allocate from the DMA cache of
                                                        this size class; malloc_sizes[] is
                                                        initialized in slab.c and is worth a
                                                        closer look */
        else
#endif
            cachep = malloc_sizes[i].cs_cachep;      /* otherwise the general cache of this size
                                                        class, so little space is wasted */

        ret = kmem_cache_alloc_trace(cachep, flags, size);

        return ret;
    }
    return __kmalloc(size, flags);
}
For reference, here is kmalloc_sizes.h, which supplies the size classes:


#if (PAGE_SIZE == 4096)
    CACHE(32)
#endif
    CACHE(64)
#if L1_CACHE_BYTES < 64
    CACHE(96)
#endif
    CACHE(128)
#if L1_CACHE_BYTES < 128
    CACHE(192)
#endif
    CACHE(256)
    CACHE(512)
    CACHE(1024)
    CACHE(2048)
    CACHE(4096)
    CACHE(8192)
    CACHE(16384)
    CACHE(32768)
    CACHE(65536)
    CACHE(131072)
#if KMALLOC_MAX_SIZE >= 262144
    CACHE(262144)
#endif
#if KMALLOC_MAX_SIZE >= 524288
    CACHE(524288)
#endif
#if KMALLOC_MAX_SIZE >= 1048576
    CACHE(1048576)
#endif
#if KMALLOC_MAX_SIZE >= 2097152
    CACHE(2097152)
#endif
#if KMALLOC_MAX_SIZE >= 4194304
    CACHE(4194304)
#endif
#if KMALLOC_MAX_SIZE >= 8388608
    CACHE(8388608)
#endif
#if KMALLOC_MAX_SIZE >= 16777216
    CACHE(16777216)
#endif
#if KMALLOC_MAX_SIZE >= 33554432
    CACHE(33554432)
#endif
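The CACHE()/#include trick above is just an unrolled size-class lookup. A standalone sketch of the equivalent logic (the table below is an abbreviated, assumed subset of the real sizes):

#include <stddef.h>

static const size_t size_classes[] = { 32, 64, 96, 128, 192, 256, 512, 1024 };

static int size_to_index(size_t size)
{
    int i;

    for (i = 0; i < (int)(sizeof(size_classes) / sizeof(size_classes[0])); i++)
        if (size <= size_classes[i])
            return i;          /* the "goto found" case: malloc_sizes[i] fits */
    return -1;                 /* larger than every class: kmalloc returns NULL */
}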
A note on the start of the function:
__builtin_constant_p is a gcc built-in that tests whether a value is a compile-time constant: it returns 1 for a constant and 0 otherwise. Its typical use is inside macros for manual compile-time optimization. So when size is a compile-time constant, kmalloc resolves the size class at compile time as shown above; when it is not, the call falls through to __kmalloc(size, flags).
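A tiny user-space illustration of __builtin_constant_p (gcc/clang only; not kernel code):

#include <stdio.h>

#define IS_CONST(x) (__builtin_constant_p(x) ? "constant" : "not constant")

int main(int argc, char **argv)
{
    int n = argc * 32;                    /* value only known at run time */

    (void)argv;
    printf("64 is %s\n", IS_CONST(64));   /* "constant" */
    printf("n  is %s\n", IS_CONST(n));    /* "not constant" */
    return 0;
}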
In the constant-size path, kmalloc looks up which general cache the requested size belongs to and then calls:


#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
#else
static __always_inline void *
kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
    return kmem_cache_alloc(cachep, flags);
}
#endif
Here is the actual allocation function:


/**
 * kmem_cache_alloc - Allocate an object
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 *
 * Allocate an object from this cache. The flags are only relevant
 * if the cache has no available objects.
 */
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
    void *ret = slab_alloc(cachep, flags, _RET_IP_);

    trace_kmem_cache_alloc(_RET_IP_, ret,                    /* used for tracing/debugging */
             cachep->object_size, cachep->size, flags);

    return ret;
}
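kmem_cache_alloc is also used directly by drivers that create their own object caches. A usage sketch (my_obj and my_cache are illustrative names, not from the original post):

#include <linux/slab.h>
#include <linux/errno.h>

struct my_obj {
    int id;
    char name[32];
};

static struct kmem_cache *my_cache;

static int my_cache_init(void)
{
    my_cache = kmem_cache_create("my_obj_cache", sizeof(struct my_obj),
                                 0, SLAB_HWCACHE_ALIGN, NULL);
    return my_cache ? 0 : -ENOMEM;
}

static struct my_obj *my_obj_new(void)
{
    return kmem_cache_alloc(my_cache, GFP_KERNEL);   /* one object from the cache */
}

static void my_obj_free(struct my_obj *p)
{
    kmem_cache_free(my_cache, p);
}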
The real work is done by slab_alloc:


static __always_inline void *
slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
{
    unsigned long save_flags;
    void *objp;

    /*
     * gfp_allowed_mask is explained in gfp.h:
     * "gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
     *  GFP flags are used before interrupts are enabled. Once interrupts are
     *  enabled, it is set to __GFP_BITS_MASK while the system is running. During
     *  hibernation, it is used by PM to avoid I/O during memory allocation while
     *  devices are suspended."
     */
    flags &= gfp_allowed_mask;

    lockdep_trace_alloc(flags);                       /* debugging only */

    if (slab_should_failslab(cachep, flags))
        return NULL;

    cachep = memcg_kmem_get_cache(cachep, flags);

    cache_alloc_debugcheck_before(cachep, flags);
    local_irq_save(save_flags);
    objp = __do_cache_alloc(cachep, flags);
    local_irq_restore(save_flags);
    objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
    kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags,
                 flags);
    prefetchw(objp);

    if (likely(objp))
        kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size);

    if (unlikely((flags & __GFP_ZERO) && objp))
        memset(objp, 0, cachep->object_size);

    return objp;
}
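As a side note, the __GFP_ZERO memset at the end of slab_alloc is what makes kzalloc work; kzalloc is essentially the following sketch (here named my_kzalloc to keep it clearly illustrative):

static inline void *my_kzalloc(size_t size, gfp_t flags)
{
    return kmalloc(size, flags | __GFP_ZERO);   /* zeroed by the memset in slab_alloc */
}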

It calls objp = __do_cache_alloc(cachep, flags); apart from checking a few flags, __do_cache_alloc simply continues into

____cache_alloc(cachep, flags);

which is the unified entry point (it distinguishes NUMA from UMA; Linux defaults to UMA unless NUMA support is configured):


static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
    void *objp;
    struct array_cache *ac;
    bool force_refill = false;

    check_irq_off();

    ac = cpu_cache_get(cachep);
    if (likely(ac->avail)) {
        ac->touched = 1;
        objp = ac_get_obj(cachep, ac, flags, false);

        /*
         * Allow for the possibility all avail objects are not allowed
         * by the current flags
         */
        if (objp) {
            STATS_INC_ALLOCHIT(cachep);
            goto out;
        }
        force_refill = true;
    }

    STATS_INC_ALLOCMISS(cachep);
    objp = cache_alloc_refill(cachep, flags, force_refill);
    /*
     * the 'ac' may be updated by cache_alloc_refill(),
     * and kmemleak_erase() requires its correct value.
     */
    ac = cpu_cache_get(cachep);

out:
    /*
     * To avoid a false negative, if an object that is in one of the
     * per-CPU caches is leaked, we need to make sure kmemleak doesn't
     * treat the array pointers as a reference to the object.
     */
    if (objp)
        kmemleak_erase(&ac->entry[ac->avail]);
    return objp;
}
Assume this is the first allocation from the cache. Then, following the initialization of malloc_sizes[] in kmem_cache_init, the cache that kmalloc picks was set up with this function:


static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
{
    if (slab_state >= FULL)
        return enable_cpucache(cachep, gfp);

    if (slab_state == DOWN) {
        /*
         * Note: Creation of first cache (kmem_cache).
         * The setup_list3s is taken care
         * of by the caller of __kmem_cache_create
         */
        cachep->array[smp_processor_id()] = &initarray_generic.cache;
        slab_state = PARTIAL;
    } else if (slab_state == PARTIAL) {
        /*
         * Note: the second kmem_cache_create must create the cache
         * that's used by kmalloc(24), otherwise the creation of
         * further caches will BUG().
         */
        cachep->array[smp_processor_id()] = &initarray_generic.cache;

        /*
         * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
         * the second cache, then we need to set up all its list3s,
         * otherwise the creation of further caches will BUG().
         */
        set_up_list3s(cachep, SIZE_AC);
        if (INDEX_AC == INDEX_L3)
            slab_state = PARTIAL_L3;
        else
            slab_state = PARTIAL_ARRAYCACHE;
    } else {
        /* Remaining boot caches */
        cachep->array[smp_processor_id()] =
            kmalloc(sizeof(struct arraycache_init), gfp);

        if (slab_state == PARTIAL_ARRAYCACHE) {
            set_up_list3s(cachep, SIZE_L3);
            slab_state = PARTIAL_L3;
        } else {
            int node;
            for_each_online_node(node) {
                cachep->nodelists[node] =
                 kmalloc_node(sizeof(struct kmem_list3),
                        gfp, node);
                BUG_ON(!cachep->nodelists[node]);
                kmem_list3_init(cachep->nodelists[node]);
            }
        }
    }
    cachep->nodelists[numa_mem_id()]->next_reap =
            jiffies + REAPTIMEOUT_LIST3 +
            ((unsigned long)cachep) % REAPTIMEOUT_LIST3;

    cpu_cache_get(cachep)->avail = 0;
    cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;   /* 1 */
    cpu_cache_get(cachep)->batchcount = 1;
    cpu_cache_get(cachep)->touched = 0;
    cachep->batchcount = 1;
    cachep->limit = BOOT_CPUCACHE_ENTRIES;
    return 0;
}
Note that whatever the per-CPU array is assigned, its avail and related fields are initialized as above, so on the first allocation there is nothing cached. In that case ____cache_alloc falls into cache_alloc_refill below; if the array does have objects available, the obj pointer is returned directly:


static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
                            bool force_refill)
{
    int batchcount;
    struct kmem_list3 *l3;
    struct array_cache *ac;
    int node;

    check_irq_off();
    node = numa_mem_id();
    if (unlikely(force_refill))
        goto force_grow;
retry:
    ac = cpu_cache_get(cachep);
    batchcount = ac->batchcount;
    if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
        /*
         * If there was little recent activity on this cache, then
         * perform only a partial refill. Otherwise we could generate
         * refill bouncing.
         */
        batchcount = BATCHREFILL_LIMIT;
    }
    l3 = cachep->nodelists[node];

    BUG_ON(ac->avail > 0 || !l3);
    spin_lock(&l3->list_lock);

    /* See if we can refill from the shared array */
    if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) {
        l3->shared->touched = 1;
        goto alloc_done;
    }

    while (batchcount > 0) {
        struct list_head *entry;
        struct slab *slabp;
        /* Get slab alloc is to come from. */
        entry = l3->slabs_partial.next;
        if (entry == &l3->slabs_partial) {
            l3->free_touched = 1;
            entry = l3->slabs_free.next;
            if (entry == &l3->slabs_free)
                goto must_grow;
        }

        slabp = list_entry(entry, struct slab, list);
        check_slabp(cachep, slabp);
        check_spinlock_acquired(cachep);

        /*
         * The slab was either on partial or free list so
         * there must be at least one object available for
         * allocation.
         */
        BUG_ON(slabp->inuse >= cachep->num);

        while (slabp->inuse < cachep->num && batchcount--) {
            STATS_INC_ALLOCED(cachep);
            STATS_INC_ACTIVE(cachep);
            STATS_SET_HIGH(cachep);

            ac_put_obj(cachep, ac, slab_get_obj(cachep, slabp,
                                    node));
        }
        check_slabp(cachep, slabp);

        /* move slabp to correct slabp list: */
        list_del(&slabp->list);
        if (slabp->free == BUFCTL_END)
            list_add(&slabp->list, &l3->slabs_full);
        else
            list_add(&slabp->list, &l3->slabs_partial);
    }

must_grow:
    l3->free_objects -= ac->avail;
alloc_done:
    spin_unlock(&l3->list_lock);

    if (unlikely(!ac->avail)) {
        int x;
force_grow:
        x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);   /* cache_grow returns 1 on success */

        /* cache_grow can reenable interrupts, then ac could change. */
        ac = cpu_cache_get(cachep);
        node = numa_mem_id();

        /* no objects in sight? abort */
        if (!x && (ac->avail == 0 || force_refill))
            return NULL;

        if (!ac->avail)        /* objects refilled by interrupt? */
            goto retry;
    }
    ac->touched = 1;

    return ac_get_obj(cachep, ac, flags, force_refill);
}
Since this is the first use, the slab lists on the nodelist are all empty, so control reaches must_grow and cache_grow is called. cache_grow first computes the slab colouring offset, then calls kmem_getpages to allocate pages (2^cachep->gfporder of them); kmem_getpages returns the virtual address of the allocated pages.


/*
 * Grow (by 1) the number of slabs within a cache. This is called by
 * kmem_cache_alloc() when there are no active objs left in a cache.
 */
static int cache_grow(struct kmem_cache *cachep,
        gfp_t flags, int nodeid, void *objp)
{
    struct slab *slabp;
    size_t offset;
    gfp_t local_flags;
    struct kmem_list3 *l3;

    /*
     * Be lazy and only check for valid flags here, keeping it out of the
     * critical path in kmem_cache_alloc().
     */
    BUG_ON(flags & GFP_SLAB_BUG_MASK);
    local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);

    /* Take the l3 list lock to change the colour_next on this node */
    check_irq_off();
    l3 = cachep->nodelists[nodeid];
    spin_lock(&l3->list_lock);

    /* Get colour for the slab, and cal the next value. */
    offset = l3->colour_next;                 /* default 0 */
    l3->colour_next++;
    if (l3->colour_next >= cachep->colour)
        l3->colour_next = 0;
    spin_unlock(&l3->list_lock);

    offset *= cachep->colour_off;             /* the first time, offset is 0 */

    if (local_flags & __GFP_WAIT)
        local_irq_enable();

    /*
     * The test for missing atomic flag is performed here, rather than
     * the more obvious place, simply to reduce the critical path length
     * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
     * will eventually be caught here (where it matters).
     */
    kmem_flagcheck(cachep, flags);

    /*
     * Get mem for the objs. Attempt to allocate a physical page from
     * 'nodeid'.
     */
    if (!objp)
        objp = kmem_getpages(cachep, local_flags, nodeid);
    if (!objp)
        goto failed;

    /* Get slab management. */
    slabp = alloc_slabmgmt(cachep, objp, offset,
            local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
    if (!slabp)
        goto opps1;

    slab_map_pages(cachep, slabp, objp);

    cache_init_objs(cachep, slabp);

    if (local_flags & __GFP_WAIT)
        local_irq_disable();
    check_irq_off();
    spin_lock(&l3->list_lock);

    /* Make slab active. */
    list_add_tail(&slabp->list, &(l3->slabs_free));  /* add the new slab to the nodelist's slabs_free list */
    STATS_INC_GROWN(cachep);
    l3->free_objects += cachep->num;                 /* account the new objects: each slab holds cachep->num objs */
    spin_unlock(&l3->list_lock);
    return 1;
opps1:
    kmem_freepages(cachep, objp);
failed:
    if (local_flags & __GFP_WAIT)
        local_irq_disable();
    return 0;
}
Slab colouring is related to the hardware cache: by staggering where objects start, it tries to avoid cache conflict misses and improve efficiency (a cache-line placement issue). See Computer Systems: A Programmer's Perspective (《深入理解计算机系统》) for background.
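To make the colouring concrete, a small standalone sketch of the arithmetic cache_grow performs, with assumed values colour_off = 64 and colour = 4:

#include <stdio.h>

int main(void)
{
    unsigned int colour_off = 64;    /* typically cache_line_size() */
    unsigned int colour = 4;         /* left_over / colour_off */
    unsigned int colour_next = 0;
    int slab;

    for (slab = 0; slab < 6; slab++) {
        unsigned int offset = colour_next * colour_off;

        colour_next = (colour_next + 1) % colour;
        printf("slab %d: first object offset %u\n", slab, offset);
    }
    /* offsets cycle 0, 64, 128, 192, 0, 64, so consecutive slabs start
     * their objects on different cache lines */
    return 0;
}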

The details are in alloc_slabmgmt:


/*
 * Get the memory for a slab management obj.
 * For a slab cache when the slab descriptor is off-slab, slab descriptors
 * always come from malloc_sizes caches. The slab descriptor cannot
 * come from the same cache which is getting created because,
 * when we are searching for an appropriate cache for these
 * descriptors in kmem_cache_create, we search through the malloc_sizes array.
 * If we are creating a malloc_sizes cache here it would not be visible to
 * kmem_find_general_cachep till the initialization is complete.
 * Hence we cannot have slabp_cache same as the original cache.
 */
static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
                 int colour_off, gfp_t local_flags,
                 int nodeid)
{
    struct slab *slabp;

    if (OFF_SLAB(cachep)) {
        /*
         * Whether OFF_SLAB() is true is decided by CFLGS_OFF_SLAB,
         * set in __kmem_cache_create:
         *
         *   // Determine if the slab management is 'on' or 'off' slab.
         *   // (bootstrapping cannot cope with offslab caches so don't do
         *   // it too early on. Always use on-slab management when
         *   // SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
         *   if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init &&
         *       !(flags & SLAB_NOLEAKTRACE))
         *           // Size is large, assume best to place the slab management obj
         *           // off-slab (should allow better packing of objs).
         *           flags |= CFLGS_OFF_SLAB;
         */

        /* Slab management obj is off-slab. */
        slabp = kmem_cache_alloc_node(cachep->slabp_cache,
                     local_flags, nodeid);
        /*
         * If the first object in the slab is leaked (it's allocated
         * but no one has a reference to it), we want to make sure
         * kmemleak does not treat the ->s_mem pointer as a reference
         * to the object. Otherwise we will not report the leak.
         */
        kmemleak_scan_area(&slabp->list, sizeof(struct list_head),
                 local_flags);
        if (!slabp)
            return NULL;
    } else {
        slabp = objp + colour_off;         /* in __kmem_cache_create, cachep->colour_off = cache_line_size();
                                              cache.h defines cache_line_size() as L1_CACHE_BYTES, typically 32 bytes;
                                              cachep->colour = left_over / cachep->colour_off */
        colour_off += cachep->slab_size;
    }
    slabp->inuse = 0;                      /* number of objs active in the slab */
    slabp->colouroff = colour_off;         /* offset of the first obj from the start of the page(s) */
    slabp->s_mem = objp + colour_off;      /* address of the first obj */
    slabp->nodeid = nodeid;
    slabp->free = 0;
    return slabp;
}
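For the on-slab case, the layout alloc_slabmgmt sets up inside the pages returned by kmem_getpages looks roughly like this (a sketch, not to scale):

/*
 *  objp (start of the pages from kmem_getpages)
 *  |
 *  v
 *  +--------------+---------------------------+-------+-------+-----+
 *  | colour       | struct slab +             | obj 0 | obj 1 | ... |
 *  | padding      | kmem_bufctl_t[num]        |       |       |     |
 *  | (colour_off) | (cachep->slab_size bytes) |       |       |     |
 *  +--------------+---------------------------+-------+-------+-----+
 *  ^              ^                           ^
 *  objp           slabp                       slabp->s_mem
 */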
Now another important step, cache_init_objs:


static void cache_init_objs(struct kmem_cache *cachep,
             struct slab *slabp)
{
    int i;

    for (i = 0; i < cachep->num; i++) {
        void *objp = index_to_obj(cachep, slabp, i);
#if DEBUG
        /* need to poison the objs? */
        if (cachep->flags & SLAB_POISON)
            poison_obj(cachep, objp, POISON_FREE);
        if (cachep->flags & SLAB_STORE_USER)
            *dbg_userword(cachep, objp) = NULL;

        if (cachep->flags & SLAB_RED_ZONE) {
            *dbg_redzone1(cachep, objp) = RED_INACTIVE;
            *dbg_redzone2(cachep, objp) = RED_INACTIVE;
        }
        /*
         * Constructors are not allowed to allocate memory from the same
         * cache which they are a constructor for. Otherwise, deadlock.
         * They must also be threaded.
         */
        if (cachep->ctor && !(cachep->flags & SLAB_POISON))
            cachep->ctor(objp + obj_offset(cachep));

        if (cachep->flags & SLAB_RED_ZONE) {
            if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
                slab_error(cachep, "constructor overwrote the"
                     " end of an object");
            if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
                slab_error(cachep, "constructor overwrote the"
                     " start of an object");
        }
        if ((cachep->size % PAGE_SIZE) == 0 &&
             OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
            kernel_map_pages(virt_to_page(objp),
                     cachep->size / PAGE_SIZE, 0);
#else
        if (cachep->ctor)
            cachep->ctor(objp);            /* initialize the object with its constructor */
#endif
        slab_bufctl(slabp)[i] = i + 1;     /* initialize the bufctl array to 1, 2, 3, ...;
                                              the last entry is overwritten with BUFCTL_END below */
    }
    slab_bufctl(slabp)[i - 1] = BUFCTL_END;
}
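The bufctl array initialized above forms a simple free list of object indices: slabp->free holds the index of the next free object, and bufctl[i] holds the index that follows i. A standalone user-space sketch of the mechanism (NUM_OBJS and the BUFCTL_END value are illustrative):

#include <stdio.h>

#define NUM_OBJS   4
#define BUFCTL_END ((unsigned int)~0U)

static unsigned int bufctl[NUM_OBJS];
static unsigned int free_idx;             /* plays the role of slabp->free */

static void init_slab(void)
{
    unsigned int i;

    for (i = 0; i < NUM_OBJS; i++)
        bufctl[i] = i + 1;                /* as in cache_init_objs: 1, 2, 3, ... */
    bufctl[NUM_OBJS - 1] = BUFCTL_END;    /* last entry terminates the list */
    free_idx = 0;
}

static int get_obj(void)                  /* mirrors what slab_get_obj does */
{
    unsigned int objnr = free_idx;

    if (objnr == BUFCTL_END)
        return -1;                        /* no free object left in this slab */
    free_idx = bufctl[objnr];             /* advance the free list */
    return (int)objnr;
}

int main(void)
{
    int i;

    init_slab();
    for (i = 0; i < NUM_OBJS + 1; i++)
        printf("%d ", get_obj());         /* prints: 0 1 2 3 -1 */
    printf("\n");
    return 0;
}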















