一、5种malloc方法
1)tcache_alloc_small
2)arena_malloc_small
3)tcache_alloc_large
4)arena_malloc_large
5)huge_malloc
//written in jemalloc_internal.h file imalloct(size_t size, bool try_tcache, arena_t *arena) { assert(size != 0); if (size <= arena_maxclass) return (arena_malloc(arena, size, false, try_tcache)); else return (huge_malloc(size, false, huge_dss_prec_get(arena))); } JEMALLOC_ALWAYS_INLINE void * imalloc(size_t size) { return (imalloct(size, true, NULL)); } //written in arena.h JEMALLOC_ALWAYS_INLINE void * arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache) { tcache_t *tcache; assert(size != 0); assert(size <= arena_maxclass); if (size <= SMALL_MAXCLASS) { if (try_tcache && (tcache = tcache_get(true)) != NULL) return (tcache_alloc_small(tcache, size, zero)); else { return (arena_malloc_small(choose_arena(arena), size, zero)); } } else { /* * Initialize tcache after checking size in order to avoid * infinite recursion during tcache initialization. */ if (try_tcache && size <= tcache_maxclass && (tcache = tcache_get(true)) != NULL) return (tcache_alloc_large(tcache, size, zero)); else { return (arena_malloc_large(choose_arena(arena), size, zero)); } } }
二、malloc时选择arena的机制
1)先用arenas_tsd_get获得线程绑定变量arena;
2)如果1)获得值为null,则扫描arenas数组,找到threads number为0或者最小的未卸载的arena;
3)如果2)中扫描发现存在null插槽,则需要调用arenas_extend进行初始化null插槽;
4)调用arenas_tsd_set设置线程绑定变量arena。
//详细见jemalloc.c中函数 arena_t *choose_arena_hard(void)
三、arenas_extend处理过程
在choose_arena处理过程中,找到一个null空arena后,需要对该arena做初始化,即调用arenas_extend函数,它的处理过程如下:
1)base_alloc arena_t对象;
2)arena_new arena_t对象;
/*
 * Allocate and initialize the arena for slot `ind` of the global arenas
 * array.  On success the new arena is published in arenas[ind] and
 * returned; on OOM the error is logged and arenas[0] is returned as a
 * last-resort fallback (see comment below).
 */
arena_t *arenas_extend(unsigned ind) {
	arena_t *ret;

	/* Step 1: carve an arena_t out of the base allocator. */
	ret = (arena_t *)base_alloc(sizeof(arena_t));
	/* Step 2: initialize it; arena_new() returns false on success. */
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path. Instead, punt
	 * by using arenas[0]. In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}
四、base_alloc处理过程
1)数据对齐CACHELINE_CEILING,即按缓存行大小对齐(通常为64字节);
2)调用base_pages_alloc申请足够内存,从而调用chunk_alloc申请chunk对象;
3)chunk_alloc过程:如果采用dss优先的申请方式,则尝试sbrk这种方式申请,再尝试mmap这种方式;反之,则反序。
/*
 * Allocate a chunk of `size` bytes aligned to `alignment`, trying the
 * allocation strategies in an order controlled by `dss_prec`:
 *   dss_prec_primary:   recycle-dss -> sbrk -> recycle-mmap -> mmap
 *   otherwise:          recycle-mmap -> mmap, then dss as "secondary"
 * `base` is true when allocating for jemalloc's internal base allocator
 * (skips rtree registration); `*zero` is an in/out zeroed-memory flag.
 *
 * Fixes vs. the pasted snippet: restored the lost "void *" return type,
 * and removed a stray "1" token (page-number artifact) before the final
 * return statement, which made the snippet invalid C.
 */
void *
chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
    dss_prec_t dss_prec)
{
	void *ret;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	/* "primary" dss. */
	if (config_dss && dss_prec == dss_prec_primary) {
		if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss,
		    size, alignment, base, zero)) != NULL)
			goto label_return;
		if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
			goto label_return;
	}
	/* mmap. */
	if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size,
	    alignment, base, zero)) != NULL)
		goto label_return;
	if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
		goto label_return;
	/* "secondary" dss. */
	if (config_dss && dss_prec == dss_prec_secondary) {
		if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss,
		    size, alignment, base, zero)) != NULL)
			goto label_return;
		if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
			goto label_return;
	}

	/* All strategies for allocation failed. */
	ret = NULL;
label_return:
	if (ret != NULL) {
		/* Register non-base chunks in the rtree for ivsalloc. */
		if (config_ivsalloc && base == false) {
			if (rtree_set(chunks_rtree, (uintptr_t)ret, 1)) {
				chunk_dealloc(ret, size, true);
				return (NULL);
			}
		}
		if (config_stats || config_prof) {
			bool gdump;
			malloc_mutex_lock(&chunks_mtx);
			if (config_stats)
				stats_chunks.nchunks += (size / chunksize);
			stats_chunks.curchunks += (size / chunksize);
			if (stats_chunks.curchunks >
			    stats_chunks.highchunks) {
				stats_chunks.highchunks =
				    stats_chunks.curchunks;
				/* New high-water mark: maybe dump profile. */
				if (config_prof)
					gdump = true;
			} else if (config_prof)
				gdump = false;
			malloc_mutex_unlock(&chunks_mtx);
			if (config_prof && opt_prof && opt_prof_gdump &&
			    gdump)
				prof_gdump();
		}
		if (config_valgrind)
			VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
	}
	return (ret);
}