An efficient memory pool implementation (code)

mpool.h

#ifndef __febird_c_mpool_h__
#define __febird_c_mpool_h__

#include <stddef.h> /* size_t */

#ifdef __cplusplus
extern "C" {
#endif

//------------------------------------------------------------------------------------------
struct chunk_allocator; // forward declaration so the typedefs below refer to this struct

typedef void* (*chunk_alloc_ft)(struct chunk_allocator* self, size_t size);
typedef void* (*chunk_realloc_ft)(struct chunk_allocator* self, void* block, size_t old_size, size_t new_size);
typedef void  (*chunk_free_ft)(struct chunk_allocator* self, void* block, size_t size);

struct chunk_allocator {
    void* (*chunk_alloc)(struct chunk_allocator* self, size_t size);
    void* (*chunk_realloc)(struct chunk_allocator* self, void* block, size_t old_size, size_t new_size);
    void  (*chunk_free)(struct chunk_allocator* self, void* block, size_t size);
};

struct fixed_mpool {
    struct chunk_allocator* ca;
    struct mpool_chunk* pChunks;
    struct mpool_cell*  head;
    size_t iNextChunk;
    size_t nChunks;
    size_t cell_size;
    size_t chunk_size;
    size_t used_cells;
//  size_t free_cells;
};

// whether mpool may allocate blocks larger than max_cell_size
//#define FEBIRD_MPOOL_ALLOW_BIG_BLOCK

#ifdef FEBIRD_MPOOL_ALLOW_BIG_BLOCK
struct big_block_header {
    struct big_block_header *next, *prev;
};
#endif

struct mpool {
    //------------------------------------------------------------------------------------------
    /// export a chunk_allocator interface
    void* (*chunk_alloc)(struct chunk_allocator* self, size_t size);
    void* (*chunk_realloc)(struct chunk_allocator* self, void* block, size_t old_size, size_t new_size);
    void  (*chunk_free)(struct chunk_allocator* self, void* block, size_t size);
    //------------------------------------------------------------------------------------------

    /// chunk_allocator for this mpool itself
    struct chunk_allocator* ca;

    struct fixed_mpool* fixed;
    size_t max_cell_size;
    size_t chunk_size;

#ifdef FEBIRD_MPOOL_ALLOW_BIG_BLOCK
    size_t big_blocks; // size > max_cell_size, uses malloc, rare case
    struct big_block_header big_list;
#endif
};

void fixed_mpool_init(struct fixed_mpool* mpf);
void fixed_mpool_destroy(struct fixed_mpool* mpf);

void* fixed_mpool_alloc(struct fixed_mpool* mpf);
void  fixed_mpool_free(struct fixed_mpool* mpf, void* ptr);

void mpool_init(struct mpool* mp);
void mpool_destroy(struct mpool* mp);

void* mpool_alloc(struct mpool* mp, size_t size);
void  mpool_free(struct mpool* mp, void* ptr, size_t size);

size_t mpool_used_cells(const struct mpool* mp);
size_t mpool_used_bytes(const struct mpool* mp);

#ifdef __cplusplus
}
#endif

#endif // __febird_c_mpool_h__
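Before the implementation, a minimal usage sketch of the fixed_mpool interface declared above may help. It is not part of the original code; it assumes a zero-initialized struct whose cell_size, chunk_size and nChunks are filled in by the caller (the required fields are documented in mpool.c below), and the concrete sizes (24-byte cells, 8 KB chunks) are arbitrary example values.

#include <stdio.h>
#include "mpool.h"

int main(void) {
    struct fixed_mpool fmp = {0};   /* ca == NULL: fall back to the built-in malloc/free allocator */
    fmp.cell_size  = 24;            /* every allocation returns one 24-byte cell */
    fmp.chunk_size = 8 * 1024;      /* cells are carved out of 8 KB chunks */
    fmp.nChunks    = 16;            /* initial capacity of the chunk table (bumped to a minimum if too small) */
    fixed_mpool_init(&fmp);

    void* p = fixed_mpool_alloc(&fmp);   /* O(1): pop a cell off the free list */
    printf("cell at %p, used_cells=%zu\n", p, fmp.used_cells);
    fixed_mpool_free(&fmp, p);           /* O(1): push the cell back onto the free list */

    fixed_mpool_destroy(&fmp);           /* release every chunk at once */
    return 0;
}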

 

mpool.c

 

#include "stdafx.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include "mpool.h" /* must be power of 2 and greater than sizeof(void*) */ #define MPOOL_MIN_CELL 8 #define MPOOL_MIN_CHUNK 256 struct mpool_cell { struct mpool_cell* next; }; struct mpool_chunk { struct mpool_cell* cell; // cell array size_t size; // size in bytes; }; /**********************************************************************************/ /** * chunk_allocator use malloc/free */ static void* chunk_malloc_alloc(struct chunk_allocator* , size_t size) { return malloc(size); } static void chunk_malloc_free(struct chunk_allocator* , void* block, size_t size) { return free(block); } static void* chunk_malloc_realloc(struct chunk_allocator* , void* block, size_t old_size, size_t new_size) { return realloc(block, new_size); } /**********************************************************************************/ void* default_chunk_realloc(struct chunk_allocator* ca, void* ptr, size_t old_size, size_t new_size) { void* q = ca->chunk_alloc(ca, new_size); assert(old_size > 0); assert(new_size > 0); if (NULL == q) return NULL; memcpy(q, ptr, old_size < new_size ? old_size : new_size); ca->chunk_free(ca, ptr, old_size); return q; } static void mpool_chunk_init(struct mpool_cell* cell, size_t cell_count, size_t cell_size) { size_t i; struct mpool_cell* p = cell; assert(cell_size % MPOOL_MIN_CELL == 0); for (i = 0; i < cell_count-1; ++i) p = p->next = (struct mpool_cell*)((char*)p + cell_size); p->next = NULL; } /**********************************************************************************/ static struct chunk_allocator fal = { &chunk_malloc_alloc, &chunk_malloc_realloc, &chunk_malloc_free }; /**********************************************************************************/ /** * require initialized fields: * cell_size * chunk_size * ca [0 OR initialized] */ void fixed_mpool_init(struct fixed_mpool* mpf) { struct chunk_allocator* al; if (NULL == mpf->ca) al = mpf->ca = &fal; else { al = mpf->ca; assert(NULL != al->chunk_alloc); assert(NULL != al->chunk_free); if (NULL == al->chunk_realloc) al->chunk_realloc = &default_chunk_realloc; } assert(mpf->chunk_size > 0); assert(mpf->cell_size > 0); assert(mpf->cell_size < mpf->chunk_size); mpf->cell_size = (mpf->cell_size + MPOOL_MIN_CELL - 1) / MPOOL_MIN_CELL * MPOOL_MIN_CELL; mpf->chunk_size = (mpf->chunk_size + MPOOL_MIN_CHUNK - 1) / MPOOL_MIN_CHUNK * MPOOL_MIN_CHUNK; mpf->head = NULL; if (mpf->nChunks < MPOOL_MIN_CHUNK/sizeof(struct mpool_chunk)) mpf->nChunks = MPOOL_MIN_CHUNK/sizeof(struct mpool_chunk); mpf->iNextChunk = 0; mpf->pChunks = (struct mpool_chunk*) al->chunk_alloc(al, sizeof(struct mpool_chunk) * mpf->nChunks); if (NULL == mpf->pChunks) { fprintf(stderr , "fatal: febird.fixed_mpool_init failed, chunk[size=%zd, count=%zd]/n" , mpf->chunk_size, mpf->nChunks); abort(); } mpf->used_cells = 0; } void fixed_mpool_destroy(struct fixed_mpool* mpf) { struct chunk_allocator* ca = mpf->ca; long i; for (i = mpf->iNextChunk - 1; i >= 0; --i) ca->chunk_free(ca, mpf->pChunks[i].cell, mpf->pChunks[i].size); ca->chunk_free(ca, mpf->pChunks, sizeof(struct mpool_chunk) * mpf->nChunks); } // 0 success, -1 fail int fixed_mpool_add_chunk(struct fixed_mpool* mpf) { struct mpool_cell* cell; assert(mpf->pChunks != NULL); if (mpf->iNextChunk == mpf->nChunks) { size_t old_size = sizeof(struct mpool_chunk) * mpf->nChunks; size_t new_size = 2 * old_size; // allocate new chunk array struct mpool_chunk* c = (struct mpool_chunk*) mpf->ca->chunk_realloc(mpf->ca, 
mpf->pChunks, old_size, new_size); if (NULL == c) return -1; mpf->pChunks = c; mpf->nChunks *= 2; // chunk array expanded 2 mpf->chunk_size *= 2; // chunk_size expanded 2 also } // allocate a new cell array cell = (struct mpool_cell*)mpf->ca->chunk_alloc(mpf->ca, mpf->chunk_size); if (NULL == cell) return -1; mpf->pChunks[mpf->iNextChunk].cell = cell; mpf->pChunks[mpf->iNextChunk].size = mpf->chunk_size; mpf->iNextChunk++; mpool_chunk_init(cell, mpf->chunk_size / mpf->cell_size, mpf->cell_size); mpf->head = cell; return 0; } void* fixed_mpool_alloc(struct fixed_mpool* mpf) { struct mpool_cell* cell; if (NULL == mpf->head) { // in most case it will not run this path if (fixed_mpool_add_chunk(mpf) != 0) return NULL; } cell = mpf->head; mpf->used_cells++; mpf->head = mpf->head->next; return cell; } void fixed_mpool_free(struct fixed_mpool* mpf, void* ptr) { struct mpool_cell* cell = (struct mpool_cell*)ptr; cell->next = mpf->head; mpf->used_cells--; mpf->head = cell; } /**********************************************************************************/ /** * require initialized fields: * max_cell_size * chunk_size * ca [0 OR initialized] */ void mpool_init(struct mpool* mp) { int i, nFixed; struct chunk_allocator* al; assert(mp->max_cell_size < mp->chunk_size); if (NULL == mp->chunk_alloc) al = mp->ca = &fal; else { al = mp->ca; assert(NULL != al->chunk_alloc); assert(NULL != al->chunk_free); if (NULL == al->chunk_realloc) al->chunk_realloc = &default_chunk_realloc; } mp->chunk_alloc = (chunk_alloc_ft)&mpool_alloc; mp->chunk_free = (chunk_free_ft)&mpool_free; mp->chunk_realloc = (chunk_realloc_ft)&default_chunk_realloc; mp->max_cell_size = (mp->max_cell_size + MPOOL_MIN_CELL - 1) / MPOOL_MIN_CELL * MPOOL_MIN_CELL; mp->chunk_size = (mp->chunk_size + MPOOL_MIN_CHUNK - 1) / MPOOL_MIN_CHUNK * MPOOL_MIN_CHUNK; nFixed = mp->max_cell_size / MPOOL_MIN_CELL; mp->fixed = (struct fixed_mpool*)al->chunk_alloc(al, sizeof(struct fixed_mpool) * nFixed); if (NULL == mp->fixed) { fprintf(stderr, "fatal: febird.mpool_init[max_cell_size=%zd, chunk_size=%zd]/n" , mp->max_cell_size, mp->chunk_size); abort(); } for (i = 0; i < nFixed; ++i) { mp->fixed[i].cell_size = (i + 1) * MPOOL_MIN_CELL; mp->fixed[i].chunk_size = mp->chunk_size; mp->fixed[i].nChunks = 16; mp->fixed[i].ca = mp->ca; fixed_mpool_init(&mp->fixed[i]); } #ifdef FEBIRD_MPOOL_ALLOW_BIG_BLOCK mp->big_blocks = 0; mp->big_list.prev = mp->big_list.next = &mp->big_list; #endif } void mpool_destroy(struct mpool* mp) { size_t i, nFixed; #ifdef FEBIRD_MPOOL_ALLOW_BIG_BLOCK struct big_block_header *p; for (i = 0, p = mp->big_list.next; p != &mp->big_list; ++i) { struct big_block_header *q; if (i > mp->big_blocks) { fprintf(stderr, "fatal: febird.mpool_destroy/n"); abort(); } q = p->next; free(p); p = q; } assert(i == mp->big_blocks); #endif nFixed = mp->max_cell_size / MPOOL_MIN_CELL; for (i = 0; i < nFixed; ++i) { fixed_mpool_destroy(&mp->fixed[i]); } mp->ca->chunk_free(mp->ca, mp->fixed, sizeof(struct fixed_mpool) * nFixed); } void* mpool_alloc(struct mpool* mp, size_t size) { size_t idx; struct fixed_mpool* mpf; struct mpool_cell* cell; #ifdef FEBIRD_MPOOL_ALLOW_BIG_BLOCK if (size > mp->max_cell_size) { // this is rare case struct big_block_header *p, *h; p = (struct big_block_header*)malloc(sizeof(struct big_block_header) + size); if (p) { h = &mp->big_list; p->prev = h; p->next = h->next; h->next->prev = p; h->next = p; mp->big_blocks++; return (p + 1); } else return NULL; } #else assert(size <= mp->max_cell_size); #endif assert(size > 0); idx = (size - 
1) / MPOOL_MIN_CELL; mpf = &mp->fixed[idx]; // same as fixed_mpool_alloc.... if (NULL == mpf->head) { // in most case it will not run this path if (fixed_mpool_add_chunk(mpf) != 0) return NULL; } cell = mpf->head; mpf->used_cells++; mpf->head = mpf->head->next; return cell; } void mpool_free(struct mpool* mp, void* ptr, size_t size) { size_t idx; struct fixed_mpool* mpf; struct mpool_cell* cell; #ifdef FEBIRD_MPOOL_ALLOW_BIG_BLOCK if (size > mp->max_cell_size) { // this is rare case struct big_block_header* bbh = (struct big_block_header*)ptr - 1; bbh->prev->next = bbh->next; bbh->next->prev = bbh->prev; free(bbh); mp->big_blocks--; return; } #else assert(size <= mp->max_cell_size); #endif assert(size > 0); idx = (size - 1) / MPOOL_MIN_CELL; mpf = &mp->fixed[idx]; // same as fixed_mpool_free... cell = (struct mpool_cell*)ptr; cell->next = mpf->head; mpf->used_cells--; mpf->head = cell; } size_t mpool_used_cells(const struct mpool* mp) { size_t i, n = mp->max_cell_size / MPOOL_MIN_CELL; size_t used = 0; for (i = 0; i < n; ++i) used += mp->fixed[i].used_cells; return used; } size_t mpool_used_bytes(const struct mpool* mp) { size_t i, n = mp->max_cell_size / MPOOL_MIN_CELL; size_t used = 0; for (i = 0; i < n; ++i) used += mp->fixed[i].cell_size * mp->fixed[i].used_cells; return used; }   
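To round things off, here is a small usage sketch for the variable-size mpool front end. It is not part of the original post; it assumes a zero-initialized struct with max_cell_size and chunk_size set by the caller, as mpool_init above requires, and it illustrates that mpool_free must be given the original allocation size because the pool stores no per-block header. The concrete sizes (256-byte max cell, 16 KB chunks) are arbitrary example values.

#include <stdio.h>
#include <string.h>
#include "mpool.h"

int main(void) {
    struct mpool mp = {0};        /* ca/chunk_alloc == NULL: use the built-in malloc/free allocator */
    mp.max_cell_size = 256;       /* sizes 1..256 are served from 256/8 = 32 fixed pools */
    mp.chunk_size    = 16 * 1024; /* each fixed pool grows in 16 KB chunks */
    mpool_init(&mp);

    /* the request is mapped to a size class: idx = (100-1)/8 = 12, i.e. the 104-byte pool */
    char* s = (char*)mpool_alloc(&mp, 100);
    if (s) {
        strcpy(s, "hello mpool");
        printf("%s, used_bytes=%zu\n", s, mpool_used_bytes(&mp));
        mpool_free(&mp, s, 100);  /* the caller must pass back the size it asked for */
    }

    mpool_destroy(&mp);
    return 0;
}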
