/*
 * Minimal userspace implementations of the kernel's slab and mempool
 * allocation APIs, backed by malloc()/free().  nr_allocated tracks
 * outstanding allocations so the tests can check for leaks.
 */
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <pthread.h>
#include <unistd.h>
#include <assert.h>

#include <linux/mempool.h>
#include <linux/poison.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
#include <urcu/uatomic.h>

int nr_allocated;
int preempt_count;

struct kmem_cache {
	pthread_mutex_t lock;
	int size;
	int nr_objs;
	void *objs;
	void (*ctor)(void *);
};

void *mempool_alloc(mempool_t *pool, int gfp_mask)
{
	return pool->alloc(gfp_mask, pool->data);
}

void mempool_free(void *element, mempool_t *pool)
{
	pool->free(element, pool->data);
}

mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
			mempool_free_t *free_fn, void *pool_data)
{
	mempool_t *ret = malloc(sizeof(*ret));

	ret->alloc = alloc_fn;
	ret->free = free_fn;
	ret->data = pool_data;
	return ret;
}

void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
{
	struct radix_tree_node *node;

	/* __GFP_NOWARN is used by the tests to simulate allocation failure. */
	if (flags & __GFP_NOWARN)
		return NULL;

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs) {
		/* Reuse an object from the per-cache free list. */
		cachep->nr_objs--;
		node = cachep->objs;
		cachep->objs = node->private_data;
		pthread_mutex_unlock(&cachep->lock);
		node->private_data = NULL;
	} else {
		pthread_mutex_unlock(&cachep->lock);
		node = malloc(cachep->size);
		if (cachep->ctor)
			cachep->ctor(node);
	}

	uatomic_inc(&nr_allocated);
	return node;
}

void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	assert(objp);
	uatomic_dec(&nr_allocated);
	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs > 10) {
		/* Free list is long enough; poison and release the object. */
		memset(objp, POISON_FREE, cachep->size);
		free(objp);
	} else {
		/* Keep the object on the free list for later reuse. */
		struct radix_tree_node *node = objp;
		cachep->nr_objs++;
		node->private_data = cachep->objs;
		cachep->objs = node;
	}
	pthread_mutex_unlock(&cachep->lock);
}

void *kmalloc(size_t size, gfp_t gfp)
{
	void *ret = malloc(size);
	uatomic_inc(&nr_allocated);
	return ret;
}

void kfree(void *p)
{
	if (!p)
		return;
	uatomic_dec(&nr_allocated);
	free(p);
}

struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t offset,
	unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *ret = malloc(sizeof(*ret));

	pthread_mutex_init(&ret->lock, NULL);
	ret->size = size;
	ret->nr_objs = 0;
	ret->objs = NULL;
	ret->ctor = ctor;
	return ret;
}
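
#ifdef SLAB_SHIM_DEMO
/*
 * Illustrative sketch only: the SLAB_SHIM_DEMO guard and demo_main() are
 * not part of the test suite.  They show one way the shims above might be
 * exercised, assuming objects are at least sizeof(struct radix_tree_node)
 * so the free list can store its next pointer in private_data.
 */
#include <stdio.h>

int demo_main(void)
{
	int before = nr_allocated;
	struct kmem_cache *cache;
	void *obj;
	void *buf;

	cache = kmem_cache_create("demo_node", sizeof(struct radix_tree_node),
				  0, 0, NULL);

	/* A plain allocation succeeds and bumps the leak counter. */
	obj = kmem_cache_alloc(cache, 0);
	assert(obj != NULL);
	assert(nr_allocated == before + 1);

	/* __GFP_NOWARN makes the shim report failure without allocating. */
	assert(kmem_cache_alloc(cache, __GFP_NOWARN) == NULL);

	/* Freeing returns the object to the cache's free list. */
	kmem_cache_free(cache, obj);
	assert(nr_allocated == before);

	/* kmalloc()/kfree() are tracked the same way. */
	buf = kmalloc(128, 0);
	kfree(buf);
	assert(nr_allocated == before);

	printf("slab shim demo passed\n");
	return 0;
}
#endif /* SLAB_SHIM_DEMO */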