// SPDX-License-Identifier: GPL-2.0
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <pthread.h>
#include <unistd.h>
#include <assert.h>

#include <linux/gfp.h>
#include <linux/poison.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
#include <urcu/uatomic.h>

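/*
 * Counters shared with the rest of the test suite: nr_allocated tracks
 * live objects across all caches, preempt_count stands in for the
 * kernel's preemption count in the test shims, and test_verbose
 * controls test output.
 */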
int nr_allocated;
int preempt_count;
int test_verbose;

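/*
 * Userspace stand-in for the kernel's kmem_cache: a mutex-protected
 * free list of recycled objects on top of malloc()/posix_memalign().
 * non_kernel is a budget of allocations allowed to succeed without
 * __GFP_DIRECT_RECLAIM, letting tests simulate atomic-context
 * allocation failures.
 */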
struct kmem_cache {
	pthread_mutex_t lock;
	unsigned int size;
	unsigned int align;
	int nr_objs;
	void *objs;
	void (*ctor)(void *);
	unsigned int non_kernel;
	unsigned long nr_allocated;
	unsigned long nr_tallocated;
};

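/* Allow 'val' non-sleeping (no __GFP_DIRECT_RECLAIM) allocations to succeed. */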
void kmem_cache_set_non_kernel(struct kmem_cache *cachep, unsigned int val)
{
	cachep->non_kernel = val;
}

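/*
 * Accessors for the per-cache counters: nr_allocated counts objects
 * currently outstanding, nr_tallocated counts every allocation ever
 * made until explicitly zeroed, and kmem_cache_get_alloc() reports
 * the outstanding total in bytes.
 */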
unsigned long kmem_cache_get_alloc(struct kmem_cache *cachep)
{
	return cachep->size * cachep->nr_allocated;
}

unsigned long kmem_cache_nr_allocated(struct kmem_cache *cachep)
{
	return cachep->nr_allocated;
}

unsigned long kmem_cache_nr_tallocated(struct kmem_cache *cachep)
{
	return cachep->nr_tallocated;
}

void kmem_cache_zero_nr_tallocated(struct kmem_cache *cachep)
{
	cachep->nr_tallocated = 0;
}

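/*
 * Allocate one object, preferring the cache's free list and falling
 * back to malloc()/posix_memalign() when the list is empty.  The lru
 * argument is accepted for API compatibility and ignored here.
 */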
void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
		int gfp)
{
	void *p;

	if (!(gfp & __GFP_DIRECT_RECLAIM)) {
		if (!cachep->non_kernel)
			return NULL;

		cachep->non_kernel--;
	}

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs) {
		struct radix_tree_node *node = cachep->objs;
		cachep->nr_objs--;
		cachep->objs = node->parent;
		pthread_mutex_unlock(&cachep->lock);
		node->parent = NULL;
		p = node;
	} else {
		pthread_mutex_unlock(&cachep->lock);
		if (cachep->align)
			posix_memalign(&p, cachep->align, cachep->size);
		else
			p = malloc(cachep->size);
		if (cachep->ctor)
			cachep->ctor(p);
		else if (gfp & __GFP_ZERO)
			memset(p, 0, cachep->size);
	}

	uatomic_inc(&cachep->nr_allocated);
	uatomic_inc(&nr_allocated);
	uatomic_inc(&cachep->nr_tallocated);
	if (kmalloc_verbose)
		printf("Allocating %p from slab\n", p);
	return p;
}

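/*
 * Free one object with cachep->lock already held.  Unaligned caches
 * keep the object on the free list for reuse; once the list exceeds
 * 10 objects, or for aligned caches, the object is poisoned and
 * returned to the system.
 */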
void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
{
	assert(objp);
	uatomic_dec(&nr_allocated);
	uatomic_dec(&cachep->nr_allocated);
	if (kmalloc_verbose)
		printf("Freeing %p to slab\n", objp);
	if (cachep->nr_objs > 10 || cachep->align) {
		memset(objp, POISON_FREE, cachep->size);
		free(objp);
	} else {
		struct radix_tree_node *node = objp;
		cachep->nr_objs++;
		node->parent = cachep->objs;
		cachep->objs = node;
	}
}

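/* Lock-taking wrapper around kmem_cache_free_locked(). */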
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	pthread_mutex_lock(&cachep->lock);
	kmem_cache_free_locked(cachep, objp);
	pthread_mutex_unlock(&cachep->lock);
}

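/* Free an array of objects under a single lock acquisition. */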
void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list)
{
	if (kmalloc_verbose)
		pr_debug("Bulk free %p[0-%lu]\n", list, size - 1);

	pthread_mutex_lock(&cachep->lock);
	for (int i = 0; i < size; i++)
		kmem_cache_free_locked(cachep, list[i]);
	pthread_mutex_unlock(&cachep->lock);
}

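/* Stub: the userspace cache keeps no state worth shrinking. */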
void kmem_cache_shrink(struct kmem_cache *cachep)
{
}

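/*
 * Allocate 'size' objects, either all from the free list or all from
 * the system allocator.  Returns 'size' on success, or 0 when gfp
 * lacks __GFP_DIRECT_RECLAIM and the non_kernel budget cannot cover
 * the whole request.
 */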
int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
		void **p)
{
	size_t i;

	if (kmalloc_verbose)
		pr_debug("Bulk alloc %lu\n", size);

	if (!(gfp & __GFP_DIRECT_RECLAIM)) {
		if (cachep->non_kernel < size)
			return 0;

		cachep->non_kernel -= size;
	}

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs >= size) {
		struct radix_tree_node *node;

		for (i = 0; i < size; i++) {
			node = cachep->objs;
			cachep->nr_objs--;
			cachep->objs = node->parent;
			p[i] = node;
			node->parent = NULL;
		}
		pthread_mutex_unlock(&cachep->lock);
	} else {
		pthread_mutex_unlock(&cachep->lock);
		/* Fall back to one fresh allocation per object */
		for (i = 0; i < size; i++) {
			if (cachep->align) {
				posix_memalign(&p[i], cachep->align,
					       cachep->size);
			} else {
				p[i] = malloc(cachep->size);
			}
			if (cachep->ctor)
				cachep->ctor(p[i]);
			else if (gfp & __GFP_ZERO)
				memset(p[i], 0, cachep->size);
		}
	}

	for (i = 0; i < size; i++) {
		uatomic_inc(&nr_allocated);
		uatomic_inc(&cachep->nr_allocated);
		uatomic_inc(&cachep->nr_tallocated);
		if (kmalloc_verbose)
			printf("Allocating %p from slab\n", p[i]);
	}

	return size;
}

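/*
 * Create a cache of 'size'-byte objects.  The name and flags are
 * accepted for API compatibility and otherwise unused here; ctor, if
 * given, runs on each object the first time it is allocated from the
 * system (recycled free-list objects are handed back as-is).
 */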
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		unsigned int flags, void (*ctor)(void *))
{
	struct kmem_cache *ret = malloc(sizeof(*ret));

	pthread_mutex_init(&ret->lock, NULL);
	ret->size = size;
	ret->align = align;
	ret->nr_objs = 0;
	ret->nr_allocated = 0;
	ret->nr_tallocated = 0;
	ret->objs = NULL;
	ret->ctor = ctor;
	ret->non_kernel = 0;
	return ret;
}

/*
 * Test the test infrastructure for kmem_cache_alloc/free and bulk
 * counterparts.
 */
void test_kmem_cache_bulk(void)
{
	int i;
	void *list[12];
	static struct kmem_cache *test_cache, *test_cache2;

	/*
	 * Test the bulk allocators on an unaligned kmem_cache so that
	 * bulk alloc/free reuses objects from the cache's free list.
	 */
	test_cache = kmem_cache_create("test_cache", 256, 0, SLAB_PANIC, NULL);

	for (i = 0; i < 5; i++)
		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);

	for (i = 0; i < 5; i++)
		kmem_cache_free(test_cache, list[i]);
	assert(test_cache->nr_objs == 5);

	kmem_cache_alloc_bulk(test_cache, __GFP_DIRECT_RECLAIM, 5, list);
	kmem_cache_free_bulk(test_cache, 5, list);

	for (i = 0; i < 12; i++)
		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);

	for (i = 0; i < 12; i++)
		kmem_cache_free(test_cache, list[i]);

	/* The last free will not be kept around */
	assert(test_cache->nr_objs == 11);

	/* Aligned caches will immediately free */
	test_cache2 = kmem_cache_create("test_cache2", 128, 128, SLAB_PANIC, NULL);

	kmem_cache_alloc_bulk(test_cache2, __GFP_DIRECT_RECLAIM, 10, list);
	kmem_cache_free_bulk(test_cache2, 10, list);
	assert(!test_cache2->nr_objs);
}