xref: /openbmc/linux/mm/mempool.c (revision 8730046c)
/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 *  debugging by David Rientjes, Copyright (C) 2015
 */
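
/*
 * Typical lifecycle (an illustrative sketch, not part of this file; the
 * names my_cache, my_pool and obj are hypothetical): create a pool with
 * a guaranteed reserve, allocate and free elements through it, and
 * destroy it on teardown.
 *
 *	mempool_t *my_pool = mempool_create(16, mempool_alloc_slab,
 *					    mempool_free_slab, my_cache);
 *	void *obj = mempool_alloc(my_pool, GFP_KERNEL);
 *	...
 *	mempool_free(obj, my_pool);
 *	mempool_destroy(my_pool);
 */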

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include "slab.h"

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
static void poison_error(mempool_t *pool, void *element, size_t size,
			 size_t byte)
{
	const int nr = pool->curr_nr;
	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
	int i;

	pr_err("BUG: mempool element poison mismatch\n");
	pr_err("Mempool %p size %zu\n", pool, size);
	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
	for (i = start; i < end; i++)
		pr_cont("%x ", *(u8 *)(element + i));
	pr_cont("%s\n", end < size ? "..." : "");
	dump_stack();
}

static void __check_element(mempool_t *pool, void *element, size_t size)
{
	u8 *obj = element;
	size_t i;

	for (i = 0; i < size; i++) {
		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

		if (obj[i] != exp) {
			poison_error(pool, element, size, i);
			return;
		}
	}
	memset(obj, POISON_INUSE, size);
}

static void check_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
		__check_element(pool, element, ksize(element));

	/* Mempools backed by page allocator */
	if (pool->free == mempool_free_pages) {
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}

static void __poison_element(void *element, size_t size)
{
	u8 *obj = element;

	memset(obj, POISON_FREE, size - 1);
	obj[size - 1] = POISON_END;
}

static void poison_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		__poison_element(element, ksize(element));

	/* Mempools backed by page allocator */
	if (pool->alloc == mempool_alloc_pages) {
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__poison_element(addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}
#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */

static void kasan_poison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		kasan_poison_kfree(element);
	if (pool->alloc == mempool_alloc_pages)
		kasan_free_pages(element, (unsigned long)pool->pool_data);
}

static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		kasan_unpoison_slab(element);
	if (pool->alloc == mempool_alloc_pages)
		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
}

static void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	poison_element(pool, element);
	kasan_poison_element(pool, element);
	pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool, gfp_t flags)
{
	void *element = pool->elements[--pool->curr_nr];

	BUG_ON(pool->curr_nr < 0);
	kasan_unpoison_element(pool, element, flags);
	check_element(pool, element);
	return element;
}

/**
 * mempool_destroy - deallocate a memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself.  This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
	if (unlikely(!pool))
		return;

	while (pool->curr_nr) {
		void *element = remove_element(pool, GFP_KERNEL);
		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);
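
/*
 * mempool_destroy() tolerates a NULL @pool (see the check above), so an
 * error or teardown path may call it unconditionally, even when the pool
 * was never created.  An illustrative sketch; io_pool and
 * my_driver_exit() are hypothetical:
 *
 *	static mempool_t *io_pool;
 *
 *	static void my_driver_exit(void)
 *	{
 *		mempool_destroy(io_pool);
 *		io_pool = NULL;
 *	}
 */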

/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * This function creates and allocates a guaranteed-size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and
 * mempool_free() functions. This function might sleep. Both alloc_fn()
 * and free_fn() might sleep, as long as mempool_alloc() is not called
 * from IRQ contexts.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
				mempool_free_t *free_fn, void *pool_data)
{
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
				   GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);
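
/*
 * Example (an illustrative sketch, not part of this file): a pool that
 * guarantees at least four preallocated objects backed by a private
 * kmem_cache.  The cache name, object size and the io_buf_cache/io_pool
 * identifiers are hypothetical; mempool_create_slab_pool() in
 * <linux/mempool.h> wraps this common pairing.
 *
 *	struct kmem_cache *io_buf_cache =
 *		kmem_cache_create("io_buf", 256, 0, 0, NULL);
 *	mempool_t *io_pool;
 *
 *	if (!io_buf_cache)
 *		return -ENOMEM;
 *	io_pool = mempool_create(4, mempool_alloc_slab,
 *				 mempool_free_slab, io_buf_cache);
 *	if (!io_pool) {
 *		kmem_cache_destroy(io_buf_cache);
 *		return -ENOMEM;
 *	}
 */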

mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			       mempool_free_t *free_fn, void *pool_data,
			       gfp_t gfp_mask, int node_id)
{
	mempool_t *pool;

	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
	if (!pool)
		return NULL;
	pool->elements = kmalloc_node(min_nr * sizeof(void *),
				      gfp_mask, node_id);
	if (!pool->elements) {
		kfree(pool);
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->min_nr = min_nr;
	pool->pool_data = pool_data;
	init_waitqueue_head(&pool->wait);
	pool->alloc = alloc_fn;
	pool->free = free_fn;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(gfp_mask, pool->pool_data);
		if (unlikely(!element)) {
			mempool_destroy(pool);
			return NULL;
		}
		add_element(pool, element);
	}
	return pool;
}
EXPORT_SYMBOL(mempool_create_node);
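
/*
 * mempool_create_node() is the NUMA-aware variant: @gfp_mask controls how
 * the pool bookkeeping and the initial elements are allocated, and
 * @node_id selects the preferred node.  mempool_create() above is simply
 * this call with GFP_KERNEL and NUMA_NO_NODE.  A hypothetical sketch,
 * assuming a device-local cache named my_cache:
 *
 *	pool = mempool_create_node(8, mempool_alloc_slab, mempool_free_slab,
 *				   my_cache, GFP_KERNEL, dev_to_node(dev));
 */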

/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks or grows the pool. When growing, it cannot be
 * guaranteed that the pool will reach the new size immediately, but new
 * mempool_free() calls will refill it.  This function may sleep.
 *
 * Note that the caller must guarantee that no mempool_destroy() is
 * called while this function is running.  mempool_alloc() and
 * mempool_free() might be called (e.g. from IRQ contexts) while this
 * function executes.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);
	might_sleep();

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool, GFP_KERNEL);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
				     GFP_KERNEL);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
			pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);
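
/*
 * Example (an illustrative sketch; io_pool is hypothetical): growing the
 * reserve when more in-flight requests must be guaranteed, then shrinking
 * it back.  Growing can fail with -ENOMEM as above; the shrink path in
 * this function always returns 0.
 *
 *	if (mempool_resize(io_pool, 64))
 *		return -ENOMEM;
 *	...
 *	mempool_resize(io_pool, 4);
 */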

/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL.  Note that due to preallocation, this function
 * *never* fails when called from process contexts (it might fail
 * if called from an IRQ context).
 * Note: using __GFP_ZERO is not supported.
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_t wait;
	gfp_t gfp_temp;

	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool, gfp_temp);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}

	/*
	 * We use gfp mask w/o direct reclaim or IO for the first round.  If
	 * alloc failed with that and @pool was empty, retry immediately.
	 */
	if (gfp_temp != gfp_mask) {
		spin_unlock_irqrestore(&pool->lock, flags);
		gfp_temp = gfp_mask;
		goto repeat_alloc;
	}

	/* We must not sleep if !__GFP_DIRECT_RECLAIM */
	if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return NULL;
	}

	/* Let's wait for someone else to return an element to @pool */
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

	spin_unlock_irqrestore(&pool->lock, flags);

	/*
	 * FIXME: this should be io_schedule().  The timeout is there as a
	 * workaround for some DM problems in 2.6.18.
	 */
	io_schedule_timeout(5*HZ);

	finish_wait(&pool->wait, &wait);
	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
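
/*
 * Example (an illustrative sketch; the io_pool and struct io_req names
 * are hypothetical): on an I/O path a sleepable gfp mask such as GFP_NOIO
 * means the call below only returns once it has an element, falling back
 * to the preallocated reserve and, if needed, waiting for a free.  Since
 * __GFP_ZERO is not supported, clear the element manually if required.
 *
 *	struct io_req *req;
 *
 *	req = mempool_alloc(io_pool, GFP_NOIO);
 *	memset(req, 0, sizeof(*req));
 *	...
 *	mempool_free(req, io_pool);
 */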

/**
 * mempool_free - return an element to the pool.
 * @element:   pool element pointer.
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	/*
	 * Paired with the wmb in mempool_alloc().  The preceding read is
	 * for @element and the following @pool->curr_nr.  This ensures
	 * that the visible value of @pool->curr_nr is from after the
	 * allocation of @element.  This is necessary for fringe cases
	 * where @element was passed to this task without going through
	 * barriers.
	 *
	 * For example, assume @p is %NULL at the beginning and one task
	 * performs "p = mempool_alloc(...);" while another task is doing
	 * "while (!p) cpu_relax(); mempool_free(p, ...);".  This function
	 * may end up using curr_nr value which is from before allocation
	 * of @p without the following rmb.
	 */
	smp_rmb();

	/*
	 * For correctness, we need a test which is guaranteed to trigger
	 * if curr_nr + #allocated == min_nr.  Testing curr_nr < min_nr
	 * without locking achieves that and refilling as soon as possible
	 * is desirable.
	 *
	 * Because curr_nr visible here is always a value after the
	 * allocation of @element, any task which decremented curr_nr below
	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
	 * incremented to min_nr afterwards.  If curr_nr gets incremented
	 * to min_nr after the allocation of @element, the elements
	 * allocated after that are subject to the same guarantee.
	 *
	 * Waiters happen iff curr_nr is 0 and the above guarantee also
	 * ensures that there will be frees which return elements to the
	 * pool waking up the waiters.
	 */
	if (unlikely(pool->curr_nr < pool->min_nr)) {
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr < pool->min_nr)) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);

/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	VM_BUG_ON(mem->ctor);
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);
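
/*
 * For these two helpers @pool_data is the struct kmem_cache to draw from;
 * note the VM_BUG_ON() above, caches with a constructor are not supported.
 * A hypothetical pairing (my_cache is not defined here), equivalent to the
 * mempool_create_slab_pool() helper in <linux/mempool.h>:
 *
 *	pool = mempool_create(8, mempool_alloc_slab,
 *			      mempool_free_slab, my_cache);
 */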

/*
 * A commonly used alloc and free fn that kmallocs/kfrees the amount of memory
 * specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;
	return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
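
/*
 * For these two helpers @pool_data encodes the allocation size directly
 * in the pointer value, as the cast above shows.  A hypothetical sketch,
 * equivalent to the mempool_create_kmalloc_pool() helper in
 * <linux/mempool.h>, reserving eight 512-byte buffers:
 *
 *	pool = mempool_create(8, mempool_kmalloc, mempool_kfree,
 *			      (void *)(unsigned long)512);
 */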

/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;
	return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;
	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
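
/*
 * For these two helpers @pool_data is the page allocation order.  A
 * hypothetical sketch, equivalent to the mempool_create_page_pool()
 * helper in <linux/mempool.h>, reserving ten single (order-0) pages:
 *
 *	pool = mempool_create(10, mempool_alloc_pages, mempool_free_pages,
 *			      (void *)(long)0);
 *	page = mempool_alloc(pool, GFP_NOIO);
 */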