/*
 * linux/mm/mempool.c
 *
 * memory buffer pool support. Such pools are mostly used
 * for guaranteed, deadlock-free memory allocations during
 * extreme VM load.
 *
 * started by Ingo Molnar, Copyright (C) 2001
 * debugging by David Rientjes, Copyright (C) 2015
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include "slab.h"

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
static void poison_error(mempool_t *pool, void *element, size_t size,
			 size_t byte)
{
	const int nr = pool->curr_nr;
	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
	int i;

	pr_err("BUG: mempool element poison mismatch\n");
	pr_err("Mempool %p size %zu\n", pool, size);
	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
	for (i = start; i < end; i++)
		pr_cont("%x ", *(u8 *)(element + i));
	pr_cont("%s\n", end < size ? "..." : "");
	dump_stack();
}

static void __check_element(mempool_t *pool, void *element, size_t size)
{
	u8 *obj = element;
	size_t i;

	for (i = 0; i < size; i++) {
		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

		if (obj[i] != exp) {
			poison_error(pool, element, size, i);
			return;
		}
	}
	memset(obj, POISON_INUSE, size);
}

static void check_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
		__check_element(pool, element, ksize(element));

	/* Mempools backed by page allocator */
	if (pool->free == mempool_free_pages) {
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}

static void __poison_element(void *element, size_t size)
{
	u8 *obj = element;

	memset(obj, POISON_FREE, size - 1);
	obj[size - 1] = POISON_END;
}

static void poison_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		__poison_element(element, ksize(element));

	/* Mempools backed by page allocator */
	if (pool->alloc == mempool_alloc_pages) {
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__poison_element(addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}
#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */

static void kasan_poison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		kasan_poison_kfree(element);
	if (pool->alloc == mempool_alloc_pages)
		kasan_free_pages(element, (unsigned long)pool->pool_data);
}

static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		kasan_unpoison_slab(element);
	if (pool->alloc == mempool_alloc_pages)
		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
}
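/*
 * Illustrative sketch, not part of the original file: the byte pattern
 * that poison_element() writes and check_element() expects for an
 * element of @size bytes, using the values from <linux/poison.h>:
 *
 *   while free in the pool:  0x6b 0x6b ... 0x6b 0xa5
 *                            (POISON_FREE x (size - 1), then POISON_END)
 *   after removal:           0x5a 0x5a ... 0x5a 0x5a  (POISON_INUSE)
 *
 * Any deviation while the element sits in the pool means someone wrote
 * to it after freeing; poison_error() then reports the offending offset
 * with its surrounding bytes and a stack trace.
 */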
static void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	poison_element(pool, element);
	kasan_poison_element(pool, element);
	pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool, gfp_t flags)
{
	void *element = pool->elements[--pool->curr_nr];

	BUG_ON(pool->curr_nr < 0);
	kasan_unpoison_element(pool, element, flags);
	check_element(pool, element);
	return element;
}

/**
 * mempool_destroy - deallocate a memory pool
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself. This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
	if (unlikely(!pool))
		return;

	while (pool->curr_nr) {
		void *element = remove_element(pool, GFP_KERNEL);
		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);

/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * This function creates and allocates a guaranteed size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
 * functions. This function might sleep. Both the alloc_fn() and the free_fn()
 * functions might sleep - as long as the mempool_alloc() function is not called
 * from IRQ contexts.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
			  mempool_free_t *free_fn, void *pool_data)
{
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
				   GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);

mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			       mempool_free_t *free_fn, void *pool_data,
			       gfp_t gfp_mask, int node_id)
{
	mempool_t *pool;

	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
	if (!pool)
		return NULL;
	pool->elements = kmalloc_node(min_nr * sizeof(void *),
				      gfp_mask, node_id);
	if (!pool->elements) {
		kfree(pool);
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->min_nr = min_nr;
	pool->pool_data = pool_data;
	init_waitqueue_head(&pool->wait);
	pool->alloc = alloc_fn;
	pool->free = free_fn;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(gfp_mask, pool->pool_data);
		if (unlikely(!element)) {
			mempool_destroy(pool);
			return NULL;
		}
		add_element(pool, element);
	}
	return pool;
}
EXPORT_SYMBOL(mempool_create_node);
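/*
 * Usage sketch (hypothetical, not part of the original file): a driver
 * that must make forward progress under memory pressure typically pairs
 * a kmem_cache with a small mempool reserve. "io_desc", "io_desc_cache"
 * and "io_desc_pool" are made-up names for illustration.
 */
#if 0
struct io_desc {
	struct list_head list;
	void *buf;
};

static struct kmem_cache *io_desc_cache;
static mempool_t *io_desc_pool;

static int __init io_desc_setup(void)
{
	io_desc_cache = KMEM_CACHE(io_desc, 0);
	if (!io_desc_cache)
		return -ENOMEM;
	/* Guarantee that at least 4 descriptors are always allocatable. */
	io_desc_pool = mempool_create(4, mempool_alloc_slab,
				      mempool_free_slab, io_desc_cache);
	if (!io_desc_pool) {
		kmem_cache_destroy(io_desc_cache);
		return -ENOMEM;
	}
	return 0;
}
#endif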
/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note, the caller must guarantee that no mempool_destroy() is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (e.g. from IRQ contexts) while this function executes.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);
	might_sleep();

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool, GFP_KERNEL);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
				     GFP_KERNEL);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
	       pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);
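/*
 * Usage sketch (hypothetical, not part of the original file): the
 * reserve is usually scaled with the number of consumers, e.g. when
 * devices sharing the pool come and go. RESERVED_PER_DEVICE is a
 * made-up sizing policy.
 */
#if 0
#define RESERVED_PER_DEVICE	4

static int scale_reserve(mempool_t *pool, int nr_devices)
{
	/* May sleep; caller must prevent a concurrent mempool_destroy(). */
	return mempool_resize(pool, nr_devices * RESERVED_PER_DEVICE);
}
#endif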
/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:     pointer to the memory pool which was allocated via
 *            mempool_create().
 * @gfp_mask: the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (It might
 * fail if called from an IRQ context.)
 * Note: neither __GFP_NOMEMALLOC nor __GFP_ZERO are supported.
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_t wait;
	gfp_t gfp_temp;

	/* If oom killed, memory reserves are essential to prevent livelock */
	VM_WARN_ON_ONCE(gfp_mask & __GFP_NOMEMALLOC);
	/* No element size to zero on allocation */
	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);

	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);

	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);

repeat_alloc:
	if (likely(pool->curr_nr)) {
		/*
		 * Don't allocate from emergency reserves if there are
		 * elements available. This check is racy, but it will
		 * be rechecked each loop.
		 */
		gfp_temp |= __GFP_NOMEMALLOC;
	}

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool, gfp_temp);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}

	/*
	 * We use gfp mask w/o direct reclaim or IO for the first round. If
	 * alloc failed with that and @pool was empty, retry immediately.
	 */
	if ((gfp_temp & ~__GFP_NOMEMALLOC) != gfp_mask) {
		spin_unlock_irqrestore(&pool->lock, flags);
		gfp_temp = gfp_mask;
		goto repeat_alloc;
	}
	gfp_temp = gfp_mask;

	/* We must not sleep if !__GFP_DIRECT_RECLAIM */
	if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return NULL;
	}

	/* Let's wait for someone else to return an element to @pool */
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

	spin_unlock_irqrestore(&pool->lock, flags);

	/*
	 * FIXME: this should be io_schedule(). The timeout is there as a
	 * workaround for some DM problems in 2.6.18.
	 */
	io_schedule_timeout(5*HZ);

	finish_wait(&pool->wait, &wait);
	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
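/*
 * Usage sketch (hypothetical, not part of the original file): the
 * alloc/free pair in a writeback-style path, reusing the made-up
 * io_desc pool from the earlier sketch. GFP_NOIO keeps the allocator
 * from recursing into I/O; if memory is exhausted the call sleeps until
 * another user returns an element, so the caller must be able to block.
 */
#if 0
static void process_one_request(mempool_t *pool)
{
	struct io_desc *desc;

	desc = mempool_alloc(pool, GFP_NOIO);	/* never fails, may sleep */
	/* ... drive the I/O described by desc ... */
	mempool_free(desc, pool);
}
#endif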
/**
 * mempool_free - return an element to the pool.
 * @element: pool element pointer.
 * @pool:    pointer to the memory pool which was allocated via
 *           mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	/*
	 * Paired with the wmb in mempool_alloc(). The preceding read is
	 * for @element and the following @pool->curr_nr. This ensures
	 * that the visible value of @pool->curr_nr is from after the
	 * allocation of @element. This is necessary for fringe cases
	 * where @element was passed to this task without going through
	 * barriers.
	 *
	 * For example, assume @p is %NULL at the beginning and one task
	 * performs "p = mempool_alloc(...);" while another task is doing
	 * "while (!p) cpu_relax(); mempool_free(p, ...);". Without the
	 * following rmb, this function may end up using a curr_nr value
	 * from before the allocation of @p.
	 */
	smp_rmb();

	/*
	 * For correctness, we need a test which is guaranteed to trigger
	 * if curr_nr + #allocated == min_nr. Testing curr_nr < min_nr
	 * without locking achieves that and refilling as soon as possible
	 * is desirable.
	 *
	 * Because curr_nr visible here is always a value after the
	 * allocation of @element, any task which decremented curr_nr below
	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
	 * incremented to min_nr afterwards. If curr_nr gets incremented
	 * to min_nr after the allocation of @element, the elements
	 * allocated after that are subject to the same guarantee.
	 *
	 * Waiters happen iff curr_nr is 0 and the above guarantee also
	 * ensures that there will be frees which return elements to the
	 * pool waking up the waiters.
	 */
	if (unlikely(pool->curr_nr < pool->min_nr)) {
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr < pool->min_nr)) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);

/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	VM_BUG_ON(mem->ctor);
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);

/*
 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 * specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;
	return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);

/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;
	return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;
	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
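/*
 * Usage sketch (hypothetical, not part of the original file): a
 * page-backed pool stores the page order in pool_data, so the order-1
 * pool below reserves two-page units. "page_pool" is a made-up name.
 */
#if 0
static mempool_t *page_pool;

static int page_pool_setup(void)
{
	page_pool = mempool_create(2, mempool_alloc_pages,
				   mempool_free_pages, (void *)(long)1);
	return page_pool ? 0 : -ENOMEM;
}
#endif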