/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>

static void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool)
{
	BUG_ON(pool->curr_nr <= 0);
	return pool->elements[--pool->curr_nr];
}

/**
 * mempool_destroy - deallocate a memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself.  This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
	while (pool->curr_nr) {
		void *element = remove_element(pool);
		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);

/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * This function creates and allocates a guaranteed-size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and
 * mempool_free() functions. This function might sleep. Both the alloc_fn()
 * and the free_fn() functions might sleep - as long as the mempool_alloc()
 * function is not called from IRQ contexts.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
			  mempool_free_t *free_fn, void *pool_data)
{
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
				   GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);

mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			       mempool_free_t *free_fn, void *pool_data,
			       gfp_t gfp_mask, int node_id)
{
	mempool_t *pool;

	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
	if (!pool)
		return NULL;
	pool->elements = kmalloc_node(min_nr * sizeof(void *),
				      gfp_mask, node_id);
	if (!pool->elements) {
		kfree(pool);
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->min_nr = min_nr;
	pool->pool_data = pool_data;
	init_waitqueue_head(&pool->wait);
	pool->alloc = alloc_fn;
	pool->free = free_fn;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(gfp_mask, pool->pool_data);
		if (unlikely(!element)) {
			mempool_destroy(pool);
			return NULL;
		}
		add_element(pool, element);
	}
	return pool;
}
EXPORT_SYMBOL(mempool_create_node);
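
/*
 * Illustrative sketch (compiled out, not part of this file): the common
 * pattern for building a mempool on top of a slab cache using the
 * mempool_alloc_slab()/mempool_free_slab() helpers defined near the
 * bottom of this file.  The element type, cache name, reserve size and
 * the MEMPOOL_USAGE_EXAMPLES guard are assumptions made for the example,
 * not existing kernel symbols.
 */
#ifdef MEMPOOL_USAGE_EXAMPLES
struct example_elem {
	struct list_head list;
	char payload[64];
};

static struct kmem_cache *example_cache;
static mempool_t *example_pool;

static int example_pool_init(void)
{
	example_cache = kmem_cache_create("example_elem",
					  sizeof(struct example_elem),
					  0, 0, NULL);
	if (!example_cache)
		return -ENOMEM;

	/* Reserve 16 elements so allocations can't deadlock under load. */
	example_pool = mempool_create(16, mempool_alloc_slab,
				      mempool_free_slab, example_cache);
	if (!example_pool) {
		kmem_cache_destroy(example_cache);
		return -ENOMEM;
	}
	return 0;
}
#endif /* MEMPOOL_USAGE_EXAMPLES */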

/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note, the caller must guarantee that no mempool_destroy() is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (e.g. from IRQ contexts) while this function executes.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);
	might_sleep();

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
				     GFP_KERNEL);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
			pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);
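
/*
 * Illustrative sketch (compiled out): growing the reserve of the
 * hypothetical example_pool from the earlier sketch when sustained load
 * is expected.  The new size is an assumption for the example.
 */
#ifdef MEMPOOL_USAGE_EXAMPLES
static int example_pool_grow(void)
{
	/* May sleep; on success at least 32 elements are guaranteed. */
	return mempool_resize(example_pool, 32);
}
#endif /* MEMPOOL_USAGE_EXAMPLES */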

/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (It might
 * fail if called from an IRQ context.)
 * Note: using __GFP_ZERO is not supported.
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_t wait;
	gfp_t gfp_temp;

	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
	might_sleep_if(gfp_mask & __GFP_WAIT);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}

	/*
	 * We use gfp mask w/o __GFP_WAIT or IO for the first round.  If
	 * alloc failed with that and @pool was empty, retry immediately.
	 */
	if (gfp_temp != gfp_mask) {
		spin_unlock_irqrestore(&pool->lock, flags);
		gfp_temp = gfp_mask;
		goto repeat_alloc;
	}

	/* We must not sleep if !__GFP_WAIT */
	if (!(gfp_mask & __GFP_WAIT)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return NULL;
	}

	/* Let's wait for someone else to return an element to @pool */
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

	spin_unlock_irqrestore(&pool->lock, flags);

	/*
	 * FIXME: this should be io_schedule().  The timeout is there as a
	 * workaround for some DM problems in 2.6.18.
	 */
	io_schedule_timeout(5*HZ);

	finish_wait(&pool->wait, &wait);
	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
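
/*
 * Illustrative sketch (compiled out): an I/O-path caller relying on the
 * guarantee documented above.  GFP_NOIO includes __GFP_WAIT, so the call
 * below may sleep until an element is freed back to the pool, but it
 * never returns NULL in process context.  example_pool is the
 * hypothetical pool from the earlier sketch.
 */
#ifdef MEMPOOL_USAGE_EXAMPLES
static struct example_elem *example_get_elem(void)
{
	return mempool_alloc(example_pool, GFP_NOIO);
}
#endif /* MEMPOOL_USAGE_EXAMPLES */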

/**
 * mempool_free - return an element to the pool.
 * @element:   pool element pointer.
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	/*
	 * Paired with the wmb in mempool_alloc().  The preceding read is
	 * for @element and the following @pool->curr_nr.  This ensures
	 * that the visible value of @pool->curr_nr is from after the
	 * allocation of @element.  This is necessary for fringe cases
	 * where @element was passed to this task without going through
	 * barriers.
	 *
	 * For example, assume @p is %NULL at the beginning and one task
	 * performs "p = mempool_alloc(...);" while another task is doing
	 * "while (!p) cpu_relax(); mempool_free(p, ...);".  This function
	 * may end up using curr_nr value which is from before allocation
	 * of @p without the following rmb.
	 */
	smp_rmb();

	/*
	 * For correctness, we need a test which is guaranteed to trigger
	 * if curr_nr + #allocated == min_nr.  Testing curr_nr < min_nr
	 * without locking achieves that and refilling as soon as possible
	 * is desirable.
	 *
	 * Because curr_nr visible here is always a value after the
	 * allocation of @element, any task which decremented curr_nr below
	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
	 * incremented to min_nr afterwards.  If curr_nr gets incremented
	 * to min_nr after the allocation of @element, the elements
	 * allocated after that are subject to the same guarantee.
	 *
	 * Waiters happen iff curr_nr is 0 and the above guarantee also
	 * ensures that there will be frees which return elements to the
	 * pool waking up the waiters.
	 */
	if (unlikely(pool->curr_nr < pool->min_nr)) {
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr < pool->min_nr)) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);
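
/*
 * Illustrative sketch (compiled out): returning an element and tearing
 * down the hypothetical example_pool.  mempool_free() refills the
 * reserve up to min_nr before handing elements back to free_fn(), and
 * waking any task sleeping in mempool_alloc().
 */
#ifdef MEMPOOL_USAGE_EXAMPLES
static void example_put_elem(struct example_elem *elem)
{
	mempool_free(elem, example_pool);
}

static void example_pool_exit(void)
{
	/* All elements must have been returned before destroying. */
	mempool_destroy(example_pool);
	kmem_cache_destroy(example_cache);
}
#endif /* MEMPOOL_USAGE_EXAMPLES */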

/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);

/*
 * A commonly used alloc and free fn that kmallocs/kfrees the amount of
 * memory specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;
	return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);

/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;
	return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;
	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
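
/*
 * Illustrative sketch (compiled out): a pool of single pages built from
 * the page helpers above.  pool_data carries the page order, so passing
 * 0 means order-0 (single) pages; the reserve size is an assumption.
 */
#ifdef MEMPOOL_USAGE_EXAMPLES
static mempool_t *example_page_pool;

static int example_page_pool_init(void)
{
	example_page_pool = mempool_create(8, mempool_alloc_pages,
					   mempool_free_pages, (void *)0);
	return example_page_pool ? 0 : -ENOMEM;
}
#endif /* MEMPOOL_USAGE_EXAMPLES */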