xref: /openbmc/linux/mm/mempool.c (revision 1da177e4)
/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>

static void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool)
{
	BUG_ON(pool->curr_nr <= 0);
	return pool->elements[--pool->curr_nr];
}

static void free_pool(mempool_t *pool)
{
	while (pool->curr_nr) {
		void *element = remove_element(pool);
		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	kfree(pool);
}

/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * This function creates and allocates a guaranteed-size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and
 * mempool_free() functions. This function might sleep. Both alloc_fn()
 * and free_fn() may sleep, as long as mempool_alloc() is never called
 * from IRQ context.
 */
mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
				mempool_free_t *free_fn, void *pool_data)
{
	mempool_t *pool;

	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;
	memset(pool, 0, sizeof(*pool));
	pool->elements = kmalloc(min_nr * sizeof(void *), GFP_KERNEL);
	if (!pool->elements) {
		kfree(pool);
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->min_nr = min_nr;
	pool->pool_data = pool_data;
	init_waitqueue_head(&pool->wait);
	pool->alloc = alloc_fn;
	pool->free = free_fn;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (unlikely(!element)) {
			free_pool(pool);
			return NULL;
		}
		add_element(pool, element);
	}
	return pool;
}
EXPORT_SYMBOL(mempool_create);
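
/*
 * Example (illustrative sketch, not part of this file): a driver that must
 * make forward progress under memory pressure can reserve a fixed number of
 * request structures at init time. The struct my_request type and the
 * my_alloc()/my_free() callbacks are hypothetical; they only need to match
 * the mempool_alloc_t/mempool_free_t signatures used above.
 *
 *	static void *my_alloc(unsigned int __nocast gfp_mask, void *pool_data)
 *	{
 *		return kmalloc(sizeof(struct my_request), gfp_mask);
 *	}
 *
 *	static void my_free(void *element, void *pool_data)
 *	{
 *		kfree(element);
 *	}
 *
 *	static mempool_t *req_pool;
 *
 *	static int __init my_init(void)
 *	{
 *		req_pool = mempool_create(32, my_alloc, my_free, NULL);
 *		if (!req_pool)
 *			return -ENOMEM;
 *		return 0;
 *	}
 */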

/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 * @gfp_mask:   the usual allocation bitmask.
 *
 * This function shrinks or grows the pool. When growing, it cannot be
 * guaranteed that the pool will reach the new size immediately, but new
 * mempool_free() calls will refill it.
 *
 * Note that the caller must guarantee that no mempool_destroy() is called
 * while this function is running. mempool_alloc() & mempool_free() might
 * be called (e.g. from IRQ contexts) while this function executes.
 */
int mempool_resize(mempool_t *pool, int new_min_nr, unsigned int __nocast gfp_mask)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
			pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(gfp_mask, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);
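
/*
 * Example (illustrative sketch): growing the reserve of the hypothetical
 * req_pool from the mempool_create() example above, e.g. when more devices
 * are attached. GFP_KERNEL is acceptable here because mempool_resize() may
 * sleep while allocating the new element array.
 *
 *	if (mempool_resize(req_pool, 64, GFP_KERNEL) != 0)
 *		printk(KERN_WARNING "could not grow request pool\n");
 */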

/**
 * mempool_destroy - deallocate a memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps. The caller
 * must guarantee that all elements have been returned to the pool (i.e.
 * freed) prior to calling mempool_destroy().
 */
void mempool_destroy(mempool_t *pool)
{
	if (pool->curr_nr != pool->min_nr)
		BUG();		/* There were outstanding elements */
	free_pool(pool);
}
EXPORT_SYMBOL(mempool_destroy);
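
/*
 * Example (illustrative sketch): tearing down the hypothetical req_pool at
 * module exit. Every element obtained with mempool_alloc() must have been
 * returned via mempool_free() first, otherwise the BUG() above triggers.
 *
 *	static void __exit my_exit(void)
 *	{
 *		mempool_destroy(req_pool);
 *	}
 */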

/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function *never*
 * fails when called from process context with __GFP_WAIT set in
 * @gfp_mask; it might fail if called from IRQ context or with a
 * non-blocking @gfp_mask.
 */
void * mempool_alloc(mempool_t *pool, unsigned int __nocast gfp_mask)
{
	void *element;
	unsigned long flags;
	DEFINE_WAIT(wait);
	int gfp_nowait = gfp_mask & ~(__GFP_WAIT | __GFP_IO);

	might_sleep_if(gfp_mask & __GFP_WAIT);
repeat_alloc:
	element = pool->alloc(gfp_nowait|__GFP_NOWARN, pool->pool_data);
	if (likely(element != NULL))
		return element;

	/*
	 * If the pool is less than 50% full and we can perform effective
	 * page reclaim then try harder to allocate an element.
	 */
	mb();
	if ((gfp_mask & __GFP_FS) && (gfp_mask != gfp_nowait) &&
				(pool->curr_nr <= pool->min_nr/2)) {
		element = pool->alloc(gfp_mask, pool->pool_data);
		if (likely(element != NULL))
			return element;
	}

	/*
	 * Kick the VM at this point.
	 */
	wakeup_bdflush(0);

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		return element;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* We must not sleep in the GFP_ATOMIC case */
	if (!(gfp_mask & __GFP_WAIT))
		return NULL;

	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
	mb();
	if (!pool->curr_nr)
		io_schedule();
	finish_wait(&pool->wait, &wait);

	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
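
/*
 * Example (illustrative sketch): allocating a request on the I/O path from
 * the hypothetical req_pool. With a blocking mask such as GFP_NOIO the call
 * sleeps until an element becomes available instead of returning NULL,
 * which is what makes the pool useful under memory pressure.
 *
 *	struct my_request *req;
 *
 *	req = mempool_alloc(req_pool, GFP_NOIO);
 *
 * A caller in IRQ context would pass GFP_ATOMIC instead and must be
 * prepared for a NULL return.
 */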

/**
 * mempool_free - return an element to the pool.
 * @element:   pool element pointer.
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	mb();
	if (pool->curr_nr < pool->min_nr) {
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);
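
/*
 * Example (illustrative sketch): returning the request from the
 * mempool_alloc() example above once the I/O has completed. If the pool is
 * below its reserve the element refills it and a waiter is woken; otherwise
 * it is handed back to free_fn().
 *
 *	mempool_free(req, req_pool);
 */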

/*
 * A commonly used pair of alloc and free functions; @pool_data is the
 * kmem_cache_t to allocate elements from.
 */
void *mempool_alloc_slab(unsigned int __nocast gfp_mask, void *pool_data)
{
	kmem_cache_t *mem = (kmem_cache_t *) pool_data;
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	kmem_cache_t *mem = (kmem_cache_t *) pool_data;
	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);

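/*
 * Example (illustrative sketch): the common idiom of backing a mempool with
 * a slab cache by passing the kmem_cache_t as pool_data. The cache name and
 * struct my_request are hypothetical.
 *
 *	static kmem_cache_t *req_cachep;
 *	static mempool_t *req_pool;
 *
 *	req_cachep = kmem_cache_create("my_request",
 *				sizeof(struct my_request), 0, 0, NULL, NULL);
 *	if (!req_cachep)
 *		return -ENOMEM;
 *	req_pool = mempool_create(32, mempool_alloc_slab,
 *				mempool_free_slab, req_cachep);
 *	if (!req_pool) {
 *		kmem_cache_destroy(req_cachep);
 *		return -ENOMEM;
 *	}
 */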