/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix-tree-like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate a pointer (or
 * whatever; we treat it as a (void *)) with that id.  You can pass this
 * id to a user to pass back at a later time.  You then pass that id to
 * this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep MAX_IDR_FREE idr_layer structs in a
 * local pool), so an id allocation usually doesn't need to go to the
 * memory "store" and you don't need to be too concerned about locking
 * and conflicts with the slab allocator.
 */
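
/*
 * A minimal usage sketch, not part of this file: my_idr, my_ptr and the
 * surrounding error handling are illustrative only.  Callers must
 * serialize modifications themselves (see idr_alloc() below).
 *
 *	DEFINE_IDR(my_idr);
 *
 *	id = idr_alloc(&my_idr, my_ptr, 0, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return id;
 *	...
 *	my_ptr = idr_find(&my_idr, id);
 *	...
 *	idr_remove(&my_idr, id);
 */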

#ifndef TEST                        /* to test in user space... */
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/export.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>

#define MAX_IDR_SHIFT		(sizeof(int) * 8 - 1)
#define MAX_IDR_BIT		(1U << MAX_IDR_SHIFT)

/* Leave the possibility of an incomplete final layer */
#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)

/* Number of idr_layer structs to leave in free list */
#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)

static struct kmem_cache *idr_layer_cache;
static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
static DEFINE_PER_CPU(int, idr_preload_cnt);
static DEFINE_SPINLOCK(simple_ida_lock);

/* the maximum ID which can be allocated given idr->layers */
static int idr_max(int layers)
{
	int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);

	return (1 << bits) - 1;
}
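
/*
 * For example, assuming IDR_BITS == 8 as defined in <linux/idr.h>,
 * idr_max(1) == 255 and idr_max(2) == 65535; the min_t() above caps the
 * shift so the result never exceeds INT_MAX.
 */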

/*
 * Prefix mask for an idr_layer at @layer.  For layer 0, the prefix mask
 * is all bits except for the lower IDR_BITS; for layer 1, all bits
 * except for the lower 2 * IDR_BITS; and so on.
 */
static int idr_layer_prefix_mask(int layer)
{
	return ~idr_max(layer + 1);
}
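
/*
 * E.g., with IDR_BITS == 8 and a 32-bit int, idr_layer_prefix_mask(0)
 * == ~idr_max(1) == ~0xff == 0xffffff00: a layer-0 node covers ids that
 * share everything but the low 8 index bits.
 */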

static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return p;
}

/**
 * idr_layer_alloc - allocate a new idr_layer
 * @gfp_mask: allocation mask
 * @layer_idr: optional idr to allocate from
 *
 * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
 * one from the per-cpu preload buffer.  If @layer_idr is not %NULL, fetch
 * an idr_layer from @layer_idr->id_free.
 *
 * @layer_idr is to maintain backward compatibility with the old alloc
 * interface - idr_pre_get() and idr_get_new*() - and will be removed
 * together with the per-pool preload buffer.
 */
static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
{
	struct idr_layer *new;

	/* this is the old path, bypass to get_from_free_list() */
	if (layer_idr)
		return get_from_free_list(layer_idr);

	/*
	 * Try to allocate directly from kmem_cache.  We want to try this
	 * before the preload buffer; otherwise, non-preloading idr_alloc()
	 * users will end up taking advantage of preloading ones.  As the
	 * following is allowed to fail for preloaded cases, suppress the
	 * warning this time.
	 */
	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
	if (new)
		return new;

	/*
	 * Try to fetch one from the per-cpu preload buffer if in process
	 * context.  See idr_preload() for details.
	 */
	if (!in_interrupt()) {
		preempt_disable();
		new = __this_cpu_read(idr_preload_head);
		if (new) {
			__this_cpu_write(idr_preload_head, new->ary[0]);
			__this_cpu_dec(idr_preload_cnt);
			new->ary[0] = NULL;
		}
		preempt_enable();
		if (new)
			return new;
	}

	/*
	 * Both failed.  Try kmem_cache again w/o adding __GFP_NOWARN so
	 * that the memory allocation failure warning is printed as intended.
	 */
	return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
	struct idr_layer *layer;

	layer = container_of(head, struct idr_layer, rcu_head);
	kmem_cache_free(idr_layer_cache, layer);
}

static inline void free_layer(struct idr *idr, struct idr_layer *p)
{
	if (idr->hint && idr->hint == p)
		RCU_INIT_POINTER(idr->hint, NULL);
	call_rcu(&p->rcu_head, idr_layer_rcu_free);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, p->bitmap);
	/*
	 * If this layer is full, mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (bitmap_full(p->bitmap, IDR_SIZE)) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), p->bitmap);
	}
}

int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < MAX_IDR_FREE) {
		struct idr_layer *new;
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return 0;
		move_to_free_list(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(__idr_pre_get);

/**
 * sub_alloc - try to allocate an id without growing the tree depth
 * @idp: idr handle
 * @starting_id: id to start search at
 * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
 * @gfp_mask: allocation mask for idr_layer_alloc()
 * @layer_idr: optional idr passed to idr_layer_alloc()
 *
 * Allocate an id in range [@starting_id, INT_MAX] from @idp without
 * growing its depth.  Returns
 *
 *  the allocated id >= 0 if successful,
 *  -EAGAIN if the tree needs to grow for allocation to succeed,
 *  -ENOSPC if the id space is exhausted,
 *  -ENOMEM if more idr_layers need to be allocated.
 */
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
		     gfp_t gfp_mask, struct idr *layer_idr)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while loop until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS * l)) & IDR_MASK;
		m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available, go back to the previous layer */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (id >= 1 << (idp->layers * IDR_BITS)) {
				*starting_id = id;
				return -EAGAIN;
			}
			p = pa[l];
			BUG_ON(!p);

			/* If we only need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS * l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_IDR_BIT) || (id < 0))
			return -ENOSPC;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = idr_layer_alloc(gfp_mask, layer_idr);
			if (!new)
				return -ENOMEM;
			new->layer = l - 1;
			new->prefix = id & idr_layer_prefix_mask(new->layer);
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa, gfp_t gfp_mask,
			      struct idr *layer_idr)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
			return -ENOMEM;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while (id > idr_max(layers)) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			WARN_ON_ONCE(p->prefix);
			continue;
		}
		if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure, tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->count = 0;
				bitmap_clear(new->bitmap, 0, IDR_SIZE);
				__move_to_free_list(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -ENOMEM;
		}
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers - 1;
		new->prefix = id & idr_layer_prefix_mask(new->layer);
		if (bitmap_full(p->bitmap, IDR_SIZE))
			__set_bit(0, new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
	if (v == -EAGAIN)
		goto build_up;
	return v;
}

/*
 * @id and @pa are from a successful allocation from idr_get_empty_slot().
 * Install the user pointer @ptr and mark the slot full.
 */
static void idr_fill_slot(struct idr *idr, void *ptr, int id,
			  struct idr_layer **pa)
{
	/* update the hint used for lookup; it is cleared in free_layer() */
	rcu_assign_pointer(idr->hint, pa[0]);

	rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
	pa[0]->count++;
	idr_mark_full(pa, id);
}

int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	int rv;

	rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp);
	if (rv < 0)
		return rv == -ENOMEM ? -EAGAIN : rv;

	idr_fill_slot(idp, ptr, rv, pa);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(__idr_get_new_above);

/**
 * idr_preload - preload for idr_alloc()
 * @gfp_mask: allocation mask to use for preloading
 *
 * Preload per-cpu layer buffer for idr_alloc().  Can only be used from
 * process context and each idr_preload() invocation should be matched with
 * idr_preload_end().  Note that preemption is disabled while preloaded.
 *
 * The first idr_alloc() in the preloaded section can be treated as if it
 * were invoked with @gfp_mask used for preloading.  This allows using more
 * permissive allocation masks for idrs protected by spinlocks.
 *
 * For example, if idr_alloc() below fails, the failure can be treated as
 * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(lock);
 *
 *	id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
 *
 *	spin_unlock(lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		error;
 */
void idr_preload(gfp_t gfp_mask)
{
	/*
	 * Consuming the preload buffer from non-process context breaks the
	 * preload allocation guarantee.  Disallow usage from those contexts.
	 */
	WARN_ON_ONCE(in_interrupt());
	might_sleep_if(gfp_mask & __GFP_WAIT);

	preempt_disable();

	/*
	 * idr_alloc() is likely to succeed w/o full idr_layer buffer and
	 * the return value from idr_alloc() needs to be checked for failure
	 * anyway.  Silently give up if allocation fails.  The caller can
	 * treat failures from idr_alloc() as if idr_alloc() were called
	 * with @gfp_mask, which should be enough.
	 */
	while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
		struct idr_layer *new;

		preempt_enable();
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		preempt_disable();
		if (!new)
			break;

		/* link the new one to the per-cpu preload list */
		new->ary[0] = __this_cpu_read(idr_preload_head);
		__this_cpu_write(idr_preload_head, new);
		__this_cpu_inc(idr_preload_cnt);
	}
}
EXPORT_SYMBOL(idr_preload);

/**
 * idr_alloc - allocate new idr entry
 * @idr: the (initialized) idr
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive, <= 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Allocate an id in [start, end) and associate it with @ptr.  If no ID is
 * available in the specified range, returns -ENOSPC.  On memory allocation
 * failure, returns -ENOMEM.
 *
 * Note that @end is treated as max when <= 0.  This is to always allow
 * using @start + N as @end as long as N is inside integer range.
 *
 * The user is responsible for exclusively synchronizing all operations
 * which may modify @idr.  However, read-only accesses such as idr_find()
 * or iteration can be performed under RCU read lock provided the user
 * destroys @ptr in an RCU-safe way after removal from the idr.
 */
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int max = end > 0 ? end - 1 : INT_MAX;	/* inclusive upper limit */
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	int id;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	/* sanity checks */
	if (WARN_ON_ONCE(start < 0))
		return -EINVAL;
	if (unlikely(max < start))
		return -ENOSPC;

	/* allocate id */
	id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL);
	if (unlikely(id < 0))
		return id;
	if (unlikely(id > max))
		return -ENOSPC;

	idr_fill_slot(idr, ptr, id, pa);
	return id;
}
EXPORT_SYMBOL_GPL(idr_alloc);
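
/*
 * For example (illustrative, not from this file): to allocate any
 * available id,
 *
 *	id = idr_alloc(idr, ptr, 0, 0, GFP_KERNEL);
 *
 * and to allocate exactly id 42 or fail with -ENOSPC,
 *
 *	id = idr_alloc(idr, ptr, 42, 43, GFP_KERNEL);
 */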

/**
 * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
 * @idr: the (initialized) idr
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive, <= 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Essentially the same as idr_alloc, but prefers to allocate progressively
 * higher ids if it can.  If the "cur" counter wraps, it will start again
 * at the "start" end of the range and may allocate an id that has been
 * used (and freed) before.
 */
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end,
			gfp_t gfp_mask)
{
	int id;

	id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask);
	if (id == -ENOSPC)
		id = idr_alloc(idr, ptr, start, end, gfp_mask);

	if (likely(id >= 0))
		idr->cur = id + 1;
	return id;
}
EXPORT_SYMBOL(idr_alloc_cyclic);
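
/*
 * A hypothetical sketch: cyclic ids make it unlikely that a just-freed
 * id is handed out again right away (req and req_idr are illustrative):
 *
 *	req->id = idr_alloc_cyclic(&req_idr, req, 1, 0, GFP_KERNEL);
 *	if (req->id < 0)
 *		return req->id;
 */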

static void idr_remove_warning(int id)
{
	WARN(1, "idr_remove called for id=%d which is not allocated.\n", id);
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_IDR_LEVEL + 1];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, p->bitmap))) {
		__clear_bit(n, p->bitmap);
		rcu_assign_pointer(p->ary[n], NULL);
		to_free = NULL;
		while (*paa && !--((**paa)->count)) {
			if (to_free)
				free_layer(idp, to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(idp, to_free);
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	if (id < 0)
		return;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since, when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->count = 0;
		bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
		free_layer(idp, to_free);
	}
	while (idp->id_free_cnt >= MAX_IDR_FREE) {
		p = get_from_free_list(idp);
		/*
		 * Note: we don't call the rcu callback here, since the only
		 * layers that fall into the freelist are those that have been
		 * preallocated.
		 */
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_remove);

void __idr_remove_all(struct idr *idp)
{
	int n, id, max;
	int bt_mask;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	rcu_assign_pointer(idp->top, NULL);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		bt_mask = id;
		id += 1 << n;
		/* Get the highest bit that the above add changed from 0->1. */
		while (n < fls(id ^ bt_mask)) {
			if (p)
				free_layer(idp, p);
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->layers = 0;
}
EXPORT_SYMBOL(__idr_remove_all);

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 *
 * Free all id mappings and all idr_layers.  After this function, @idp is
 * completely unused and can be freed / recycled.  The caller is
 * responsible for ensuring that no one else accesses @idp during or after
 * idr_destroy().
 *
 * A typical clean-up sequence for objects stored in an idr tree will use
 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
 * free up the id mappings and cached idr_layers.
 */
void idr_destroy(struct idr *idp)
{
	__idr_remove_all(idp);

	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);

void *idr_find_slowpath(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	if (id < 0)
		return NULL;

	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer + 1) * IDR_BITS;

	if (id > idr_max(p->layer + 1))
		return NULL;
	BUG_ON(n == 0);

	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer * IDR_BITS);
		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
	}
	return (void *)p;
}
EXPORT_SYMBOL(idr_find_slowpath);

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference_raw(idp->top);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);
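
/*
 * An illustrative callback (struct my_obj and my_idr are hypothetical):
 *
 *	static int my_print(int id, void *p, void *data)
 *	{
 *		struct my_obj *obj = p;
 *
 *		pr_info("id %d -> obj %p\n", id, obj);
 *		return 0;
 *	}
 *
 *	err = idr_for_each(&my_idr, my_print, NULL);
 */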

/**
 * idr_get_next - lookup next object starting from the given id
 * @idp: idr handle
 * @nextidp: pointer to lookup key
 *
 * Returns a pointer to the registered object with the lowest id which is
 * greater than or equal to *@nextidp.  On success, *@nextidp is updated
 * to that object's id so the caller can continue the iteration from the
 * following id.
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
	struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];
	int id = *nextidp;
	int n, max;

	/* find first ent */
	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer + 1) * IDR_BITS;
	max = idr_max(p->layer + 1);

	while (id >= 0 && id <= max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			*nextidp = id;
			return p;
		}

		/*
		 * Proceed to the next layer at the current level.  Unlike
		 * idr_for_each(), @id isn't guaranteed to be aligned to
		 * layer boundary at this point and adding 1 << n may
		 * incorrectly skip IDs.  Make sure we jump to the
		 * beginning of the next layer using round_up().
		 */
		id = round_up(id + 1, 1 << n);
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(idr_get_next);
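
/*
 * Typical iteration; this is essentially what the idr_for_each_entry()
 * macro in <linux/idr.h> expands to:
 *
 *	int id = 0;
 *	void *p;
 *
 *	while ((p = idr_get_next(idp, &id)) != NULL) {
 *		... use p ...
 *		id++;
 *	}
 */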

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	if (id < 0)
		return ERR_PTR(-EINVAL);

	p = idp->top;
	if (!p)
		return ERR_PTR(-EINVAL);

	n = (p->layer + 1) * IDR_BITS;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);
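
/*
 * Illustrative use, swapping in a new object for an existing id:
 *
 *	old = idr_replace(&my_idr, new_obj, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 *
 * If lookups run under RCU, @old must then be freed in an RCU-safe way.
 */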

void __init idr_init_cache(void)
{
	idr_layer_cache = kmem_cache_create("idr_layer_cache",
				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp:	idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);

/**
 * DOC: IDA description
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than with a full-blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida:	ida handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to taking locks and calling
 * ida_get_new_above().  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!__idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida:	ida handle
 * @starting_id: id to start search at
 * @p_id:	pointer to the allocated handle
 *
 * Allocate a new ID above or equal to @starting_id.  It should be called
 * with any required locks held.
 *
 * If memory is required, it will return %-EAGAIN; you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
	if (t < 0)
		return t == -ENOMEM ? -EAGAIN : t;

	if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if the bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* look for an empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_IDR_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have a small memory footprint.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);
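
/*
 * The canonical calling pattern, as used by ida_simple_get() below
 * (my_ida and my_lock are illustrative):
 *
 *	again:
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		ret = ida_get_new_above(&my_ida, 0, &id);
 *		spin_unlock(&my_lock);
 *		if (ret == -EAGAIN)
 *			goto again;
 */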

/**
 * ida_remove - remove the given ID
 * @ida:	ida handle
 * @id:		ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, p->bitmap);

	/* guard against a missing leaf bitmap (unallocated id) */
	bitmap = (void *)p->ary[n];
	if (!bitmap || !test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida:		ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < 0x80000000)
 * @end: the maximum id (exclusive, < 0x80000000 or 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.
 *
 * Use ida_simple_remove() to get rid of an id.
 */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask)
{
	int ret, id;
	unsigned int max;
	unsigned long flags;

	BUG_ON((int)start < 0);
	BUG_ON((int)end < 0);

	if (end == 0)
		max = 0x80000000;
	else {
		BUG_ON(end < start);
		max = end - 1;
	}

again:
	if (!ida_pre_get(ida, gfp_mask))
		return -ENOMEM;

	spin_lock_irqsave(&simple_ida_lock, flags);
	ret = ida_get_new_above(ida, start, &id);
	if (!ret) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	spin_unlock_irqrestore(&simple_ida_lock, flags);

	if (unlikely(ret == -EAGAIN))
		goto again;

	return ret;
}
EXPORT_SYMBOL(ida_simple_get);
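
/*
 * Illustrative usage; my_ida is assumed to be a DEFINE_IDA() instance:
 *
 *	id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return id;
 *	...
 *	ida_simple_remove(&my_ida, id);
 */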

/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
{
	unsigned long flags;

	BUG_ON((int)id < 0);
	spin_lock_irqsave(&simple_ida_lock, flags);
	ida_remove(ida, id);
	spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);

/**
 * ida_init - initialize ida handle
 * @ida:	ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);