/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer or whatever, we treat it as a (void *).  You can pass this
 * id to a user for him to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep up to IDR_FREE_MAX layers in a local
 * pool) so we don't need to go to the memory "store" during an id
 * allocation, and so you don't need to be too concerned about locking
 * and conflicts with the slab allocator.
 */

#ifndef TEST                        // to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>

static struct kmem_cache *idr_layer_cache;

static struct idr_layer *alloc_layer(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return p;
}

/* only called when idp->lock is held */
static void __free_layer(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void free_layer(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__free_layer(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, &p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), &p->bitmap);
	}
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp:	idr handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to taking your locks and
 * calling idr_get_new() or idr_get_new_above().  It preallocates
 * enough memory to satisfy the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < IDR_FREE_MAX) {
		struct idr_layer *new;
		new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return 0;
		free_layer(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);
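
/*
 * Typical usage, as a sketch (my_idr, my_lock and obj stand in for the
 * caller's own idr, lock and object; they are not defined here):
 *
 *	int id, ret;
 *
 *	do {
 *		if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		ret = idr_get_new(&my_idr, obj, &id);
 *		spin_unlock(&my_lock);
 *	} while (ret == -EAGAIN);
 *
 * Preallocation and allocation are split so that the allocation itself
 * never sleeps and may run under the caller's spinlock; another task
 * may consume the preallocated layers between the two calls, which is
 * why the loop retries on -EAGAIN.
 */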

static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;
	unsigned long bm;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (!(p = pa[l])) {
				*starting_id = id;
				return -2;
			}

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return -3;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			if (!(new = alloc_layer(idp)))
				return -1;
			p->ary[m] = new;
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = alloc_layer(idp)))
			return -1;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
		layers++;
		if (!p->count)
			continue;
		if (!(new = alloc_layer(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				__free_layer(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -1;
		}
		new->ary[0] = p;
		new->count = 1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	idp->top = p;
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa);
	if (v == -2)
		goto build_up;
	return v;
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	int id;

	id = idr_get_empty_slot(idp, starting_id, pa);
	if (id >= 0) {
		/*
		 * Successfully found an empty slot.  Install the user
		 * pointer and mark the slot full.
		 */
		pa[0]->ary[id & IDR_MASK] = (struct idr_layer *)ptr;
		pa[0]->count++;
		idr_mark_full(pa, id);
	}

	return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, starting_id);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0) {
		if (rv == -1)
			return -EAGAIN;
		else /* Will be -3 */
			return -ENOSPC;
	}
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
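
/*
 * Sketch: keeping id 0 reserved for special use by always allocating
 * at or above 1 (my_idr and obj stand in for the caller's own names;
 * idr_pre_get() is assumed to have been called as above):
 *
 *	ret = idr_get_new_above(&my_idr, obj, 1, &id);
 *	if (ret)
 *		return ret;
 *	(id is now guaranteed to be >= 1)
 */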

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0) {
		if (rv == -1)
			return -EAGAIN;
		else /* Will be -3 */
			return -ENOSPC;
	}
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
	       "idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL];
	struct idr_layer ***paa = &pa[0];
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
		__clear_bit(n, &p->bitmap);
		p->ary[n] = NULL;
		while (*paa && !--((**paa)->count)) {
			free_layer(idp, **paa);
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {  // We can drop a layer

		p = idp->top->ary[0];
		idp->top->bitmap = idp->top->count = 0;
		free_layer(idp, idp->top);
		idp->top = p;
		--idp->layers;
	}
	while (idp->id_free_cnt >= IDR_FREE_MAX) {
		p = alloc_layer(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
	return;
}
EXPORT_SYMBOL(idr_remove);

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idr_layers, but this
 * function will remove all id mappings and leave all idr_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
	int n, id, max;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		id += 1 << n;
		while (n < fls(id)) {
			if (p) {
				memset(p, 0, sizeof *p);
				free_layer(idp, p);
			}
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->top = NULL;
	idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
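
/*
 * The clean-up sequence described above, as a sketch (my_idr stands in
 * for the caller's idr and free_fn() for a caller-supplied callback
 * that frees one object):
 *
 *	static int free_fn(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&my_idr, free_fn, NULL);
 *	idr_remove_all(&my_idr);
 *	idr_destroy(&my_idr);
 */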

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
	while (idp->id_free_cnt) {
		struct idr_layer *p = alloc_layer(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	n = idp->layers * IDR_BITS;
	p = idp->top;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return NULL;

	while (n > 0 && p) {
		n -= IDR_BITS;
		p = p->ary[(id >> n) & IDR_MASK];
	}
	return (void *)p;
}
EXPORT_SYMBOL(idr_find);
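
/*
 * Lookup sketch (my_idr, my_lock and struct my_obj stand in for the
 * caller's names; the lock is whatever the caller already uses to
 * serialize against idr_get_new() and idr_remove()):
 *
 *	struct my_obj *obj;
 *
 *	spin_lock(&my_lock);
 *	obj = idr_find(&my_idr, id);
 *	spin_unlock(&my_lock);
 *	if (!obj)
 *		return -ENOENT;
 */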

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);
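
/*
 * Callback sketch: counting all registered pointers (count_fn and
 * my_idr are illustrative names only):
 *
 *	static int count_fn(int id, void *p, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *	idr_for_each(&my_idr, count_fn, &count);
 */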

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * An %ERR_PTR(-ENOENT) return indicates that @id was not found.
 * An %ERR_PTR(-EINVAL) return indicates that @id was not within valid
 * constraints.
 *
 * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	n = idp->layers * IDR_BITS;
	p = idp->top;

	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	p->ary[n] = ptr;

	return old_p;
}
EXPORT_SYMBOL(idr_replace);
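
/*
 * Replacement sketch; note that errors come back ERR_PTR-encoded, so
 * the result must be checked with IS_ERR() (my_idr and new_obj are
 * illustrative names):
 *
 *	old = idr_replace(&my_idr, new_obj, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 *	(dispose of old as appropriate)
 */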

static void idr_cache_ctor(struct kmem_cache *idr_layer_cache, void *idr_layer)
{
	memset(idr_layer, 0, sizeof(struct idr_layer));
}

void __init idr_init_cache(void)
{
	idr_layer_cache = kmem_cache_create("idr_layer_cache",
				sizeof(struct idr_layer), 0, SLAB_PANIC,
				idr_cache_ctor);
}

/**
 * idr_init - initialize idr handle
 * @idp:	idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);
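
/*
 * A statically allocated idr does not need a runtime idr_init() call;
 * it can be defined ready to use with the DEFINE_IDR() helper from
 * <linux/idr.h>:
 *
 *	static DEFINE_IDR(my_idr);
 */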


/*
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full-blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida:	ida handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to taking your locks and
 * calling ida_get_new() or ida_get_new_above().  It preallocates
 * enough memory to satisfy the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida:	ida handle
 * @starting_id:	id to start search at
 * @p_id:	pointer to the allocated handle
 *
 * Allocate a new ID at or above @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... 0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
	if (t < 0) {
		if (t == -1)
			return -EAGAIN;
		else /* will be -3 */
			return -ENOSPC;
	}

	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		pa[0]->ary[idr_id & IDR_MASK] = (void *)bitmap;
		pa[0]->count++;
	}

	/* look for an empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_ID_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have a small memory footprint.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = alloc_layer(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);
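
/*
 * The ida variant of the allocation loop, as a sketch (my_ida and
 * my_lock stand in for the caller's own names):
 *
 *	int id, ret;
 *
 *	do {
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		ret = ida_get_new(&my_ida, &id);
 *		spin_unlock(&my_lock);
 *	} while (ret == -EAGAIN);
 *
 * and later, to release the id:
 *
 *	ida_remove(&my_ida, id);
 */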

/**
 * ida_get_new - allocate new ID
 * @ida:	ida handle
 * @p_id:	pointer to the allocated handle
 *
 * Allocate a new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range 0 ... 0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);

/**
 * ida_remove - remove the given ID
 * @ida:	ida handle
 * @id:		ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, &p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, &p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	printk(KERN_WARNING
	       "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida:	ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_init - initialize ida handle
 * @ida:	ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);
853