/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer or whatever; we treat it as a (void *).  You can pass this
 * id to a user for them to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep up to IDR_FREE_MAX layers in a local
 * pool) so we don't need to go to the memory "store" during an id
 * allocation, and so you don't need to be too concerned about locking
 * and conflicts with the slab allocator.
 */
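
/*
 * A minimal life cycle sketch (illustrative only; my_idr, struct my_obj,
 * obj, ret and id are hypothetical, and the -EAGAIN retry described at
 * idr_pre_get() below is elided):
 *
 *	static DEFINE_IDR(my_idr);
 *
 *	if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *		return -ENOMEM;
 *	ret = idr_get_new(&my_idr, obj, &id);	(id now maps to obj)
 *	...
 *	obj = idr_find(&my_idr, id);		(translate id back)
 *	...
 *	idr_remove(&my_idr, id);		(release the id)
 */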

#ifndef TEST			/* to test in user space... */
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>

static struct kmem_cache *idr_layer_cache;

static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return p;
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
	struct idr_layer *layer;

	layer = container_of(head, struct idr_layer, rcu_head);
	kmem_cache_free(idr_layer_cache, layer);
}

static inline void free_layer(struct idr_layer *p)
{
	call_rcu(&p->rcu_head, idr_layer_rcu_free);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, &p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), &p->bitmap);
	}
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp:	idr handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * idr_get_new* functions. It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < IDR_FREE_MAX) {
		struct idr_layer *new;
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return 0;
		move_to_free_list(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);
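
/*
 * A sketch of the intended calling convention (my_idr, my_lock, obj,
 * ret and id are hypothetical).  idr_pre_get() runs unlocked and may
 * sleep with GFP_KERNEL; the allocation proper then happens under the
 * caller's lock, and -EAGAIN sends the caller back to refill the pool,
 * since another allocator may have consumed the preallocated layers in
 * the meantime:
 *
 *	do {
 *		if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		ret = idr_get_new(&my_idr, obj, &id);
 *		spin_unlock(&my_lock);
 *	} while (ret == -EAGAIN);
 */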

static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;
	unsigned long bm;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while loop until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available, go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (!(p = pa[l])) {
				*starting_id = id;
				return IDR_NEED_TO_GROW;
			}

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return IDR_NOMORE_SPACE;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = get_from_free_list(idp);
			if (!new)
				return -1;
			new->layer = l-1;
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = get_from_free_list(idp)))
			return -1;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			continue;
		}
		if (!(new = get_from_free_list(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure, tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				__move_to_free_list(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -1;
		}
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers-1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa);
	if (v == IDR_NEED_TO_GROW)
		goto build_up;
	return v;
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	int id;

	id = idr_get_empty_slot(idp, starting_id, pa);
	if (id >= 0) {
		/*
		 * Successfully found an empty slot.  Install the user
		 * pointer and mark the slot full.
		 */
		rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
				(struct idr_layer *)ptr);
		pa[0]->count++;
		idr_mark_full(pa, id);
	}

	return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, starting_id);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
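
/*
 * A sketch (my_idr, obj, ret and id are hypothetical): keep ids below
 * 32 reserved for other uses by starting the search at 32:
 *
 *	ret = idr_get_new_above(&my_idr, obj, 32, &id);
 *
 * On success, ret is 0 and id is >= 32.
 */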

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
		"idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
		__clear_bit(n, &p->bitmap);
		rcu_assign_pointer(p->ary[n], NULL);
		to_free = NULL;
		while (*paa && !--((**paa)->count)) {
			if (to_free)
				free_layer(to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(to_free);
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->bitmap = to_free->count = 0;
		free_layer(to_free);
	}
	while (idp->id_free_cnt >= IDR_FREE_MAX) {
		p = get_from_free_list(idp);
		/*
		 * Note: we don't call the rcu callback here, since the only
		 * layers that fall into the freelist are those that have been
		 * preallocated.
		 */
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_remove);

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idp_layers, but this
 * function will remove all id mappings and leave all idp_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
	int n, id, max;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	rcu_assign_pointer(idp->top, NULL);
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		id += 1 << n;
		while (n < fls(id)) {
			if (p)
				free_layer(p);
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
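
/*
 * The clean-up sequence described above might look like this (my_idr
 * and free_fn are hypothetical, and the sketch assumes the stored
 * objects were kmalloc()ed):
 *
 *	static int free_fn(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&my_idr, free_fn, NULL);
 *	idr_remove_all(&my_idr);
 *	idr_destroy(&my_idr);
 */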

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), provided that the
 * leaf pointers' lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	p = rcu_dereference(idp->top);
	if (!p)
		return NULL;
	n = (p->layer+1) * IDR_BITS;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return NULL;
	BUG_ON(n == 0);

	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
	}
	return (void *)p;
}
EXPORT_SYMBOL(idr_find);
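
/*
 * A lookup sketch under RCU (my_idr, obj and use() are hypothetical;
 * it assumes the objects themselves are freed via RCU so the pointer
 * stays valid for the duration of the read-side critical section):
 *
 *	rcu_read_lock();
 *	obj = idr_find(&my_idr, id);
 *	if (obj)
 *		use(obj);
 *	rcu_read_unlock();
 */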

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time. If it returns anything other
 * than 0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference(idp->top);
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);
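
/*
 * For instance, counting the registered pointers through the @data
 * argument (my_idr and count_fn are hypothetical):
 *
 *	static int count_fn(int id, void *p, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *	idr_for_each(&my_idr, count_fn, &count);
 */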

/**
 * idr_get_next - look up the next object above or equal to the given id
 * @idp: idr handle
 * @nextidp: pointer to the lookup key
 *
 * Returns a pointer to the registered object with the lowest id that is
 * greater than or equal to *@nextidp.  On success, *@nextidp is updated
 * to that id; if no such object exists, %NULL is returned.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
	struct idr_layer *p, *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];
	int id = *nextidp;
	int n, max;

	/* find first entry */
	n = idp->layers * IDR_BITS;
	max = 1 << n;
	p = rcu_dereference(idp->top);
	if (!p)
		return NULL;

	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			*nextidp = id;
			return p;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}
	return NULL;
}
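
/*
 * idr_get_next() supports simple iteration (my_idr, struct my_obj, obj
 * and use() are hypothetical):
 *
 *	int id = 0;
 *	struct my_obj *obj;
 *
 *	while ((obj = idr_get_next(&my_idr, &id)) != NULL) {
 *		use(obj);
 *		id++;	(advance past the id just returned)
 *	}
 */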

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	p = idp->top;
	if (!p)
		return ERR_PTR(-EINVAL);

	n = (p->layer+1) * IDR_BITS;

	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);
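
/*
 * A sketch (my_idr, new_obj and old are hypothetical): swap in a new
 * object and dispose of the old one, checking the error-pointer
 * returns documented above; assumes the old object was kmalloc()ed:
 *
 *	old = idr_replace(&my_idr, new_obj, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 *	kfree(old);
 */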

void __init idr_init_cache(void)
{
	idr_layer_cache = kmem_cache_create("idr_layer_cache",
				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp:	idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);
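
/*
 * A handle often lives inside a larger structure (hypothetical sketch;
 * statically allocated handles can use DEFINE_IDR() instead):
 *
 *	struct my_driver {
 *		struct idr ctx_idr;
 *		...
 *	};
 *
 *	idr_init(&drv->ctx_idr);
 */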


/*
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than with a full-blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */
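
/*
 * A usage sketch (my_ida, my_lock, ret and id are hypothetical),
 * mirroring the idr allocation protocol but without a pointer payload:
 *
 *	do {
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		ret = ida_get_new(&my_ida, &id);
 *		spin_unlock(&my_lock);
 *	} while (ret == -EAGAIN);
 *	...
 *	ida_remove(&my_ida, id);
 */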

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida:	ida handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida:	ida handle
 * @starting_id: id to start search at
 * @p_id:	pointer to the allocated handle
 *
 * Allocate a new ID above or equal to @starting_id.  It should be
 * called with any required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... 0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
	if (t < 0)
		return _idr_rc_to_errno(t);

	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* look for an empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_ID_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have a small memory footprint.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);

/**
 * ida_get_new - allocate new ID
 * @ida:	ida handle
 * @p_id:	pointer to the allocated handle
 *
 * Allocate a new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range 0 ... 0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);

/**
 * ida_remove - remove the given ID
 * @ida:	ida handle
 * @id:		ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, &p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, &p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	printk(KERN_WARNING
	       "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida:	ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_init - initialize ida handle
 * @ida:	ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);
933