/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer or whatever, we treat it as a (void *).  You can pass this
 * id to a user for them to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep up to IDR_FREE_MAX spare layers) in a
 * local pool so we don't need to go to the memory "store" during an id
 * allocation, and so you don't need to be too concerned about locking
 * and conflicts with the slab allocator.
 */
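
/*
 * A minimal usage sketch (illustration only, not part of the original
 * file; "my_idr", "my_lock", "obj" and "id" are hypothetical
 * caller-side names):
 *
 *	DEFINE_IDR(my_idr);
 *	DEFINE_SPINLOCK(my_lock);
 *
 *	if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	err = idr_get_new(&my_idr, obj, &id);
 *	spin_unlock(&my_lock);
 *
 * Later the id can be translated back with idr_find(&my_idr, id) and
 * released with idr_remove(&my_idr, id).
 */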

#ifndef TEST                        // to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>

static struct kmem_cache *idr_layer_cache;

static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return p;
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
	struct idr_layer *layer;

	layer = container_of(head, struct idr_layer, rcu_head);
	kmem_cache_free(idr_layer_cache, layer);
}

static inline void free_layer(struct idr_layer *p)
{
	call_rcu(&p->rcu_head, idr_layer_rcu_free);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, &p->bitmap);
	/*
	 * If this layer is full, mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), &p->bitmap);
	}
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp:	idr handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * idr_get_new* functions.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < IDR_FREE_MAX) {
		struct idr_layer *new;
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return 0;
		move_to_free_list(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);
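
/*
 * A sketch of the calling convention described above (caller-side
 * code, not part of this file; "my_alloc_id" and its parameters are
 * assumed names).  idr_pre_get() runs unlocked and may sleep with
 * GFP_KERNEL; the allocation itself runs under the caller's lock and
 * is retried whenever it returned -EAGAIN:
 *
 *	static int my_alloc_id(struct idr *idr, spinlock_t *lock, void *ptr)
 *	{
 *		int id, err;
 *
 *		do {
 *			if (!idr_pre_get(idr, GFP_KERNEL))
 *				return -ENOMEM;
 *			spin_lock(lock);
 *			err = idr_get_new(idr, ptr, &id);
 *			spin_unlock(lock);
 *		} while (err == -EAGAIN);
 *
 *		return err ? err : id;
 *	}
 */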

static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;
	unsigned long bm;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while loop until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available; go back to the previous layer */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (id >= 1 << (idp->layers * IDR_BITS)) {
				*starting_id = id;
				return IDR_NEED_TO_GROW;
			}
			p = pa[l];
			BUG_ON(!p);

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return IDR_NOMORE_SPACE;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = get_from_free_list(idp);
			if (!new)
				return -1;
			new->layer = l-1;
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = get_from_free_list(idp)))
			return -1;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			continue;
		}
		if (!(new = get_from_free_list(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure, tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				__move_to_free_list(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -1;
		}
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers-1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa);
	if (v == IDR_NEED_TO_GROW)
		goto build_up;
	return v;
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	int id;

	id = idr_get_empty_slot(idp, starting_id, pa);
	if (id >= 0) {
		/*
		 * Successfully found an empty slot.  Install the user
		 * pointer and mark the slot full.
		 */
		rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
				(struct idr_layer *)ptr);
		pa[0]->count++;
		idr_mark_full(pa, id);
	}

	return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, starting_id);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
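
/*
 * Sketch: the same retry protocol as for idr_get_new(), but skipping
 * ids below a caller-chosen floor (here 100), e.g. to reserve low ids
 * for static assignment.  "my_idr", "my_lock", "ptr" and "id" are
 * assumed names:
 *
 *	do {
 *		if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		err = idr_get_new_above(&my_idr, ptr, 100, &id);
 *		spin_unlock(&my_lock);
 *	} while (err == -EAGAIN);
 */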

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
		"idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
		__clear_bit(n, &p->bitmap);
		rcu_assign_pointer(p->ary[n], NULL);
		to_free = NULL;
		while (*paa && !--((**paa)->count)) {
			if (to_free)
				free_layer(to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(to_free);
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore, since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->bitmap = to_free->count = 0;
		free_layer(to_free);
	}
	while (idp->id_free_cnt >= IDR_FREE_MAX) {
		p = get_from_free_list(idp);
		/*
		 * Note: we don't call the rcu callback here, since the only
		 * layers that fall into the free list are those that have
		 * been preallocated.
		 */
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_remove);

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idr_layers, but this
 * function will remove all id mappings and leave all idr_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
	int n, id, max;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	rcu_assign_pointer(idp->top, NULL);
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		id += 1 << n;
		while (n < fls(id)) {
			if (p)
				free_layer(p);
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
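
/*
 * Sketch of the clean-up sequence described above ("my_idr" and
 * "free_my_obj" are hypothetical caller-side names; the callback
 * frees one stored object):
 *
 *	static int free_my_obj(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&my_idr, free_my_obj, NULL);
 *	idr_remove_all(&my_idr);
 *	idr_destroy(&my_idr);
 */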

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), provided that the
 * leaf pointers' lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer+1) * IDR_BITS;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return NULL;
	BUG_ON(n == 0);

	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
	}
	return (void *)p;
}
EXPORT_SYMBOL(idr_find);
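
/*
 * Sketch of an RCU-side lookup ("my_idr" and "obj" are assumed names;
 * this only works if the caller frees stored objects no earlier than
 * one RCU grace period after removal):
 *
 *	rcu_read_lock();
 *	obj = idr_find(&my_idr, id);
 *	if (obj)
 *		...use obj; it remains valid until rcu_read_unlock()...
 *	rcu_read_unlock();
 */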

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than 0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference_raw(idp->top);
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);
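
/*
 * Sketch of a callback suitable for idr_for_each() (hypothetical
 * example; a non-zero return stops the iteration and is propagated
 * back to the caller):
 *
 *	static int print_entry(int id, void *p, void *data)
 *	{
 *		printk(KERN_DEBUG "id %d -> %p\n", id, p);
 *		return 0;
 *	}
 *
 *	err = idr_for_each(&my_idr, print_entry, NULL);
 */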

/**
 * idr_get_next - lookup next object starting from the given id
 * @idp: idr handle
 * @nextidp: pointer to lookup key
 *
 * Returns a pointer to the registered object with the smallest id
 * greater than or equal to *@nextidp.  On success, *@nextidp is
 * updated to that id.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
	struct idr_layer *p, *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];
	int id = *nextidp;
	int n, max;

	/* find first ent */
	n = idp->layers * IDR_BITS;
	max = 1 << n;
	p = rcu_dereference(idp->top);
	if (!p)
		return NULL;

	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			*nextidp = id;
			return p;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}
	return NULL;
}
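
/*
 * Sketch of a full walk built on idr_get_next() ("my_idr" is an
 * assumed name).  Note the explicit id++: without it the same entry
 * would be returned forever, since the lookup starts at *nextidp:
 *
 *	int id = 0;
 *	void *p;
 *
 *	while ((p = idr_get_next(&my_idr, &id)) != NULL) {
 *		...process p, which is registered under id...
 *		id++;
 *	}
 */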

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	p = idp->top;
	if (!p)
		return ERR_PTR(-EINVAL);

	n = (p->layer+1) * IDR_BITS;

	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);
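
/*
 * Sketch of swapping in a new object for an existing id ("my_idr" and
 * "new_obj" are assumed names; the caller must hold whatever lock
 * serializes its writers):
 *
 *	old = idr_replace(&my_idr, new_obj, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 *	...dispose of old, minding any concurrent RCU readers...
 */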

void __init idr_init_cache(void)
{
	idr_layer_cache = kmem_cache_create("idr_layer_cache",
				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp:	idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);


/*
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than with a full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida:	ida handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * ida_get_new* functions.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida:	ida handle
 * @starting_id: id to start search at
 * @p_id:	pointer to the allocated handle
 *
 * Allocate a new ID above or equal to @starting_id.  It should be
 * called with any required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... 0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
	if (t < 0)
		return _idr_rc_to_errno(t);

	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* look for an empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_ID_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have a small memory footprint.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);

/**
 * ida_get_new - allocate new ID
 * @ida:	ida handle
 * @p_id:	pointer to the allocated handle
 *
 * Allocate a new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range 0 ... 0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);
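
/*
 * Sketch of the corresponding ida protocol ("my_ida" and "my_lock"
 * are assumed names).  As with idr: preallocate unlocked, allocate
 * under the caller's lock, retry on -EAGAIN, and serialize removal
 * the same way:
 *
 *	do {
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		err = ida_get_new(&my_ida, &id);
 *		spin_unlock(&my_lock);
 *	} while (err == -EAGAIN);
 *	...
 *	spin_lock(&my_lock);
 *	ida_remove(&my_ida, id);
 *	spin_unlock(&my_lock);
 */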

/**
 * ida_remove - remove the given ID
 * @ida:	ida handle
 * @id:		ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, &p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, &p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	printk(KERN_WARNING
	       "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida:	ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_init - initialize ida handle
 * @ida:	ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);
935