xref: /openbmc/linux/lib/idr.c (revision 1b23336a)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * 2002-10-18  written by Jim Houston jim.houston@ccur.com
31da177e4SLinus Torvalds  *	Copyright (C) 2002 by Concurrent Computer Corporation
41da177e4SLinus Torvalds  *	Distributed under the GNU GPL license version 2.
51da177e4SLinus Torvalds  *
61da177e4SLinus Torvalds  * Modified by George Anzinger to reuse immediately and to use
71da177e4SLinus Torvalds  * find bit instructions.  Also removed _irq on spinlocks.
81da177e4SLinus Torvalds  *
93219b3b7SNadia Derbey  * Modified by Nadia Derbey to make it RCU safe.
103219b3b7SNadia Derbey  *
111da177e4SLinus Torvalds  * Small id to pointer translation service.
121da177e4SLinus Torvalds  *
131da177e4SLinus Torvalds  * It uses a radix tree like structure as a sparse array indexed
141da177e4SLinus Torvalds  * by the id to obtain the pointer.  The bitmap makes allocating
151da177e4SLinus Torvalds  * a new id quick.
161da177e4SLinus Torvalds  *
171da177e4SLinus Torvalds  * You call it to allocate an id (an int) an associate with that id a
181da177e4SLinus Torvalds  * pointer or what ever, we treat it as a (void *).  You can pass this
191da177e4SLinus Torvalds  * id to a user for him to pass back at a later time.  You then pass
201da177e4SLinus Torvalds  * that id to this code and it returns your pointer.
 *
221da177e4SLinus Torvalds  * You can release ids at any time. When all ids are released, most of
231da177e4SLinus Torvalds  * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we
241da177e4SLinus Torvalds  * don't need to go to the memory "store" during an id allocate, just
251da177e4SLinus Torvalds  * so you don't need to be too concerned about locking and conflicts
261da177e4SLinus Torvalds  * with the slab allocator.
271da177e4SLinus Torvalds  */
281da177e4SLinus Torvalds 
291da177e4SLinus Torvalds #ifndef TEST                        // to test in user space...
301da177e4SLinus Torvalds #include <linux/slab.h>
311da177e4SLinus Torvalds #include <linux/init.h>
321da177e4SLinus Torvalds #include <linux/module.h>
331da177e4SLinus Torvalds #endif
345806f07cSJeff Mahoney #include <linux/err.h>
351da177e4SLinus Torvalds #include <linux/string.h>
361da177e4SLinus Torvalds #include <linux/idr.h>
371da177e4SLinus Torvalds 
38e18b890bSChristoph Lameter static struct kmem_cache *idr_layer_cache;
391da177e4SLinus Torvalds 
404ae53789SNadia Derbey static struct idr_layer *get_from_free_list(struct idr *idp)
411da177e4SLinus Torvalds {
421da177e4SLinus Torvalds 	struct idr_layer *p;
43c259cc28SRoland Dreier 	unsigned long flags;
441da177e4SLinus Torvalds 
45c259cc28SRoland Dreier 	spin_lock_irqsave(&idp->lock, flags);
461da177e4SLinus Torvalds 	if ((p = idp->id_free)) {
471da177e4SLinus Torvalds 		idp->id_free = p->ary[0];
481da177e4SLinus Torvalds 		idp->id_free_cnt--;
491da177e4SLinus Torvalds 		p->ary[0] = NULL;
501da177e4SLinus Torvalds 	}
51c259cc28SRoland Dreier 	spin_unlock_irqrestore(&idp->lock, flags);
521da177e4SLinus Torvalds 	return(p);
531da177e4SLinus Torvalds }
541da177e4SLinus Torvalds 
55cf481c20SNadia Derbey static void idr_layer_rcu_free(struct rcu_head *head)
56cf481c20SNadia Derbey {
57cf481c20SNadia Derbey 	struct idr_layer *layer;
58cf481c20SNadia Derbey 
59cf481c20SNadia Derbey 	layer = container_of(head, struct idr_layer, rcu_head);
60cf481c20SNadia Derbey 	kmem_cache_free(idr_layer_cache, layer);
61cf481c20SNadia Derbey }
62cf481c20SNadia Derbey 
/*
 * Free @p after an RCU grace period, since lockless readers may still
 * be traversing it (see idr_find()).
 */
static inline void free_layer(struct idr_layer *p)
{
	call_rcu(&p->rcu_head, idr_layer_rcu_free);
}
67cf481c20SNadia Derbey 
/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	/* Push @p onto the free list, chained through ary[0]. */
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}
751eec0056SSonny Rao 
/* Locked wrapper around __move_to_free_list(). */
static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}
871da177e4SLinus Torvalds 
88e33ac8bdSTejun Heo static void idr_mark_full(struct idr_layer **pa, int id)
89e33ac8bdSTejun Heo {
90e33ac8bdSTejun Heo 	struct idr_layer *p = pa[0];
91e33ac8bdSTejun Heo 	int l = 0;
92e33ac8bdSTejun Heo 
93e33ac8bdSTejun Heo 	__set_bit(id & IDR_MASK, &p->bitmap);
94e33ac8bdSTejun Heo 	/*
95e33ac8bdSTejun Heo 	 * If this layer is full mark the bit in the layer above to
96e33ac8bdSTejun Heo 	 * show that this part of the radix tree is full.  This may
97e33ac8bdSTejun Heo 	 * complete the layer above and require walking up the radix
98e33ac8bdSTejun Heo 	 * tree.
99e33ac8bdSTejun Heo 	 */
100e33ac8bdSTejun Heo 	while (p->bitmap == IDR_FULL) {
101e33ac8bdSTejun Heo 		if (!(p = pa[++l]))
102e33ac8bdSTejun Heo 			break;
103e33ac8bdSTejun Heo 		id = id >> IDR_BITS;
104e33ac8bdSTejun Heo 		__set_bit((id & IDR_MASK), &p->bitmap);
105e33ac8bdSTejun Heo 	}
106e33ac8bdSTejun Heo }
107e33ac8bdSTejun Heo 
/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp:	idr handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * idr_get_new* functions. It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	/* Top the free list up to IDR_FREE_MAX zeroed layers. */
	while (idp->id_free_cnt < IDR_FREE_MAX) {
		struct idr_layer *new;
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return (0);
		move_to_free_list(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);
1321da177e4SLinus Torvalds 
/*
 * Find a free slot with id >= *starting_id by walking down from the top
 * layer.  On success, returns the id and fills pa[] with the path of
 * layers from the leaf (pa[0]) upwards.  Returns IDR_NEED_TO_GROW (with
 * *starting_id updated) when the tree needs another level, or
 * IDR_NOMORE_SPACE when the id space is exhausted.
 */
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;
	unsigned long bm;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		bm = ~p->bitmap;	/* a set bit in bm means a free slot */
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			/* round id up to the start of the next subtree */
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (!(p = pa[l])) {
				*starting_id = id;
				return IDR_NEED_TO_GROW;
			}

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			/* skipped forward: clear the bits of id below level l */
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return IDR_NOMORE_SPACE;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = get_from_free_list(idp);
			if (!new)
				return -1;
			new->layer = l-1;
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}
1991da177e4SLinus Torvalds 
/*
 * Find (and reserve the path to) an empty slot with id >= starting_id,
 * growing the tree as needed.  pa[] receives the layer path as in
 * sub_alloc().  Returns the id on success, -1 if a layer could not be
 * taken from the free list (caller should idr_pre_get() and retry), or
 * IDR_NOMORE_SPACE if the id space is exhausted.
 */
static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		/* Empty tree: create the first (leaf) layer. */
		if (!(p = get_from_free_list(idp)))
			return -1;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			continue;
		}
		if (!(new = get_from_free_list(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				__move_to_free_list(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -1;
		}
		/* Splice the new layer in above the current top. */
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers-1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa);
	if (v == IDR_NEED_TO_GROW)
		goto build_up;
	return(v);
}
2601da177e4SLinus Torvalds 
261e33ac8bdSTejun Heo static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
262e33ac8bdSTejun Heo {
263e33ac8bdSTejun Heo 	struct idr_layer *pa[MAX_LEVEL];
264e33ac8bdSTejun Heo 	int id;
265e33ac8bdSTejun Heo 
266e33ac8bdSTejun Heo 	id = idr_get_empty_slot(idp, starting_id, pa);
267e33ac8bdSTejun Heo 	if (id >= 0) {
268e33ac8bdSTejun Heo 		/*
269e33ac8bdSTejun Heo 		 * Successfully found an empty slot.  Install the user
270e33ac8bdSTejun Heo 		 * pointer and mark the slot full.
271e33ac8bdSTejun Heo 		 */
2723219b3b7SNadia Derbey 		rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
2733219b3b7SNadia Derbey 				(struct idr_layer *)ptr);
274e33ac8bdSTejun Heo 		pa[0]->count++;
275e33ac8bdSTejun Heo 		idr_mark_full(pa, id);
276e33ac8bdSTejun Heo 	}
277e33ac8bdSTejun Heo 
278e33ac8bdSTejun Heo 	return id;
279e33ac8bdSTejun Heo }
280e33ac8bdSTejun Heo 
/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, starting_id);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
3121da177e4SLinus Torvalds 
/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);
3431da177e4SLinus Torvalds 
/* Complain (with a backtrace) about removal of an id that is not allocated. */
static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
		"idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}
3501da177e4SLinus Torvalds 
/*
 * Clear @id's slot, walking down from the top layer (@shift is the bit
 * shift of the top level).  Layers whose count drops to zero on the way
 * back up are detached and freed via RCU.  pa[]/paa record the chain of
 * slot addresses (not layers) so the parent's link can be NULLed too.
 */
static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	/* Walk to the leaf, clearing "full" bits along the path. */
	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))){
		__clear_bit(n, &p->bitmap);
		rcu_assign_pointer(p->ary[n], NULL);
		to_free = NULL;
		/*
		 * Free now-empty layers bottom-up; the free of each layer
		 * is delayed one iteration so its parent link is cleared
		 * before the layer itself is handed to RCU.
		 */
		while(*paa && ! --((**paa)->count)){
			if (to_free)
				free_layer(to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(to_free);
	} else
		idr_remove_warning(id);
}
3871da177e4SLinus Torvalds 
3881da177e4SLinus Torvalds /**
3891da177e4SLinus Torvalds  * idr_remove - remove the given id and free it's slot
39072fd4a35SRobert P. J. Day  * @idp: idr handle
39172fd4a35SRobert P. J. Day  * @id: unique key
3921da177e4SLinus Torvalds  */
3931da177e4SLinus Torvalds void idr_remove(struct idr *idp, int id)
3941da177e4SLinus Torvalds {
3951da177e4SLinus Torvalds 	struct idr_layer *p;
396cf481c20SNadia Derbey 	struct idr_layer *to_free;
3971da177e4SLinus Torvalds 
3981da177e4SLinus Torvalds 	/* Mask off upper bits we don't use for the search. */
3991da177e4SLinus Torvalds 	id &= MAX_ID_MASK;
4001da177e4SLinus Torvalds 
4011da177e4SLinus Torvalds 	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
402e15ae2ddSJesper Juhl 	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
403cf481c20SNadia Derbey 	    idp->top->ary[0]) {
404cf481c20SNadia Derbey 		/*
405cf481c20SNadia Derbey 		 * Single child at leftmost slot: we can shrink the tree.
406cf481c20SNadia Derbey 		 * This level is not needed anymore since when layers are
407cf481c20SNadia Derbey 		 * inserted, they are inserted at the top of the existing
408cf481c20SNadia Derbey 		 * tree.
409cf481c20SNadia Derbey 		 */
410cf481c20SNadia Derbey 		to_free = idp->top;
4111da177e4SLinus Torvalds 		p = idp->top->ary[0];
412cf481c20SNadia Derbey 		rcu_assign_pointer(idp->top, p);
4131da177e4SLinus Torvalds 		--idp->layers;
414cf481c20SNadia Derbey 		to_free->bitmap = to_free->count = 0;
415cf481c20SNadia Derbey 		free_layer(to_free);
4161da177e4SLinus Torvalds 	}
4171da177e4SLinus Torvalds 	while (idp->id_free_cnt >= IDR_FREE_MAX) {
4184ae53789SNadia Derbey 		p = get_from_free_list(idp);
419cf481c20SNadia Derbey 		/*
420cf481c20SNadia Derbey 		 * Note: we don't call the rcu callback here, since the only
421cf481c20SNadia Derbey 		 * layers that fall into the freelist are those that have been
422cf481c20SNadia Derbey 		 * preallocated.
423cf481c20SNadia Derbey 		 */
4241da177e4SLinus Torvalds 		kmem_cache_free(idr_layer_cache, p);
4251da177e4SLinus Torvalds 	}
426af8e2a4cSNadia Derbey 	return;
4271da177e4SLinus Torvalds }
4281da177e4SLinus Torvalds EXPORT_SYMBOL(idr_remove);
4291da177e4SLinus Torvalds 
/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idp_layers, but this
 * function will remove all id mappings and leave all idp_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree, will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
	int n, id, max;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	/* Detach the whole tree first so new lookups see an empty idr. */
	rcu_assign_pointer(idp->top, NULL);
	max = 1 << n;

	/* Depth-first sweep over all ids, freeing layers on the way back up. */
	id = 0;
	while (id < max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		id += 1 << n;
		/* Pop back up every level whose subtree we just finished. */
		while (n < fls(id)) {
			if (p)
				free_layer(p);
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
47423936cc0SKristian Hoegsberg 
/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
	/* Drain the preallocation free list back to the slab cache. */
	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);
4878d3b3591SAndrew Morton 
/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	p = rcu_dereference(idp->top);
	if (!p)
		return NULL;
	/* Total number of id bits covered by the current tree height. */
	n = (p->layer+1) * IDR_BITS;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return NULL;
	BUG_ON(n == 0);

	/* Walk down one layer per IDR_BITS of the id. */
	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
	}
	return((void *)p);
}
EXPORT_SYMBOL(idr_find);
5251da177e4SLinus Torvalds 
/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time. If it returns anything other
 * than 0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference(idp->top);
	max = 1 << n;

	/* Depth-first walk; pa[]/paa hold the ancestors of the current node. */
	id = 0;
	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		/* Advance to the next id; pop levels whose subtree is done. */
		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);
58096d7fa42SKristian Hoegsberg 
/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	p = idp->top;
	if (!p)
		return ERR_PTR(-EINVAL);

	/* Number of id bits covered by the current tree height. */
	n = (p->layer+1) * IDR_BITS;

	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	/* Walk down to the leaf layer holding @id's slot. */
	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);
6255806f07cSJeff Mahoney 
/* Create the slab cache backing all idr_layer allocations (boot time). */
void __init idr_init_cache(void)
{
	idr_layer_cache = kmem_cache_create("idr_layer_cache",
				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}
6311da177e4SLinus Torvalds 
/**
 * idr_init - initialize idr handle
 * @idp:	idr handle
 *
 * This function is use to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	/* Zero the whole structure, then make the spinlock usable. */
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);
64572dba584STejun Heo 
64672dba584STejun Heo 
64772dba584STejun Heo /*
64872dba584STejun Heo  * IDA - IDR based ID allocator
64972dba584STejun Heo  *
65072dba584STejun Heo  * This is an id allocator without id -> pointer translation.  Memory
65172dba584STejun Heo  * usage is much lower than full blown idr because each id only
65272dba584STejun Heo  * occupies a bit.  ida uses a custom leaf node which contains
65372dba584STejun Heo  * IDA_BITMAP_BITS slots.
65472dba584STejun Heo  *
65572dba584STejun Heo  * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
65672dba584STejun Heo  */
65772dba584STejun Heo 
65872dba584STejun Heo static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
65972dba584STejun Heo {
66072dba584STejun Heo 	unsigned long flags;
66172dba584STejun Heo 
66272dba584STejun Heo 	if (!ida->free_bitmap) {
66372dba584STejun Heo 		spin_lock_irqsave(&ida->idr.lock, flags);
66472dba584STejun Heo 		if (!ida->free_bitmap) {
66572dba584STejun Heo 			ida->free_bitmap = bitmap;
66672dba584STejun Heo 			bitmap = NULL;
66772dba584STejun Heo 		}
66872dba584STejun Heo 		spin_unlock_irqrestore(&ida->idr.lock, flags);
66972dba584STejun Heo 	}
67072dba584STejun Heo 
67172dba584STejun Heo 	kfree(bitmap);
67272dba584STejun Heo }
67372dba584STejun Heo 
67472dba584STejun Heo /**
67572dba584STejun Heo  * ida_pre_get - reserve resources for ida allocation
67672dba584STejun Heo  * @ida:	ida handle
67772dba584STejun Heo  * @gfp_mask:	memory allocation flag
67872dba584STejun Heo  *
67972dba584STejun Heo  * This function should be called prior to locking and calling the
68072dba584STejun Heo  * following function.  It preallocates enough memory to satisfy the
68172dba584STejun Heo  * worst possible allocation.
68272dba584STejun Heo  *
68372dba584STejun Heo  * If the system is REALLY out of memory this function returns 0,
68472dba584STejun Heo  * otherwise 1.
68572dba584STejun Heo  */
68672dba584STejun Heo int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
68772dba584STejun Heo {
68872dba584STejun Heo 	/* allocate idr_layers */
68972dba584STejun Heo 	if (!idr_pre_get(&ida->idr, gfp_mask))
69072dba584STejun Heo 		return 0;
69172dba584STejun Heo 
69272dba584STejun Heo 	/* allocate free_bitmap */
69372dba584STejun Heo 	if (!ida->free_bitmap) {
69472dba584STejun Heo 		struct ida_bitmap *bitmap;
69572dba584STejun Heo 
69672dba584STejun Heo 		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
69772dba584STejun Heo 		if (!bitmap)
69872dba584STejun Heo 			return 0;
69972dba584STejun Heo 
70072dba584STejun Heo 		free_bitmap(ida, bitmap);
70172dba584STejun Heo 	}
70272dba584STejun Heo 
70372dba584STejun Heo 	return 1;
70472dba584STejun Heo }
70572dba584STejun Heo EXPORT_SYMBOL(ida_pre_get);
70672dba584STejun Heo 
/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida:	ida handle
 * @starting_id:	id to start search at
 * @p_id:	pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be
 * called with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... 0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	/* ids are grouped IDA_BITMAP_BITS per leaf bitmap; idr_id is the
	 * index of the leaf chunk, offset the bit within it */
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
	if (t < 0)
		return _idr_rc_to_errno(t);

	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
		return -ENOSPC;

	/* the vacant slot may lie beyond the requested chunk; if so,
	 * start scanning its bitmap from bit 0 */
	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		/* grab the bitmap preallocated by ida_pre_get() */
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		/* publish the new leaf for lockless RCU readers */
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* lookup for empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_ID_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	/* leaf bitmap filled up: propagate fullness up the idr tree */
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have small memory foot print.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);
79472dba584STejun Heo 
/**
 * ida_get_new - allocate new ID
 * @ida:	ida handle
 * @p_id:	pointer to the allocated handle
 *
 * Allocate a new ID starting from 0.  This is a convenience wrapper
 * around ida_get_new_above(); the same locking rules apply.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range 0 ... 0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);
81372dba584STejun Heo 
81472dba584STejun Heo /**
81572dba584STejun Heo  * ida_remove - remove the given ID
81672dba584STejun Heo  * @ida:	ida handle
81772dba584STejun Heo  * @id:		ID to free
81872dba584STejun Heo  */
81972dba584STejun Heo void ida_remove(struct ida *ida, int id)
82072dba584STejun Heo {
82172dba584STejun Heo 	struct idr_layer *p = ida->idr.top;
82272dba584STejun Heo 	int shift = (ida->idr.layers - 1) * IDR_BITS;
82372dba584STejun Heo 	int idr_id = id / IDA_BITMAP_BITS;
82472dba584STejun Heo 	int offset = id % IDA_BITMAP_BITS;
82572dba584STejun Heo 	int n;
82672dba584STejun Heo 	struct ida_bitmap *bitmap;
82772dba584STejun Heo 
82872dba584STejun Heo 	/* clear full bits while looking up the leaf idr_layer */
82972dba584STejun Heo 	while ((shift > 0) && p) {
83072dba584STejun Heo 		n = (idr_id >> shift) & IDR_MASK;
83172dba584STejun Heo 		__clear_bit(n, &p->bitmap);
83272dba584STejun Heo 		p = p->ary[n];
83372dba584STejun Heo 		shift -= IDR_BITS;
83472dba584STejun Heo 	}
83572dba584STejun Heo 
83672dba584STejun Heo 	if (p == NULL)
83772dba584STejun Heo 		goto err;
83872dba584STejun Heo 
83972dba584STejun Heo 	n = idr_id & IDR_MASK;
84072dba584STejun Heo 	__clear_bit(n, &p->bitmap);
84172dba584STejun Heo 
84272dba584STejun Heo 	bitmap = (void *)p->ary[n];
84372dba584STejun Heo 	if (!test_bit(offset, bitmap->bitmap))
84472dba584STejun Heo 		goto err;
84572dba584STejun Heo 
84672dba584STejun Heo 	/* update bitmap and remove it if empty */
84772dba584STejun Heo 	__clear_bit(offset, bitmap->bitmap);
84872dba584STejun Heo 	if (--bitmap->nr_busy == 0) {
84972dba584STejun Heo 		__set_bit(n, &p->bitmap);	/* to please idr_remove() */
85072dba584STejun Heo 		idr_remove(&ida->idr, idr_id);
85172dba584STejun Heo 		free_bitmap(ida, bitmap);
85272dba584STejun Heo 	}
85372dba584STejun Heo 
85472dba584STejun Heo 	return;
85572dba584STejun Heo 
85672dba584STejun Heo  err:
85772dba584STejun Heo 	printk(KERN_WARNING
85872dba584STejun Heo 	       "ida_remove called for id=%d which is not allocated.\n", id);
85972dba584STejun Heo }
86072dba584STejun Heo EXPORT_SYMBOL(ida_remove);
86172dba584STejun Heo 
/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida:	ida handle
 *
 * Frees the idr layers cached by the underlying idr and the one
 * preallocated free bitmap, if any.
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);
87272dba584STejun Heo 
87372dba584STejun Heo /**
87472dba584STejun Heo  * ida_init - initialize ida handle
87572dba584STejun Heo  * @ida:	ida handle
87672dba584STejun Heo  *
87772dba584STejun Heo  * This function is use to set up the handle (@ida) that you will pass
87872dba584STejun Heo  * to the rest of the functions.
87972dba584STejun Heo  */
88072dba584STejun Heo void ida_init(struct ida *ida)
88172dba584STejun Heo {
88272dba584STejun Heo 	memset(ida, 0, sizeof(struct ida));
88372dba584STejun Heo 	idr_init(&ida->idr);
88472dba584STejun Heo 
88572dba584STejun Heo }
88672dba584STejun Heo EXPORT_SYMBOL(ida_init);
887