xref: /openbmc/linux/lib/idr.c (revision 7ad3d4d8)
10a835c4fSMatthew Wilcox #include <linux/bitmap.h>
28bc3bcc9SPaul Gortmaker #include <linux/export.h>
31da177e4SLinus Torvalds #include <linux/idr.h>
40a835c4fSMatthew Wilcox #include <linux/slab.h>
588eca020SRusty Russell #include <linux/spinlock.h>
61da177e4SLinus Torvalds 
/*
 * Per-cpu spare bitmap consumed via this_cpu_xchg() in ida_get_new_above();
 * presumably refilled by ida_pre_get() (defined elsewhere) — the -EAGAIN
 * return below tells callers to do exactly that and retry.
 */
DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap);
/* Serialises the ida_simple_get()/ida_simple_remove() convenience API. */
static DEFINE_SPINLOCK(simple_ida_lock);
91da177e4SLinus Torvalds 
/**
 * idr_alloc - allocate an id
 * @idr: idr handle
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive)
 * @gfp: memory allocation flags
 *
 * Allocates an unused ID in the range [start, end).  Returns -ENOSPC
 * if there are no unused IDs in that range.
 *
 * Note that @end is treated as max when <= 0.  This is to always allow
 * using @start + N as @end as long as N is inside integer range.
 *
 * Simultaneous modifications to the @idr are not allowed and should be
 * prevented by the user, usually with a lock.  idr_alloc() may be called
 * concurrently with read-only accesses to the @idr, such as idr_find() and
 * idr_for_each_entry().
 *
 * Return: the newly allocated ID on success, %-EINVAL for invalid
 * arguments, or a negative errno propagated from idr_get_free()
 * (e.g. %-ENOSPC when the range is exhausted).
 */
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
	void **slot;
	struct radix_tree_iter iter;

	if (WARN_ON_ONCE(start < 0))
		return -EINVAL;
	/* Internal radix tree node pointers must never be stored as entries. */
	if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
		return -EINVAL;

	/* Find (or create) the first IDR_FREE slot with index >= @start ... */
	radix_tree_iter_init(&iter, start);
	slot = idr_get_free(&idr->idr_rt, &iter, gfp, end);
	if (IS_ERR(slot))
		return PTR_ERR(slot);

	/* ... then publish @ptr there and mark the ID as in use. */
	radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
	radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);
	return iter.index;
}
EXPORT_SYMBOL_GPL(idr_alloc);
49d5c7409fSTejun Heo 
503e6628c4SJeff Layton /**
513e6628c4SJeff Layton  * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
520a835c4fSMatthew Wilcox  * @idr: idr handle
533e6628c4SJeff Layton  * @ptr: pointer to be associated with the new id
543e6628c4SJeff Layton  * @start: the minimum id (inclusive)
550a835c4fSMatthew Wilcox  * @end: the maximum id (exclusive)
560a835c4fSMatthew Wilcox  * @gfp: memory allocation flags
573e6628c4SJeff Layton  *
580a835c4fSMatthew Wilcox  * Allocates an ID larger than the last ID allocated if one is available.
590a835c4fSMatthew Wilcox  * If not, it will attempt to allocate the smallest ID that is larger or
600a835c4fSMatthew Wilcox  * equal to @start.
613e6628c4SJeff Layton  */
620a835c4fSMatthew Wilcox int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
633e6628c4SJeff Layton {
640a835c4fSMatthew Wilcox 	int id, curr = idr->idr_next;
653e6628c4SJeff Layton 
660a835c4fSMatthew Wilcox 	if (curr < start)
670a835c4fSMatthew Wilcox 		curr = start;
683e6628c4SJeff Layton 
690a835c4fSMatthew Wilcox 	id = idr_alloc(idr, ptr, curr, end, gfp);
700a835c4fSMatthew Wilcox 	if ((id == -ENOSPC) && (curr > start))
710a835c4fSMatthew Wilcox 		id = idr_alloc(idr, ptr, start, curr, gfp);
720a835c4fSMatthew Wilcox 
730a835c4fSMatthew Wilcox 	if (id >= 0)
740a835c4fSMatthew Wilcox 		idr->idr_next = id + 1U;
750a835c4fSMatthew Wilcox 
763e6628c4SJeff Layton 	return id;
773e6628c4SJeff Layton }
783e6628c4SJeff Layton EXPORT_SYMBOL(idr_alloc_cyclic);
793e6628c4SJeff Layton 
805806f07cSJeff Mahoney /**
8196d7fa42SKristian Hoegsberg  * idr_for_each - iterate through all stored pointers
820a835c4fSMatthew Wilcox  * @idr: idr handle
8396d7fa42SKristian Hoegsberg  * @fn: function to be called for each pointer
840a835c4fSMatthew Wilcox  * @data: data passed to callback function
8596d7fa42SKristian Hoegsberg  *
860a835c4fSMatthew Wilcox  * The callback function will be called for each entry in @idr, passing
870a835c4fSMatthew Wilcox  * the id, the pointer and the data pointer passed to this function.
8896d7fa42SKristian Hoegsberg  *
890a835c4fSMatthew Wilcox  * If @fn returns anything other than %0, the iteration stops and that
900a835c4fSMatthew Wilcox  * value is returned from this function.
9196d7fa42SKristian Hoegsberg  *
920a835c4fSMatthew Wilcox  * idr_for_each() can be called concurrently with idr_alloc() and
930a835c4fSMatthew Wilcox  * idr_remove() if protected by RCU.  Newly added entries may not be
940a835c4fSMatthew Wilcox  * seen and deleted entries may be seen, but adding and removing entries
950a835c4fSMatthew Wilcox  * will not cause other entries to be skipped, nor spurious ones to be seen.
9696d7fa42SKristian Hoegsberg  */
970a835c4fSMatthew Wilcox int idr_for_each(const struct idr *idr,
9896d7fa42SKristian Hoegsberg 		int (*fn)(int id, void *p, void *data), void *data)
9996d7fa42SKristian Hoegsberg {
1000a835c4fSMatthew Wilcox 	struct radix_tree_iter iter;
1010a835c4fSMatthew Wilcox 	void **slot;
10296d7fa42SKristian Hoegsberg 
1030a835c4fSMatthew Wilcox 	radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
1040a835c4fSMatthew Wilcox 		int ret = fn(iter.index, rcu_dereference_raw(*slot), data);
1050a835c4fSMatthew Wilcox 		if (ret)
1060a835c4fSMatthew Wilcox 			return ret;
10796d7fa42SKristian Hoegsberg 	}
10896d7fa42SKristian Hoegsberg 
1090a835c4fSMatthew Wilcox 	return 0;
11096d7fa42SKristian Hoegsberg }
11196d7fa42SKristian Hoegsberg EXPORT_SYMBOL(idr_for_each);
11296d7fa42SKristian Hoegsberg 
11396d7fa42SKristian Hoegsberg /**
1140a835c4fSMatthew Wilcox  * idr_get_next - Find next populated entry
1150a835c4fSMatthew Wilcox  * @idr: idr handle
1160a835c4fSMatthew Wilcox  * @nextid: Pointer to lowest possible ID to return
11738460b48SKAMEZAWA Hiroyuki  *
1180a835c4fSMatthew Wilcox  * Returns the next populated entry in the tree with an ID greater than
1190a835c4fSMatthew Wilcox  * or equal to the value pointed to by @nextid.  On exit, @nextid is updated
1200a835c4fSMatthew Wilcox  * to the ID of the found value.  To use in a loop, the value pointed to by
1210a835c4fSMatthew Wilcox  * nextid must be incremented by the user.
12238460b48SKAMEZAWA Hiroyuki  */
1230a835c4fSMatthew Wilcox void *idr_get_next(struct idr *idr, int *nextid)
12438460b48SKAMEZAWA Hiroyuki {
1250a835c4fSMatthew Wilcox 	struct radix_tree_iter iter;
1260a835c4fSMatthew Wilcox 	void **slot;
12738460b48SKAMEZAWA Hiroyuki 
1280a835c4fSMatthew Wilcox 	slot = radix_tree_iter_find(&idr->idr_rt, &iter, *nextid);
1290a835c4fSMatthew Wilcox 	if (!slot)
13038460b48SKAMEZAWA Hiroyuki 		return NULL;
13138460b48SKAMEZAWA Hiroyuki 
1320a835c4fSMatthew Wilcox 	*nextid = iter.index;
1330a835c4fSMatthew Wilcox 	return rcu_dereference_raw(*slot);
13438460b48SKAMEZAWA Hiroyuki }
1354d1ee80fSBen Hutchings EXPORT_SYMBOL(idr_get_next);
13638460b48SKAMEZAWA Hiroyuki 
/**
 * idr_replace - replace pointer for given id
 * @idr: idr handle
 * @ptr: New pointer to associate with the ID
 * @id: Lookup key
 *
 * Replace the pointer registered with an ID and return the old value.
 * This function can be called under the RCU read lock concurrently with
 * idr_alloc() and idr_remove() (as long as the ID being removed is not
 * the one being replaced!).
 *
 * Returns: the old value on success.  %-ENOENT indicates that @id was not
 * found.  %-EINVAL indicates that @id or @ptr were not valid.
 */
void *idr_replace(struct idr *idr, void *ptr, int id)
{
	struct radix_tree_node *node;
	void **slot = NULL;
	void *entry;

	if (WARN_ON_ONCE(id < 0))
		return ERR_PTR(-EINVAL);
	/* Internal radix tree node pointers must never be stored as entries. */
	if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
		return ERR_PTR(-EINVAL);

	entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
	/* A slot still tagged IDR_FREE was never handed out by idr_alloc(). */
	if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
		return ERR_PTR(-ENOENT);

	__radix_tree_replace(&idr->idr_rt, node, slot, ptr, NULL, NULL);

	return entry;
}
EXPORT_SYMBOL(idr_replace);
1715806f07cSJeff Mahoney 
17256083ab1SRandy Dunlap /**
17356083ab1SRandy Dunlap  * DOC: IDA description
17472dba584STejun Heo  *
1750a835c4fSMatthew Wilcox  * The IDA is an ID allocator which does not provide the ability to
1760a835c4fSMatthew Wilcox  * associate an ID with a pointer.  As such, it only needs to store one
1770a835c4fSMatthew Wilcox  * bit per ID, and so is more space efficient than an IDR.  To use an IDA,
1780a835c4fSMatthew Wilcox  * define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
1790a835c4fSMatthew Wilcox  * then initialise it using ida_init()).  To allocate a new ID, call
1800a835c4fSMatthew Wilcox  * ida_simple_get().  To free an ID, call ida_simple_remove().
18172dba584STejun Heo  *
1820a835c4fSMatthew Wilcox  * If you have more complex locking requirements, use a loop around
1830a835c4fSMatthew Wilcox  * ida_pre_get() and ida_get_new() to allocate a new ID.  Then use
1840a835c4fSMatthew Wilcox  * ida_remove() to free an ID.  You must make sure that ida_get_new() and
1850a835c4fSMatthew Wilcox  * ida_remove() cannot be called at the same time as each other for the
1860a835c4fSMatthew Wilcox  * same IDA.
1870a835c4fSMatthew Wilcox  *
1880a835c4fSMatthew Wilcox  * You can also use ida_get_new_above() if you need an ID to be allocated
1890a835c4fSMatthew Wilcox  * above a particular number.  ida_destroy() can be used to dispose of an
1900a835c4fSMatthew Wilcox  * IDA without needing to free the individual IDs in it.  You can use
1910a835c4fSMatthew Wilcox  * ida_is_empty() to find out whether the IDA has any IDs currently allocated.
1920a835c4fSMatthew Wilcox  *
1930a835c4fSMatthew Wilcox  * IDs are currently limited to the range [0-INT_MAX].  If this is an awkward
1940a835c4fSMatthew Wilcox  * limitation, it should be quite straightforward to raise the maximum.
19572dba584STejun Heo  */
19672dba584STejun Heo 
/*
 * Index of the first bitmap chunk that lies entirely beyond INT_MAX:
 * IDs are limited to [0, INT_MAX] (0x80000000 values), packed
 * IDA_BITMAP_BITS per radix tree entry.
 */
#define IDA_MAX (0x80000000U / IDA_BITMAP_BITS)
1980a835c4fSMatthew Wilcox 
/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @start: id to start search at
 * @id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @start.  It should be called
 * with any required locks to ensure that concurrent calls to
 * ida_get_new_above() / ida_get_new() / ida_remove() are not allowed.
 * Consider using ida_simple_get() if you do not have complex locking
 * requirements.
 *
 * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.  On success, it will return 0.
 *
 * @id returns a value in the range @start ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int start, int *id)
{
	struct radix_tree_root *root = &ida->ida_rt;
	void **slot;
	struct radix_tree_iter iter;
	struct ida_bitmap *bitmap;
	unsigned long index;
	unsigned bit;
	int new;

	/* Each radix tree entry is a bitmap covering IDA_BITMAP_BITS IDs. */
	index = start / IDA_BITMAP_BITS;
	bit = start % IDA_BITMAP_BITS;

	slot = radix_tree_iter_init(&iter, index);
	for (;;) {
		/* Walk only chunks tagged IDR_FREE, i.e. with spare bits. */
		if (slot)
			slot = radix_tree_next_slot(slot, &iter,
						RADIX_TREE_ITER_TAGGED);
		if (!slot) {
			/* No chunk with free IDs left: reserve a fresh,
			 * still-empty slot in the tree. */
			slot = idr_get_free(root, &iter, GFP_NOWAIT, IDA_MAX);
			if (IS_ERR(slot)) {
				/* -ENOMEM becomes -EAGAIN: caller should do
				 * ida_pre_get() and retry. */
				if (slot == ERR_PTR(-ENOMEM))
					return -EAGAIN;
				return PTR_ERR(slot);
			}
		}
		/* Only the chunk containing @start honours its bit offset. */
		if (iter.index > index)
			bit = 0;
		new = iter.index * IDA_BITMAP_BITS;
		bitmap = rcu_dereference_raw(*slot);
		if (bitmap) {
			bit = find_next_zero_bit(bitmap->bitmap,
							IDA_BITMAP_BITS, bit);
			new += bit;
			/* Negative means the ID would exceed INT_MAX. */
			if (new < 0)
				return -ENOSPC;
			/* This chunk is full from @bit on: try the next one. */
			if (bit == IDA_BITMAP_BITS)
				continue;

			__set_bit(bit, bitmap->bitmap);
			/* A completely full chunk must no longer look free. */
			if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
				radix_tree_iter_tag_clear(root, &iter,
								IDR_FREE);
		} else {
			new += bit;
			if (new < 0)
				return -ENOSPC;
			/* Consume the per-cpu bitmap preallocated for us
			 * (see ida_bitmap above); without one we must ask
			 * the caller to ida_pre_get() and retry. */
			bitmap = this_cpu_xchg(ida_bitmap, NULL);
			if (!bitmap)
				return -EAGAIN;
			memset(bitmap, 0, sizeof(*bitmap));
			__set_bit(bit, bitmap->bitmap);
			radix_tree_iter_replace(root, &iter, slot, bitmap);
		}

		*id = new;
		return 0;
	}
}
EXPORT_SYMBOL(ida_get_new_above);
27772dba584STejun Heo 
27872dba584STejun Heo /**
2790a835c4fSMatthew Wilcox  * ida_remove - Free the given ID
28072dba584STejun Heo  * @ida: ida handle
28172dba584STejun Heo  * @id: ID to free
2820a835c4fSMatthew Wilcox  *
2830a835c4fSMatthew Wilcox  * This function should not be called at the same time as ida_get_new_above().
28472dba584STejun Heo  */
28572dba584STejun Heo void ida_remove(struct ida *ida, int id)
28672dba584STejun Heo {
2870a835c4fSMatthew Wilcox 	unsigned long index = id / IDA_BITMAP_BITS;
2880a835c4fSMatthew Wilcox 	unsigned offset = id % IDA_BITMAP_BITS;
28972dba584STejun Heo 	struct ida_bitmap *bitmap;
2900a835c4fSMatthew Wilcox 	struct radix_tree_iter iter;
2910a835c4fSMatthew Wilcox 	void **slot;
29272dba584STejun Heo 
2930a835c4fSMatthew Wilcox 	slot = radix_tree_iter_lookup(&ida->ida_rt, &iter, index);
2940a835c4fSMatthew Wilcox 	if (!slot)
2958f9f665aSLai Jiangshan 		goto err;
2968f9f665aSLai Jiangshan 
2970a835c4fSMatthew Wilcox 	bitmap = rcu_dereference_raw(*slot);
2980a835c4fSMatthew Wilcox 	if (!test_bit(offset, bitmap->bitmap))
29972dba584STejun Heo 		goto err;
30072dba584STejun Heo 
30172dba584STejun Heo 	__clear_bit(offset, bitmap->bitmap);
3020a835c4fSMatthew Wilcox 	radix_tree_iter_tag_set(&ida->ida_rt, &iter, IDR_FREE);
3030a835c4fSMatthew Wilcox 	if (bitmap_empty(bitmap->bitmap, IDA_BITMAP_BITS)) {
3040a835c4fSMatthew Wilcox 		kfree(bitmap);
3050a835c4fSMatthew Wilcox 		radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
30672dba584STejun Heo 	}
30772dba584STejun Heo 	return;
30872dba584STejun Heo  err:
309dd04b452SJean Delvare 	WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
31072dba584STejun Heo }
31172dba584STejun Heo EXPORT_SYMBOL(ida_remove);
31272dba584STejun Heo 
31372dba584STejun Heo /**
3140a835c4fSMatthew Wilcox  * ida_destroy - Free the contents of an ida
315ea24ea85SNaohiro Aota  * @ida: ida handle
3160a835c4fSMatthew Wilcox  *
3170a835c4fSMatthew Wilcox  * Calling this function releases all resources associated with an IDA.  When
3180a835c4fSMatthew Wilcox  * this call returns, the IDA is empty and can be reused or freed.  The caller
3190a835c4fSMatthew Wilcox  * should not allow ida_remove() or ida_get_new_above() to be called at the
3200a835c4fSMatthew Wilcox  * same time.
32172dba584STejun Heo  */
32272dba584STejun Heo void ida_destroy(struct ida *ida)
32372dba584STejun Heo {
3240a835c4fSMatthew Wilcox 	struct radix_tree_iter iter;
3250a835c4fSMatthew Wilcox 	void **slot;
3260a835c4fSMatthew Wilcox 
3270a835c4fSMatthew Wilcox 	radix_tree_for_each_slot(slot, &ida->ida_rt, &iter, 0) {
3280a835c4fSMatthew Wilcox 		struct ida_bitmap *bitmap = rcu_dereference_raw(*slot);
3290a835c4fSMatthew Wilcox 		kfree(bitmap);
3300a835c4fSMatthew Wilcox 		radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
3310a835c4fSMatthew Wilcox 	}
33272dba584STejun Heo }
33372dba584STejun Heo EXPORT_SYMBOL(ida_destroy);
33472dba584STejun Heo 
33572dba584STejun Heo /**
33688eca020SRusty Russell  * ida_simple_get - get a new id.
33788eca020SRusty Russell  * @ida: the (initialized) ida.
33888eca020SRusty Russell  * @start: the minimum id (inclusive, < 0x8000000)
33988eca020SRusty Russell  * @end: the maximum id (exclusive, < 0x8000000 or 0)
34088eca020SRusty Russell  * @gfp_mask: memory allocation flags
34188eca020SRusty Russell  *
34288eca020SRusty Russell  * Allocates an id in the range start <= id < end, or returns -ENOSPC.
34388eca020SRusty Russell  * On memory allocation failure, returns -ENOMEM.
34488eca020SRusty Russell  *
345a2ef9471SDaniel Vetter  * Compared to ida_get_new_above() this function does its own locking, and
346a2ef9471SDaniel Vetter  * should be used unless there are special requirements.
347a2ef9471SDaniel Vetter  *
34888eca020SRusty Russell  * Use ida_simple_remove() to get rid of an id.
34988eca020SRusty Russell  */
35088eca020SRusty Russell int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
35188eca020SRusty Russell 		   gfp_t gfp_mask)
35288eca020SRusty Russell {
35388eca020SRusty Russell 	int ret, id;
35488eca020SRusty Russell 	unsigned int max;
35546cbc1d3STejun Heo 	unsigned long flags;
35688eca020SRusty Russell 
35788eca020SRusty Russell 	BUG_ON((int)start < 0);
35888eca020SRusty Russell 	BUG_ON((int)end < 0);
35988eca020SRusty Russell 
36088eca020SRusty Russell 	if (end == 0)
36188eca020SRusty Russell 		max = 0x80000000;
36288eca020SRusty Russell 	else {
36388eca020SRusty Russell 		BUG_ON(end < start);
36488eca020SRusty Russell 		max = end - 1;
36588eca020SRusty Russell 	}
36688eca020SRusty Russell 
36788eca020SRusty Russell again:
36888eca020SRusty Russell 	if (!ida_pre_get(ida, gfp_mask))
36988eca020SRusty Russell 		return -ENOMEM;
37088eca020SRusty Russell 
37146cbc1d3STejun Heo 	spin_lock_irqsave(&simple_ida_lock, flags);
37288eca020SRusty Russell 	ret = ida_get_new_above(ida, start, &id);
37388eca020SRusty Russell 	if (!ret) {
37488eca020SRusty Russell 		if (id > max) {
37588eca020SRusty Russell 			ida_remove(ida, id);
37688eca020SRusty Russell 			ret = -ENOSPC;
37788eca020SRusty Russell 		} else {
37888eca020SRusty Russell 			ret = id;
37988eca020SRusty Russell 		}
38088eca020SRusty Russell 	}
38146cbc1d3STejun Heo 	spin_unlock_irqrestore(&simple_ida_lock, flags);
38288eca020SRusty Russell 
38388eca020SRusty Russell 	if (unlikely(ret == -EAGAIN))
38488eca020SRusty Russell 		goto again;
38588eca020SRusty Russell 
38688eca020SRusty Russell 	return ret;
38788eca020SRusty Russell }
38888eca020SRusty Russell EXPORT_SYMBOL(ida_simple_get);
38988eca020SRusty Russell 
/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 *
 * Use to release an id allocated with ida_simple_get().
 *
 * Compared to ida_remove() this function does its own locking, and should be
 * used unless there are special requirements.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
{
	unsigned long flags;

	/* ida_simple_get() never hands out ids above INT_MAX. */
	BUG_ON((int)id < 0);
	spin_lock_irqsave(&simple_ida_lock, flags);
	ida_remove(ida, id);
	spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);
410