#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Per-cpu preallocated bitmap, filled by ida_pre_get(), consumed here. */
DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap);
/* Serialises the ida_simple_*() convenience wrappers. */
static DEFINE_SPINLOCK(simple_ida_lock);

/**
 * idr_alloc_u32() - Allocate an ID.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @nextid: Pointer to an ID.
 * @max: The maximum ID to allocate (inclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @nextid and @max.
 * Note that @max is inclusive whereas the @end parameter to idr_alloc()
 * is exclusive.  The new ID is assigned to @nextid before the pointer
 * is inserted into the IDR, so if @nextid points into the object pointed
 * to by @ptr, a concurrent lookup will not find an uninitialised ID.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible.  Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: 0 if an ID was allocated, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found.  If an error occurred,
 * @nextid is unchanged.
 */
int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
			unsigned long max, gfp_t gfp)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	/* Internal radix-tree nodes must never be stored as user entries. */
	if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
		return -EINVAL;
	/* Warn if the root was not initialised as an IDR, then repair it. */
	if (WARN_ON_ONCE(!(idr->idr_rt.gfp_mask & ROOT_IS_IDR)))
		idr->idr_rt.gfp_mask |= IDR_RT_MARKER;

	radix_tree_iter_init(&iter, *nextid);
	slot = idr_get_free(&idr->idr_rt, &iter, gfp, max);
	if (IS_ERR(slot))
		return PTR_ERR(slot);

	/* Publish the ID before the pointer becomes visible (see above). */
	*nextid = iter.index;
	/* there is a memory barrier inside radix_tree_iter_replace() */
	radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
	radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);

	return 0;
}
EXPORT_SYMBOL_GPL(idr_alloc_u32);

/**
 * idr_alloc() - Allocate an ID.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @start: The minimum ID (inclusive).
 * @end: The maximum ID (exclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @start and @end.  If
 * @end is <= 0, it is treated as one larger than %INT_MAX.  This allows
 * callers to use @start + N as @end as long as N is within integer range.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible.  Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found.
 */
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
	u32 id = start;
	int ret;

	if (WARN_ON_ONCE(start < 0))
		return -EINVAL;

	/* Convert the exclusive @end into the inclusive max idr_alloc_u32() wants. */
	ret = idr_alloc_u32(idr, ptr, &id, end > 0 ? end - 1 : INT_MAX, gfp);
	if (ret)
		return ret;

	return id;
}
EXPORT_SYMBOL_GPL(idr_alloc);

/**
 * idr_alloc_cyclic() - Allocate an ID cyclically.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @start: The minimum ID (inclusive).
 * @end: The maximum ID (exclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @nextid and @end.  If
 * @end is <= 0, it is treated as one larger than %INT_MAX.  This allows
 * callers to use @start + N as @end as long as N is within integer range.
 * The search for an unused ID will start at the last ID allocated and will
 * wrap around to @start if no free IDs are found before reaching @end.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible.  Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found.
 */
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
	u32 id = idr->idr_next;
	int err, max = end > 0 ? end - 1 : INT_MAX;

	/* idr_next may be below @start (or stale); clamp the search origin. */
	if ((int)id < start)
		id = start;

	err = idr_alloc_u32(idr, ptr, &id, max, gfp);
	/* Wrap around and retry once from @start before giving up. */
	if ((err == -ENOSPC) && (id > start)) {
		id = start;
		err = idr_alloc_u32(idr, ptr, &id, max, gfp);
	}
	if (err)
		return err;

	/* Remember where the next cyclic search should begin. */
	idr->idr_next = id + 1;
	return id;
}
EXPORT_SYMBOL(idr_alloc_cyclic);

/**
 * idr_for_each - iterate through all stored pointers
 * @idr: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed to callback function
 *
 * The callback function will be called for each entry in @idr, passing
 * the id, the pointer and the data pointer passed to this function.
 *
 * If @fn returns anything other than %0, the iteration stops and that
 * value is returned from this function.
 *
 * idr_for_each() can be called concurrently with idr_alloc() and
 * idr_remove() if protected by RCU.  Newly added entries may not be
 * seen and deleted entries may be seen, but adding and removing entries
 * will not cause other entries to be skipped, nor spurious ones to be seen.
 */
int idr_for_each(const struct idr *idr,
		int (*fn)(int id, void *p, void *data), void *data)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
		int ret = fn(iter.index, rcu_dereference_raw(*slot), data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(idr_for_each);

/**
 * idr_get_next - Find next populated entry
 * @idr: idr handle
 * @nextid: Pointer to lowest possible ID to return
 *
 * Returns the next populated entry in the tree with an ID greater than
 * or equal to the value pointed to by @nextid.  On exit, @nextid is updated
 * to the ID of the found value.  To use in a loop, the value pointed to by
 * nextid must be incremented by the user.
 */
void *idr_get_next(struct idr *idr, int *nextid)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	slot = radix_tree_iter_find(&idr->idr_rt, &iter, *nextid);
	if (!slot)
		return NULL;

	*nextid = iter.index;
	return rcu_dereference_raw(*slot);
}
EXPORT_SYMBOL(idr_get_next);

/*
 * idr_get_next_ext - Find next populated entry (unsigned long ID variant)
 * @idr: idr handle
 * @nextid: Pointer to lowest possible ID to return
 *
 * Same contract as idr_get_next(), but takes and returns the ID as an
 * unsigned long so IDs above INT_MAX can be iterated.
 */
void *idr_get_next_ext(struct idr *idr, unsigned long *nextid)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	slot = radix_tree_iter_find(&idr->idr_rt, &iter, *nextid);
	if (!slot)
		return NULL;

	*nextid = iter.index;
	return rcu_dereference_raw(*slot);
}
EXPORT_SYMBOL(idr_get_next_ext);

/**
 * idr_replace() - replace pointer for given ID.
 * @idr: IDR handle.
 * @ptr: New pointer to associate with the ID.
 * @id: ID to change.
 *
 * Replace the pointer registered with an ID and return the old value.
 * This function can be called under the RCU read lock concurrently with
 * idr_alloc() and idr_remove() (as long as the ID being removed is not
 * the one being replaced!).
 *
 * Returns: the old value on success.  %-ENOENT indicates that @id was not
 * found.  %-EINVAL indicates that @ptr was not valid.
 */
void *idr_replace(struct idr *idr, void *ptr, unsigned long id)
{
	struct radix_tree_node *node;
	void __rcu **slot = NULL;
	void *entry;

	if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
		return ERR_PTR(-EINVAL);

	/* A slot tagged IDR_FREE holds no user entry even if it exists. */
	entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
	if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
		return ERR_PTR(-ENOENT);

	__radix_tree_replace(&idr->idr_rt, node, slot, ptr, NULL);

	return entry;
}
EXPORT_SYMBOL(idr_replace);

/**
 * DOC: IDA description
 *
 * The IDA is an ID allocator which does not provide the ability to
 * associate an ID with a pointer.  As such, it only needs to store one
 * bit per ID, and so is more space efficient than an IDR.  To use an IDA,
 * define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
 * then initialise it using ida_init()).  To allocate a new ID, call
 * ida_simple_get().  To free an ID, call ida_simple_remove().
 *
 * If you have more complex locking requirements, use a loop around
 * ida_pre_get() and ida_get_new() to allocate a new ID.  Then use
 * ida_remove() to free an ID.  You must make sure that ida_get_new() and
 * ida_remove() cannot be called at the same time as each other for the
 * same IDA.
 *
 * You can also use ida_get_new_above() if you need an ID to be allocated
 * above a particular number.  ida_destroy() can be used to dispose of an
 * IDA without needing to free the individual IDs in it.  You can use
 * ida_is_empty() to find out whether the IDA has any IDs currently allocated.
 *
 * IDs are currently limited to the range [0-INT_MAX].  If this is an awkward
 * limitation, it should be quite straightforward to raise the maximum.
 */

/*
 * Developer's notes:
 *
 * The IDA uses the functionality provided by the IDR & radix tree to store
 * bitmaps in each entry.  The IDR_FREE tag means there is at least one bit
 * free, unlike the IDR where it means at least one entry is free.
 *
 * I considered telling the radix tree that each slot is an order-10 node
 * and storing the bit numbers in the radix tree, but the radix tree can't
 * allow a single multiorder entry at index 0, which would significantly
 * increase memory consumption for the IDA.  So instead we divide the index
 * by the number of bits in the leaf bitmap before doing a radix tree lookup.
 *
 * As an optimisation, if there are only a few low bits set in any given
 * leaf, instead of allocating a 128-byte bitmap, we use the 'exceptional
 * entry' functionality of the radix tree to store BITS_PER_LONG - 2 bits
 * directly in the entry.  By being really tricksy, we could store
 * BITS_PER_LONG - 1 bits, but there're diminishing returns after optimising
 * for 0-3 allocated IDs.
 *
 * We allow the radix tree 'exceptional' count to get out of date.  Nothing
 * in the IDA nor the radix tree code checks it.  If it becomes important
 * to maintain an accurate exceptional count, switch the rcu_assign_pointer()
 * calls to radix_tree_iter_replace() which will correct the exceptional
 * count.
 *
 * The IDA always requires a lock to alloc/free.  If we add a 'test_bit'
 * equivalent, it will still need locking.  Going to RCU lookup would require
 * using RCU to free bitmaps, and that's not trivial without embedding an
 * RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte
 * bitmap, which is excessive.
 */

/* Highest radix-tree index usable by the IDA: IDs span [0, INT_MAX]. */
#define IDA_MAX (0x80000000U / IDA_BITMAP_BITS - 1)

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @start: id to start search at
 * @id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @start.  It should be called
 * with any required locks to ensure that concurrent calls to
 * ida_get_new_above() / ida_get_new() / ida_remove() are not allowed.
 * Consider using ida_simple_get() if you do not have complex locking
 * requirements.
 *
 * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.  On success, it will return 0.
 *
 * @id returns a value in the range @start ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int start, int *id)
{
	struct radix_tree_root *root = &ida->ida_rt;
	void __rcu **slot;
	struct radix_tree_iter iter;
	struct ida_bitmap *bitmap;
	unsigned long index;
	unsigned bit, ebit;
	int new;

	/* Split @start into a radix-tree index and a bit within that leaf. */
	index = start / IDA_BITMAP_BITS;
	bit = start % IDA_BITMAP_BITS;
	/* ebit is the same bit offset shifted for the exceptional-entry encoding. */
	ebit = bit + RADIX_TREE_EXCEPTIONAL_SHIFT;

	slot = radix_tree_iter_init(&iter, index);
	for (;;) {
		if (slot)
			slot = radix_tree_next_slot(slot, &iter,
						RADIX_TREE_ITER_TAGGED);
		if (!slot) {
			slot = idr_get_free(root, &iter, GFP_NOWAIT, IDA_MAX);
			if (IS_ERR(slot)) {
				/*
				 * -ENOMEM becomes -EAGAIN: the caller is
				 * expected to drop its lock and retry via
				 * ida_pre_get().
				 */
				if (slot == ERR_PTR(-ENOMEM))
					return -EAGAIN;
				return PTR_ERR(slot);
			}
		}
		/* Moved past the starting leaf: search from bit 0 now. */
		if (iter.index > index) {
			bit = 0;
			ebit = RADIX_TREE_EXCEPTIONAL_SHIFT;
		}
		new = iter.index * IDA_BITMAP_BITS;
		bitmap = rcu_dereference_raw(*slot);
		if (radix_tree_exception(bitmap)) {
			/* Small-leaf case: bits stored directly in the slot word. */
			unsigned long tmp = (unsigned long)bitmap;
			ebit = find_next_zero_bit(&tmp, BITS_PER_LONG, ebit);
			if (ebit < BITS_PER_LONG) {
				tmp |= 1UL << ebit;
				rcu_assign_pointer(*slot, (void *)tmp);
				*id = new + ebit - RADIX_TREE_EXCEPTIONAL_SHIFT;
				return 0;
			}
			/* Exceptional entry full: upgrade to a real bitmap. */
			bitmap = this_cpu_xchg(ida_bitmap, NULL);
			if (!bitmap)
				return -EAGAIN;
			memset(bitmap, 0, sizeof(*bitmap));
			bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT;
			rcu_assign_pointer(*slot, bitmap);
		}

		if (bitmap) {
			bit = find_next_zero_bit(bitmap->bitmap,
							IDA_BITMAP_BITS, bit);
			new += bit;
			/* Would exceed INT_MAX: the IDA is full. */
			if (new < 0)
				return -ENOSPC;
			/* This leaf has no free bit at or after @bit; next leaf. */
			if (bit == IDA_BITMAP_BITS)
				continue;

			__set_bit(bit, bitmap->bitmap);
			if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
				radix_tree_iter_tag_clear(root, &iter,
								IDR_FREE);
		} else {
			/* Empty slot: create a leaf for this allocation. */
			new += bit;
			if (new < 0)
				return -ENOSPC;
			if (ebit < BITS_PER_LONG) {
				/* Low bit: an exceptional entry suffices. */
				bitmap = (void *)((1UL << ebit) |
						RADIX_TREE_EXCEPTIONAL_ENTRY);
				radix_tree_iter_replace(root, &iter, slot,
						bitmap);
				*id = new;
				return 0;
			}
			bitmap = this_cpu_xchg(ida_bitmap, NULL);
			if (!bitmap)
				return -EAGAIN;
			memset(bitmap, 0, sizeof(*bitmap));
			__set_bit(bit, bitmap->bitmap);
			radix_tree_iter_replace(root, &iter, slot, bitmap);
		}

		*id = new;
		return 0;
	}
}
EXPORT_SYMBOL(ida_get_new_above);

/**
 * ida_remove - Free the given ID
 * @ida: ida handle
 * @id: ID to free
 *
 * This function should not be called at the same time as ida_get_new_above().
 */
void ida_remove(struct ida *ida, int id)
{
	unsigned long index = id / IDA_BITMAP_BITS;
	unsigned offset = id % IDA_BITMAP_BITS;
	struct ida_bitmap *bitmap;
	unsigned long *btmp;
	struct radix_tree_iter iter;
	void __rcu **slot;

	slot = radix_tree_iter_lookup(&ida->ida_rt, &iter, index);
	if (!slot)
		goto err;

	bitmap = rcu_dereference_raw(*slot);
	if (radix_tree_exception(bitmap)) {
		/* Bits live in the slot word itself; adjust for the encoding. */
		btmp = (unsigned long *)slot;
		offset += RADIX_TREE_EXCEPTIONAL_SHIFT;
		if (offset >= BITS_PER_LONG)
			goto err;
	} else {
		btmp = bitmap->bitmap;
	}
	if (!test_bit(offset, btmp))
		goto err;

	__clear_bit(offset, btmp);
	/* The leaf now has at least one free bit. */
	radix_tree_iter_tag_set(&ida->ida_rt, &iter, IDR_FREE);
	if (radix_tree_exception(bitmap)) {
		/* Only the exceptional marker left: drop the empty entry. */
		if (rcu_dereference_raw(*slot) ==
					(void *)RADIX_TREE_EXCEPTIONAL_ENTRY)
			radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
	} else if (bitmap_empty(btmp, IDA_BITMAP_BITS)) {
		kfree(bitmap);
		radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
	}
	return;
 err:
	WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - Free the contents of an ida
 * @ida: ida handle
 *
 * Calling this function releases all resources associated with an IDA.  When
 * this call returns, the IDA is empty and can be reused or freed.  The caller
 * should not allow ida_remove() or ida_get_new_above() to be called at the
 * same time.
 */
void ida_destroy(struct ida *ida)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	radix_tree_for_each_slot(slot, &ida->ida_rt, &iter, 0) {
		struct ida_bitmap *bitmap = rcu_dereference_raw(*slot);
		/* Exceptional entries are encoded in the slot; nothing to free. */
		if (!radix_tree_exception(bitmap))
			kfree(bitmap);
		radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
	}
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < 0x8000000)
 * @end: the maximum id (exclusive, < 0x8000000 or 0)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.
 *
 * Compared to ida_get_new_above() this function does its own locking, and
 * should be used unless there are special requirements.
 *
 * Use ida_simple_remove() to get rid of an id.
 */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask)
{
	int ret, id;
	unsigned int max;
	unsigned long flags;

	BUG_ON((int)start < 0);
	BUG_ON((int)end < 0);

	/* end == 0 means "no upper bound", i.e. one past INT_MAX. */
	if (end == 0)
		max = 0x80000000;
	else {
		BUG_ON(end < start);
		max = end - 1;
	}

again:
	if (!ida_pre_get(ida, gfp_mask))
		return -ENOMEM;

	spin_lock_irqsave(&simple_ida_lock, flags);
	ret = ida_get_new_above(ida, start, &id);
	if (!ret) {
		/* ida_get_new_above() has no upper bound; enforce @max here. */
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	spin_unlock_irqrestore(&simple_ida_lock, flags);

	/* -EAGAIN: the preallocated bitmap was consumed; refill and retry. */
	if (unlikely(ret == -EAGAIN))
		goto again;

	return ret;
}
EXPORT_SYMBOL(ida_simple_get);

/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 *
 * Use to release an id allocated with ida_simple_get().
 *
 * Compared to ida_remove() this function does its own locking, and should be
 * used unless there are special requirements.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
{
	unsigned long flags;

	BUG_ON((int)id < 0);
	spin_lock_irqsave(&simple_ida_lock, flags);
	ida_remove(ida, id);
	spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);