#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap);
static DEFINE_SPINLOCK(simple_ida_lock);

int idr_alloc_cmn(struct idr *idr, void *ptr, unsigned long *index,
		  unsigned long start, unsigned long end, gfp_t gfp,
		  bool ext)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
		return -EINVAL;

	radix_tree_iter_init(&iter, start);
	if (ext)
		slot = idr_get_free_ext(&idr->idr_rt, &iter, gfp, end);
	else
		slot = idr_get_free(&idr->idr_rt, &iter, gfp, end);
	if (IS_ERR(slot))
		return PTR_ERR(slot);

	radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
	radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);

	if (index)
		*index = iter.index;
	return 0;
}
EXPORT_SYMBOL_GPL(idr_alloc_cmn);

/**
 * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
 * @idr: idr handle
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive)
 * @gfp: memory allocation flags
 *
 * Allocates an ID larger than the last ID allocated if one is available.
 * If not, it will attempt to allocate the smallest ID that is greater than
 * or equal to @start.
 */
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
	int id, curr = idr->idr_next;

	if (curr < start)
		curr = start;

	id = idr_alloc(idr, ptr, curr, end, gfp);
	if ((id == -ENOSPC) && (curr > start))
		id = idr_alloc(idr, ptr, start, curr, gfp);

	if (id >= 0)
		idr->idr_next = id + 1U;

	return id;
}
EXPORT_SYMBOL(idr_alloc_cyclic);

/**
 * idr_for_each - iterate through all stored pointers
 * @idr: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed to callback function
 *
 * The callback function will be called for each entry in @idr, passing
 * the id, the pointer and the data pointer passed to this function.
 *
 * If @fn returns anything other than %0, the iteration stops and that
 * value is returned from this function.
 *
 * idr_for_each() can be called concurrently with idr_alloc() and
 * idr_remove() if protected by RCU. Newly added entries may not be
 * seen and deleted entries may be seen, but adding and removing entries
 * will not cause other entries to be skipped, nor spurious ones to be seen.
 */
int idr_for_each(const struct idr *idr,
		int (*fn)(int id, void *p, void *data), void *data)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
		int ret = fn(iter.index, rcu_dereference_raw(*slot), data);

		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(idr_for_each);
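
/*
 * Usage sketch (illustrative only, not part of this file): allocating
 * cyclic IDs for hypothetical 'struct foo' objects and then walking them.
 * The names foo_idr, foo_register and foo_show are invented for the
 * example; any locking against concurrent modification is the caller's
 * responsibility.
 *
 *	static DEFINE_IDR(foo_idr);
 *
 *	static int foo_register(struct foo *foo, gfp_t gfp)
 *	{
 *		// end == 0: no upper bound, so ids run up to INT_MAX
 *		int id = idr_alloc_cyclic(&foo_idr, foo, 0, 0, gfp);
 *
 *		if (id < 0)
 *			return id;	// -ENOMEM or -ENOSPC
 *		foo->id = id;
 *		return 0;
 *	}
 *
 *	static int foo_show(int id, void *p, void *data)
 *	{
 *		pr_info("foo %d at %p\n", id, p);
 *		return 0;	// returning non-zero would stop the walk
 *	}
 *
 *	// later: idr_for_each(&foo_idr, foo_show, NULL);
 */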

/**
 * idr_get_next - Find next populated entry
 * @idr: idr handle
 * @nextid: Pointer to lowest possible ID to return
 *
 * Returns the next populated entry in the tree with an ID greater than
 * or equal to the value pointed to by @nextid. On exit, @nextid is updated
 * to the ID of the found value. To use in a loop, the value pointed to by
 * @nextid must be incremented by the user.
 */
void *idr_get_next(struct idr *idr, int *nextid)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	slot = radix_tree_iter_find(&idr->idr_rt, &iter, *nextid);
	if (!slot)
		return NULL;

	*nextid = iter.index;
	return rcu_dereference_raw(*slot);
}
EXPORT_SYMBOL(idr_get_next);

void *idr_get_next_ext(struct idr *idr, unsigned long *nextid)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	slot = radix_tree_iter_find(&idr->idr_rt, &iter, *nextid);
	if (!slot)
		return NULL;

	*nextid = iter.index;
	return rcu_dereference_raw(*slot);
}
EXPORT_SYMBOL(idr_get_next_ext);

/**
 * idr_replace - replace pointer for given id
 * @idr: idr handle
 * @ptr: New pointer to associate with the ID
 * @id: Lookup key
 *
 * Replace the pointer registered with an ID and return the old value.
 * This function can be called under the RCU read lock concurrently with
 * idr_alloc() and idr_remove() (as long as the ID being removed is not
 * the one being replaced!).
 *
 * Returns: the old value on success. %-ENOENT indicates that @id was not
 * found. %-EINVAL indicates that @id or @ptr were not valid.
 */
void *idr_replace(struct idr *idr, void *ptr, int id)
{
	if (id < 0)
		return ERR_PTR(-EINVAL);

	return idr_replace_ext(idr, ptr, id);
}
EXPORT_SYMBOL(idr_replace);

void *idr_replace_ext(struct idr *idr, void *ptr, unsigned long id)
{
	struct radix_tree_node *node;
	void __rcu **slot = NULL;
	void *entry;

	if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
		return ERR_PTR(-EINVAL);

	entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
	if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
		return ERR_PTR(-ENOENT);

	__radix_tree_replace(&idr->idr_rt, node, slot, ptr, NULL);

	return entry;
}
EXPORT_SYMBOL(idr_replace_ext);

/**
 * DOC: IDA description
 *
 * The IDA is an ID allocator which does not provide the ability to
 * associate an ID with a pointer. As such, it only needs to store one
 * bit per ID, and so is more space efficient than an IDR. To use an IDA,
 * define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
 * then initialise it using ida_init()). To allocate a new ID, call
 * ida_simple_get(). To free an ID, call ida_simple_remove().
 *
 * If you have more complex locking requirements, use a loop around
 * ida_pre_get() and ida_get_new() to allocate a new ID. Then use
 * ida_remove() to free an ID. You must make sure that ida_get_new() and
 * ida_remove() cannot be called at the same time as each other for the
 * same IDA.
 *
 * You can also use ida_get_new_above() if you need an ID to be allocated
 * above a particular number. ida_destroy() can be used to dispose of an
 * IDA without needing to free the individual IDs in it. You can use
 * ida_is_empty() to find out whether the IDA has any IDs currently allocated.
 *
 * IDs are currently limited to the range [0, INT_MAX]. If this is an
 * awkward limitation, it should be quite straightforward to raise the
 * maximum.
 */
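
/*
 * Usage sketch for the simple API described above (illustrative only,
 * not part of this file): a minimal instance-numbering scheme. 'foo_ida'
 * and the surrounding functions are invented names.
 *
 *	static DEFINE_IDA(foo_ida);
 *
 *	static int foo_get_instance(void)
 *	{
 *		// any id in [0, INT_MAX]; ida_simple_get() does its own locking
 *		return ida_simple_get(&foo_ida, 0, 0, GFP_KERNEL);
 *	}
 *
 *	static void foo_put_instance(int id)
 *	{
 *		ida_simple_remove(&foo_ida, id);
 *	}
 */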

/*
 * Developer's notes:
 *
 * The IDA uses the functionality provided by the IDR & radix tree to store
 * bitmaps in each entry. The IDR_FREE tag means there is at least one bit
 * free, unlike the IDR where it means at least one entry is free.
 *
 * I considered telling the radix tree that each slot is an order-10 node
 * and storing the bit numbers in the radix tree, but the radix tree can't
 * allow a single multiorder entry at index 0, which would significantly
 * increase memory consumption for the IDA. So instead we divide the index
 * by the number of bits in the leaf bitmap before doing a radix tree lookup.
 *
 * As an optimisation, if there are only a few low bits set in any given
 * leaf, instead of allocating a 128-byte bitmap, we use the 'exceptional
 * entry' functionality of the radix tree to store BITS_PER_LONG - 2 bits
 * directly in the entry. By being really tricksy, we could store
 * BITS_PER_LONG - 1 bits, but there are diminishing returns after
 * optimising for 0-3 allocated IDs.
 *
 * We allow the radix tree 'exceptional' count to get out of date. Nothing
 * in the IDA nor the radix tree code checks it. If it becomes important
 * to maintain an accurate exceptional count, switch the rcu_assign_pointer()
 * calls to radix_tree_iter_replace() which will correct the exceptional
 * count.
 *
 * The IDA always requires a lock to alloc/free. If we add a 'test_bit'
 * equivalent, it will still need locking. Going to RCU lookup would require
 * using RCU to free bitmaps, and that's not trivial without embedding an
 * RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte
 * bitmap, which is excessive.
 */

#define IDA_MAX		(0x80000000U / IDA_BITMAP_BITS)
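
/*
 * Worked example of the scheme described above (assuming the usual
 * 128-byte leaf bitmap, i.e. IDA_BITMAP_BITS == 1024, and 64-bit longs):
 *
 *	id = 3000
 *	index = 3000 / 1024 = 2		// radix tree slot to look up
 *	bit   = 3000 % 1024 = 952	// bit within that leaf's bitmap
 *
 * For a leaf holding only low bits, say IDs 0 and 1, the exceptional
 * encoding stores the bits shifted up past the entry's tag bits:
 *
 *	entry = (0x3UL << RADIX_TREE_EXCEPTIONAL_SHIFT)
 *			| RADIX_TREE_EXCEPTIONAL_ENTRY;
 *
 * leaving BITS_PER_LONG - 2 == 62 usable bits before the leaf must be
 * promoted to a real struct ida_bitmap.
 */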

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @start: id to start search at
 * @id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @start. It should be called
 * with any required locks to ensure that concurrent calls to
 * ida_get_new_above() / ida_get_new() / ida_remove() are not allowed.
 * Consider using ida_simple_get() if you do not have complex locking
 * requirements.
 *
 * If memory is required, it will return %-EAGAIN; you should unlock
 * and go back to the ida_pre_get() call. If the ida is full, it will
 * return %-ENOSPC. On success, it will return 0.
 *
 * @id returns a value in the range @start ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int start, int *id)
{
	struct radix_tree_root *root = &ida->ida_rt;
	void __rcu **slot;
	struct radix_tree_iter iter;
	struct ida_bitmap *bitmap;
	unsigned long index;
	unsigned bit, ebit;
	int new;

	index = start / IDA_BITMAP_BITS;
	bit = start % IDA_BITMAP_BITS;
	ebit = bit + RADIX_TREE_EXCEPTIONAL_SHIFT;

	slot = radix_tree_iter_init(&iter, index);
	for (;;) {
		if (slot)
			slot = radix_tree_next_slot(slot, &iter,
						RADIX_TREE_ITER_TAGGED);
		if (!slot) {
			slot = idr_get_free(root, &iter, GFP_NOWAIT, IDA_MAX);
			if (IS_ERR(slot)) {
				if (slot == ERR_PTR(-ENOMEM))
					return -EAGAIN;
				return PTR_ERR(slot);
			}
		}
		if (iter.index > index) {
			bit = 0;
			ebit = RADIX_TREE_EXCEPTIONAL_SHIFT;
		}
		new = iter.index * IDA_BITMAP_BITS;
		bitmap = rcu_dereference_raw(*slot);
		if (radix_tree_exception(bitmap)) {
			unsigned long tmp = (unsigned long)bitmap;

			ebit = find_next_zero_bit(&tmp, BITS_PER_LONG, ebit);
			if (ebit < BITS_PER_LONG) {
				tmp |= 1UL << ebit;
				rcu_assign_pointer(*slot, (void *)tmp);
				*id = new + ebit - RADIX_TREE_EXCEPTIONAL_SHIFT;
				return 0;
			}
			bitmap = this_cpu_xchg(ida_bitmap, NULL);
			if (!bitmap)
				return -EAGAIN;
			memset(bitmap, 0, sizeof(*bitmap));
			bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT;
			rcu_assign_pointer(*slot, bitmap);
		}

		if (bitmap) {
			bit = find_next_zero_bit(bitmap->bitmap,
							IDA_BITMAP_BITS, bit);
			new += bit;
			if (new < 0)
				return -ENOSPC;
			if (bit == IDA_BITMAP_BITS)
				continue;

			__set_bit(bit, bitmap->bitmap);
			if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
				radix_tree_iter_tag_clear(root, &iter,
								IDR_FREE);
		} else {
			new += bit;
			if (new < 0)
				return -ENOSPC;
			if (ebit < BITS_PER_LONG) {
				bitmap = (void *)((1UL << ebit) |
						RADIX_TREE_EXCEPTIONAL_ENTRY);
				radix_tree_iter_replace(root, &iter, slot,
						bitmap);
				*id = new;
				return 0;
			}
			bitmap = this_cpu_xchg(ida_bitmap, NULL);
			if (!bitmap)
				return -EAGAIN;
			memset(bitmap, 0, sizeof(*bitmap));
			__set_bit(bit, bitmap->bitmap);
			radix_tree_iter_replace(root, &iter, slot, bitmap);
		}

		*id = new;
		return 0;
	}
}
EXPORT_SYMBOL(ida_get_new_above);
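
/*
 * Usage sketch for the kernel-doc above (illustrative only): the
 * retry-on-%-EAGAIN loop around ida_pre_get(), with a caller-provided
 * lock, mirroring what ida_simple_get() does internally. 'foo_ida' and
 * 'foo_lock' are invented names.
 *
 *	static DEFINE_IDA(foo_ida);
 *	static DEFINE_SPINLOCK(foo_lock);
 *
 *	static int foo_alloc_id(int start, gfp_t gfp)
 *	{
 *		int id, err;
 *
 *		do {
 *			if (!ida_pre_get(&foo_ida, gfp))
 *				return -ENOMEM;
 *			spin_lock(&foo_lock);
 *			err = ida_get_new_above(&foo_ida, start, &id);
 *			spin_unlock(&foo_lock);
 *		} while (err == -EAGAIN);
 *
 *		return err ? err : id;
 *	}
 */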

/**
 * ida_remove - Free the given ID
 * @ida: ida handle
 * @id: ID to free
 *
 * This function should not be called at the same time as ida_get_new_above().
 */
void ida_remove(struct ida *ida, int id)
{
	unsigned long index = id / IDA_BITMAP_BITS;
	unsigned offset = id % IDA_BITMAP_BITS;
	struct ida_bitmap *bitmap;
	unsigned long *btmp;
	struct radix_tree_iter iter;
	void __rcu **slot;

	slot = radix_tree_iter_lookup(&ida->ida_rt, &iter, index);
	if (!slot)
		goto err;

	bitmap = rcu_dereference_raw(*slot);
	if (radix_tree_exception(bitmap)) {
		btmp = (unsigned long *)slot;
		offset += RADIX_TREE_EXCEPTIONAL_SHIFT;
		if (offset >= BITS_PER_LONG)
			goto err;
	} else {
		btmp = bitmap->bitmap;
	}
	if (!test_bit(offset, btmp))
		goto err;

	__clear_bit(offset, btmp);
	radix_tree_iter_tag_set(&ida->ida_rt, &iter, IDR_FREE);
	if (radix_tree_exception(bitmap)) {
		if (rcu_dereference_raw(*slot) ==
					(void *)RADIX_TREE_EXCEPTIONAL_ENTRY)
			radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
	} else if (bitmap_empty(btmp, IDA_BITMAP_BITS)) {
		kfree(bitmap);
		radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
	}
	return;
 err:
	WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - Free the contents of an ida
 * @ida: ida handle
 *
 * Calling this function releases all resources associated with an IDA. When
 * this call returns, the IDA is empty and can be reused or freed. The caller
 * should not allow ida_remove() or ida_get_new_above() to be called at the
 * same time.
 */
void ida_destroy(struct ida *ida)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	radix_tree_for_each_slot(slot, &ida->ida_rt, &iter, 0) {
		struct ida_bitmap *bitmap = rcu_dereference_raw(*slot);

		if (!radix_tree_exception(bitmap))
			kfree(bitmap);
		radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
	}
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < %0x80000000)
 * @end: the maximum id (exclusive, < %0x80000000 or %0)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range @start <= id < @end, or returns %-ENOSPC.
 * On memory allocation failure, returns %-ENOMEM.
 *
 * Compared to ida_get_new_above() this function does its own locking, and
 * should be used unless there are special requirements.
 *
 * Use ida_simple_remove() to get rid of an id.
 */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask)
{
	int ret, id;
	unsigned int max;
	unsigned long flags;

	BUG_ON((int)start < 0);
	BUG_ON((int)end < 0);

	if (end == 0)
		max = 0x80000000;
	else {
		BUG_ON(end < start);
		max = end - 1;
	}

again:
	if (!ida_pre_get(ida, gfp_mask))
		return -ENOMEM;

	spin_lock_irqsave(&simple_ida_lock, flags);
	ret = ida_get_new_above(ida, start, &id);
	if (!ret) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	spin_unlock_irqrestore(&simple_ida_lock, flags);

	if (unlikely(ret == -EAGAIN))
		goto again;

	return ret;
}
EXPORT_SYMBOL(ida_simple_get);
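
/*
 * Usage sketch for the bounded form (illustrative only): carving ids out
 * of a fixed window, e.g. for a hypothetical device with 256 minors.
 * 'foo_minor_ida' and 'foo_alloc_minor' are invented names.
 *
 *	static DEFINE_IDA(foo_minor_ida);
 *
 *	static int foo_alloc_minor(void)
 *	{
 *		// ids 0..255; returns -ENOSPC once all 256 are in use
 *		return ida_simple_get(&foo_minor_ida, 0, 256, GFP_KERNEL);
 *	}
 */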

/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get().
 *
 * Use to release an id allocated with ida_simple_get().
 *
 * Compared to ida_remove() this function does its own locking, and should be
 * used unless there are special requirements.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
{
	unsigned long flags;

	BUG_ON((int)id < 0);
	spin_lock_irqsave(&simple_ida_lock, flags);
	ida_remove(ida, id);
	spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);
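
/*
 * Teardown sketch (illustrative only): on module exit it is enough to
 * dispose of the whole IDA at once via ida_destroy() rather than
 * releasing ids one by one. 'foo_minor_ida' is the invented name from
 * the sketch above.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		// frees every leaf bitmap; the ida is then empty and reusable
 *		ida_destroy(&foo_minor_ida);
 *	}
 */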