/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer or whatever; we treat it as a (void *).  You can pass this
 * id to a user for him to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep IDR_FREE_MAX free layers in a local
 * pool) so we don't need to go to the memory "store" during an id
 * allocate; that way you don't need to be too concerned about locking
 * and conflicts with the slab allocator.
 */

#ifndef TEST                        // to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>

static struct kmem_cache *idr_layer_cache;

static struct idr_layer *alloc_layer(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return p;
}

/* only called when idp->lock is held */
static void __free_layer(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void free_layer(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__free_layer(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, &p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), &p->bitmap);
	}
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp:	idr handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < IDR_FREE_MAX) {
		struct idr_layer *new;
		new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return 0;
		free_layer(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);
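/*
 * A minimal usage sketch (hypothetical caller code, not part of this file):
 * the usual pattern is to loop over idr_pre_get() and idr_get_new() until
 * the allocation stops failing with -EAGAIN.  "my_idr", "my_lock" and
 * "my_ptr" are assumed caller-side names.
 *
 *	int id, ret;
 *
 *	do {
 *		if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		ret = idr_get_new(&my_idr, my_ptr, &id);
 *		spin_unlock(&my_lock);
 *	} while (ret == -EAGAIN);
 *	if (ret)
 *		return ret;	/* -ENOSPC: the idr is full */
 */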
/*
 * Returns the allocated id on success, -1 if a layer allocation failed
 * (the caller needs to replenish via idr_pre_get()), -2 if the tree has
 * to grow (the caller retries through build_up) or -3 if the id space
 * is exhausted.
 */
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;
	long bm;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (!(p = pa[l])) {
				*starting_id = id;
				return -2;
			}

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return -3;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			if (!(new = alloc_layer(idp)))
				return -1;
			p->ary[m] = new;
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = alloc_layer(idp)))
			return -1;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
		layers++;
		if (!p->count)
			continue;
		if (!(new = alloc_layer(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				__free_layer(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -1;
		}
		new->ary[0] = p;
		new->count = 1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	idp->top = p;
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa);
	if (v == -2)
		goto build_up;
	return v;
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	int id;

	id = idr_get_empty_slot(idp, starting_id, pa);
	if (id >= 0) {
		/*
		 * Successfully found an empty slot.  Install the user
		 * pointer and mark the slot full.
		 */
		pa[0]->ary[id & IDR_MASK] = (struct idr_layer *)ptr;
		pa[0]->count++;
		idr_mark_full(pa, id);
	}

	return id;
}
/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp:	idr handle
 * @ptr:	pointer you want associated with the id
 * @starting_id: id to start search at
 * @id:	pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, starting_id);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0) {
		if (rv == -1)
			return -EAGAIN;
		else /* Will be -3 */
			return -ENOSPC;
	}
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);

/**
 * idr_get_new - allocate new idr entry
 * @idp:	idr handle
 * @ptr:	pointer you want associated with the id
 * @id:	pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0) {
		if (rv == -1)
			return -EAGAIN;
		else /* Will be -3 */
			return -ENOSPC;
	}
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
		"idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL];
	struct idr_layer ***paa = &pa[0];
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
		__clear_bit(n, &p->bitmap);
		p->ary[n] = NULL;
		while (*paa && !--((**paa)->count)) {
			free_layer(idp, **paa);
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp:	idr handle
 * @id:	unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {	// We can drop a layer

		p = idp->top->ary[0];
		idp->top->bitmap = idp->top->count = 0;
		free_layer(idp, idp->top);
		idp->top = p;
		--idp->layers;
	}
	while (idp->id_free_cnt >= IDR_FREE_MAX) {
		p = alloc_layer(idp);
		kmem_cache_free(idr_layer_cache, p);
		return;
	}
}
EXPORT_SYMBOL(idr_remove);
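/*
 * Lookup/removal sketch (hypothetical caller code): idr_find() and
 * idr_remove() must be serialized against allocation by the caller,
 * typically with the same lock used around idr_get_new().  idr_find()
 * is defined further below in this file.
 *
 *	spin_lock(&my_lock);
 *	ptr = idr_find(&my_idr, id);
 *	if (ptr)
 *		idr_remove(&my_idr, id);
 *	spin_unlock(&my_lock);
 */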
/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp:	idr handle
 *
 * idr_destroy() only frees up unused, cached idr_layers, but this
 * function will remove all id mappings and leave all idr_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
	int n, id, max;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		id += 1 << n;
		while (n < fls(id)) {
			if (p) {
				memset(p, 0, sizeof *p);
				free_layer(idp, p);
			}
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->top = NULL;
	idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp:	idr handle
 */
void idr_destroy(struct idr *idp)
{
	while (idp->id_free_cnt) {
		struct idr_layer *p = alloc_layer(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp:	idr handle
 * @id:	lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	n = idp->layers * IDR_BITS;
	p = idp->top;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return NULL;

	while (n > 0 && p) {
		n -= IDR_BITS;
		p = p->ary[(id >> n) & IDR_MASK];
	}
	return (void *)p;
}
EXPORT_SYMBOL(idr_find);

/**
 * idr_for_each - iterate through all stored pointers
 * @idp:	idr handle
 * @fn:	function to be called for each pointer
 * @data:	data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than 0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);
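/*
 * Tear-down sketch following the sequence described above idr_remove_all()
 * (hypothetical caller code; "free_object" is an assumed callback and
 * assumes the stored objects were kmalloc()ed):
 *
 *	static int free_object(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&my_idr, free_object, NULL);
 *	idr_remove_all(&my_idr);
 *	idr_destroy(&my_idr);
 */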
/**
 * idr_replace - replace pointer for given id
 * @idp:	idr handle
 * @ptr:	pointer you want associated with the id
 * @id:	lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	n = idp->layers * IDR_BITS;
	p = idp->top;

	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	p->ary[n] = ptr;

	return old_p;
}
EXPORT_SYMBOL(idr_replace);
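/*
 * Replacement sketch (hypothetical caller code): idr_replace() returns
 * either the old pointer or an ERR_PTR() value, so check it with IS_ERR():
 *
 *	void *old;
 *
 *	old = idr_replace(&my_idr, new_ptr, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 */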
static void idr_cache_ctor(void *idr_layer, struct kmem_cache *idr_layer_cache,
			   unsigned long flags)
{
	memset(idr_layer, 0, sizeof(struct idr_layer));
}

static int init_id_cache(void)
{
	if (!idr_layer_cache)
		idr_layer_cache = kmem_cache_create("idr_layer_cache",
			sizeof(struct idr_layer), 0, 0, idr_cache_ctor);
	return 0;
}

/**
 * idr_init - initialize idr handle
 * @idp:	idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	init_id_cache();
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);


/*
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida:	ida handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida:	ida handle
 * @starting_id: id to start search at
 * @p_id:	pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... 0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
	if (t < 0) {
		if (t == -1)
			return -EAGAIN;
		else /* will be -3 */
			return -ENOSPC;
	}

	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		pa[0]->ary[idr_id & IDR_MASK] = (void *)bitmap;
		pa[0]->count++;
	}

	/* lookup for empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_ID_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have small memory foot print.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = alloc_layer(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);
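/*
 * IDA usage sketch (hypothetical caller code; "my_ida" and "my_lock" are
 * assumed names): the same pre-get/retry pattern as the idr functions,
 * except that no pointer is stored with the id.
 *
 *	int id, ret;
 *
 *	do {
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		ret = ida_get_new(&my_ida, &id);
 *		spin_unlock(&my_lock);
 *	} while (ret == -EAGAIN);
 *
 *	...
 *	ida_remove(&my_ida, id);
 */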
/**
 * ida_get_new - allocate new ID
 * @ida:	ida handle
 * @p_id:	pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range 0 ... 0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);

/**
 * ida_remove - remove the given ID
 * @ida:	ida handle
 * @id:	ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, &p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, &p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	printk(KERN_WARNING
	       "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida:	ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_init - initialize ida handle
 * @ida:	ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);