11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * 2002-10-18 written by Jim Houston jim.houston@ccur.com 31da177e4SLinus Torvalds * Copyright (C) 2002 by Concurrent Computer Corporation 41da177e4SLinus Torvalds * Distributed under the GNU GPL license version 2. 51da177e4SLinus Torvalds * 61da177e4SLinus Torvalds * Modified by George Anzinger to reuse immediately and to use 71da177e4SLinus Torvalds * find bit instructions. Also removed _irq on spinlocks. 81da177e4SLinus Torvalds * 91da177e4SLinus Torvalds * Small id to pointer translation service. 101da177e4SLinus Torvalds * 111da177e4SLinus Torvalds * It uses a radix tree like structure as a sparse array indexed 121da177e4SLinus Torvalds * by the id to obtain the pointer. The bitmap makes allocating 131da177e4SLinus Torvalds * a new id quick. 141da177e4SLinus Torvalds * 151da177e4SLinus Torvalds * You call it to allocate an id (an int) an associate with that id a 161da177e4SLinus Torvalds * pointer or what ever, we treat it as a (void *). You can pass this 171da177e4SLinus Torvalds * id to a user for him to pass back at a later time. You then pass 181da177e4SLinus Torvalds * that id to this code and it returns your pointer. 191da177e4SLinus Torvalds 201da177e4SLinus Torvalds * You can release ids at any time. When all ids are released, most of 211da177e4SLinus Torvalds * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we 221da177e4SLinus Torvalds * don't need to go to the memory "store" during an id allocate, just 231da177e4SLinus Torvalds * so you don't need to be too concerned about locking and conflicts 241da177e4SLinus Torvalds * with the slab allocator. 251da177e4SLinus Torvalds */ 261da177e4SLinus Torvalds 271da177e4SLinus Torvalds #ifndef TEST // to test in user space... 
281da177e4SLinus Torvalds #include <linux/slab.h> 291da177e4SLinus Torvalds #include <linux/init.h> 301da177e4SLinus Torvalds #include <linux/module.h> 311da177e4SLinus Torvalds #endif 325806f07cSJeff Mahoney #include <linux/err.h> 331da177e4SLinus Torvalds #include <linux/string.h> 341da177e4SLinus Torvalds #include <linux/idr.h> 351da177e4SLinus Torvalds 36e18b890bSChristoph Lameter static struct kmem_cache *idr_layer_cache; 371da177e4SLinus Torvalds 384ae53789SNadia Derbey static struct idr_layer *get_from_free_list(struct idr *idp) 391da177e4SLinus Torvalds { 401da177e4SLinus Torvalds struct idr_layer *p; 41c259cc28SRoland Dreier unsigned long flags; 421da177e4SLinus Torvalds 43c259cc28SRoland Dreier spin_lock_irqsave(&idp->lock, flags); 441da177e4SLinus Torvalds if ((p = idp->id_free)) { 451da177e4SLinus Torvalds idp->id_free = p->ary[0]; 461da177e4SLinus Torvalds idp->id_free_cnt--; 471da177e4SLinus Torvalds p->ary[0] = NULL; 481da177e4SLinus Torvalds } 49c259cc28SRoland Dreier spin_unlock_irqrestore(&idp->lock, flags); 501da177e4SLinus Torvalds return(p); 511da177e4SLinus Torvalds } 521da177e4SLinus Torvalds 531eec0056SSonny Rao /* only called when idp->lock is held */ 544ae53789SNadia Derbey static void __move_to_free_list(struct idr *idp, struct idr_layer *p) 551eec0056SSonny Rao { 561eec0056SSonny Rao p->ary[0] = idp->id_free; 571eec0056SSonny Rao idp->id_free = p; 581eec0056SSonny Rao idp->id_free_cnt++; 591eec0056SSonny Rao } 601eec0056SSonny Rao 614ae53789SNadia Derbey static void move_to_free_list(struct idr *idp, struct idr_layer *p) 621da177e4SLinus Torvalds { 63c259cc28SRoland Dreier unsigned long flags; 64c259cc28SRoland Dreier 651da177e4SLinus Torvalds /* 661da177e4SLinus Torvalds * Depends on the return element being zeroed. 
671da177e4SLinus Torvalds */ 68c259cc28SRoland Dreier spin_lock_irqsave(&idp->lock, flags); 694ae53789SNadia Derbey __move_to_free_list(idp, p); 70c259cc28SRoland Dreier spin_unlock_irqrestore(&idp->lock, flags); 711da177e4SLinus Torvalds } 721da177e4SLinus Torvalds 73e33ac8bdSTejun Heo static void idr_mark_full(struct idr_layer **pa, int id) 74e33ac8bdSTejun Heo { 75e33ac8bdSTejun Heo struct idr_layer *p = pa[0]; 76e33ac8bdSTejun Heo int l = 0; 77e33ac8bdSTejun Heo 78e33ac8bdSTejun Heo __set_bit(id & IDR_MASK, &p->bitmap); 79e33ac8bdSTejun Heo /* 80e33ac8bdSTejun Heo * If this layer is full mark the bit in the layer above to 81e33ac8bdSTejun Heo * show that this part of the radix tree is full. This may 82e33ac8bdSTejun Heo * complete the layer above and require walking up the radix 83e33ac8bdSTejun Heo * tree. 84e33ac8bdSTejun Heo */ 85e33ac8bdSTejun Heo while (p->bitmap == IDR_FULL) { 86e33ac8bdSTejun Heo if (!(p = pa[++l])) 87e33ac8bdSTejun Heo break; 88e33ac8bdSTejun Heo id = id >> IDR_BITS; 89e33ac8bdSTejun Heo __set_bit((id & IDR_MASK), &p->bitmap); 90e33ac8bdSTejun Heo } 91e33ac8bdSTejun Heo } 92e33ac8bdSTejun Heo 931da177e4SLinus Torvalds /** 941da177e4SLinus Torvalds * idr_pre_get - reserver resources for idr allocation 951da177e4SLinus Torvalds * @idp: idr handle 961da177e4SLinus Torvalds * @gfp_mask: memory allocation flags 971da177e4SLinus Torvalds * 981da177e4SLinus Torvalds * This function should be called prior to locking and calling the 991da177e4SLinus Torvalds * following function. It preallocates enough memory to satisfy 1001da177e4SLinus Torvalds * the worst possible allocation. 1011da177e4SLinus Torvalds * 1021da177e4SLinus Torvalds * If the system is REALLY out of memory this function returns 0, 1031da177e4SLinus Torvalds * otherwise 1. 
1041da177e4SLinus Torvalds */ 105fd4f2df2SAl Viro int idr_pre_get(struct idr *idp, gfp_t gfp_mask) 1061da177e4SLinus Torvalds { 1071da177e4SLinus Torvalds while (idp->id_free_cnt < IDR_FREE_MAX) { 1081da177e4SLinus Torvalds struct idr_layer *new; 1091da177e4SLinus Torvalds new = kmem_cache_alloc(idr_layer_cache, gfp_mask); 1101da177e4SLinus Torvalds if (new == NULL) 1111da177e4SLinus Torvalds return (0); 1124ae53789SNadia Derbey move_to_free_list(idp, new); 1131da177e4SLinus Torvalds } 1141da177e4SLinus Torvalds return 1; 1151da177e4SLinus Torvalds } 1161da177e4SLinus Torvalds EXPORT_SYMBOL(idr_pre_get); 1171da177e4SLinus Torvalds 118e33ac8bdSTejun Heo static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) 1191da177e4SLinus Torvalds { 1201da177e4SLinus Torvalds int n, m, sh; 1211da177e4SLinus Torvalds struct idr_layer *p, *new; 1227aae6dd8STejun Heo int l, id, oid; 1235ba25331SAl Viro unsigned long bm; 1241da177e4SLinus Torvalds 1251da177e4SLinus Torvalds id = *starting_id; 1267aae6dd8STejun Heo restart: 1271da177e4SLinus Torvalds p = idp->top; 1281da177e4SLinus Torvalds l = idp->layers; 1291da177e4SLinus Torvalds pa[l--] = NULL; 1301da177e4SLinus Torvalds while (1) { 1311da177e4SLinus Torvalds /* 1321da177e4SLinus Torvalds * We run around this while until we reach the leaf node... 1331da177e4SLinus Torvalds */ 1341da177e4SLinus Torvalds n = (id >> (IDR_BITS*l)) & IDR_MASK; 1351da177e4SLinus Torvalds bm = ~p->bitmap; 1361da177e4SLinus Torvalds m = find_next_bit(&bm, IDR_SIZE, n); 1371da177e4SLinus Torvalds if (m == IDR_SIZE) { 1381da177e4SLinus Torvalds /* no space available go back to previous layer. 
*/ 1391da177e4SLinus Torvalds l++; 1407aae6dd8STejun Heo oid = id; 1411da177e4SLinus Torvalds id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; 1427aae6dd8STejun Heo 1437aae6dd8STejun Heo /* if already at the top layer, we need to grow */ 1441da177e4SLinus Torvalds if (!(p = pa[l])) { 1451da177e4SLinus Torvalds *starting_id = id; 146944ca05cSNadia Derbey return IDR_NEED_TO_GROW; 1471da177e4SLinus Torvalds } 1487aae6dd8STejun Heo 1497aae6dd8STejun Heo /* If we need to go up one layer, continue the 1507aae6dd8STejun Heo * loop; otherwise, restart from the top. 1517aae6dd8STejun Heo */ 1527aae6dd8STejun Heo sh = IDR_BITS * (l + 1); 1537aae6dd8STejun Heo if (oid >> sh == id >> sh) 1541da177e4SLinus Torvalds continue; 1557aae6dd8STejun Heo else 1567aae6dd8STejun Heo goto restart; 1571da177e4SLinus Torvalds } 1581da177e4SLinus Torvalds if (m != n) { 1591da177e4SLinus Torvalds sh = IDR_BITS*l; 1601da177e4SLinus Torvalds id = ((id >> sh) ^ n ^ m) << sh; 1611da177e4SLinus Torvalds } 1621da177e4SLinus Torvalds if ((id >= MAX_ID_BIT) || (id < 0)) 163944ca05cSNadia Derbey return IDR_NOMORE_SPACE; 1641da177e4SLinus Torvalds if (l == 0) 1651da177e4SLinus Torvalds break; 1661da177e4SLinus Torvalds /* 1671da177e4SLinus Torvalds * Create the layer below if it is missing. 
1681da177e4SLinus Torvalds */ 1691da177e4SLinus Torvalds if (!p->ary[m]) { 1704ae53789SNadia Derbey new = get_from_free_list(idp); 1714ae53789SNadia Derbey if (!new) 1721da177e4SLinus Torvalds return -1; 1731da177e4SLinus Torvalds p->ary[m] = new; 1741da177e4SLinus Torvalds p->count++; 1751da177e4SLinus Torvalds } 1761da177e4SLinus Torvalds pa[l--] = p; 1771da177e4SLinus Torvalds p = p->ary[m]; 1781da177e4SLinus Torvalds } 179e33ac8bdSTejun Heo 180e33ac8bdSTejun Heo pa[l] = p; 181e33ac8bdSTejun Heo return id; 1821da177e4SLinus Torvalds } 1831da177e4SLinus Torvalds 184e33ac8bdSTejun Heo static int idr_get_empty_slot(struct idr *idp, int starting_id, 185e33ac8bdSTejun Heo struct idr_layer **pa) 1861da177e4SLinus Torvalds { 1871da177e4SLinus Torvalds struct idr_layer *p, *new; 1881da177e4SLinus Torvalds int layers, v, id; 189c259cc28SRoland Dreier unsigned long flags; 1901da177e4SLinus Torvalds 1911da177e4SLinus Torvalds id = starting_id; 1921da177e4SLinus Torvalds build_up: 1931da177e4SLinus Torvalds p = idp->top; 1941da177e4SLinus Torvalds layers = idp->layers; 1951da177e4SLinus Torvalds if (unlikely(!p)) { 1964ae53789SNadia Derbey if (!(p = get_from_free_list(idp))) 1971da177e4SLinus Torvalds return -1; 1981da177e4SLinus Torvalds layers = 1; 1991da177e4SLinus Torvalds } 2001da177e4SLinus Torvalds /* 2011da177e4SLinus Torvalds * Add a new layer to the top of the tree if the requested 2021da177e4SLinus Torvalds * id is larger than the currently allocated space. 2031da177e4SLinus Torvalds */ 204589777eaSZaur Kambarov while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) { 2051da177e4SLinus Torvalds layers++; 2061da177e4SLinus Torvalds if (!p->count) 2071da177e4SLinus Torvalds continue; 2084ae53789SNadia Derbey if (!(new = get_from_free_list(idp))) { 2091da177e4SLinus Torvalds /* 2101da177e4SLinus Torvalds * The allocation failed. If we built part of 2111da177e4SLinus Torvalds * the structure tear it down. 
2121da177e4SLinus Torvalds */ 213c259cc28SRoland Dreier spin_lock_irqsave(&idp->lock, flags); 2141da177e4SLinus Torvalds for (new = p; p && p != idp->top; new = p) { 2151da177e4SLinus Torvalds p = p->ary[0]; 2161da177e4SLinus Torvalds new->ary[0] = NULL; 2171da177e4SLinus Torvalds new->bitmap = new->count = 0; 2184ae53789SNadia Derbey __move_to_free_list(idp, new); 2191da177e4SLinus Torvalds } 220c259cc28SRoland Dreier spin_unlock_irqrestore(&idp->lock, flags); 2211da177e4SLinus Torvalds return -1; 2221da177e4SLinus Torvalds } 2231da177e4SLinus Torvalds new->ary[0] = p; 2241da177e4SLinus Torvalds new->count = 1; 2251da177e4SLinus Torvalds if (p->bitmap == IDR_FULL) 2261da177e4SLinus Torvalds __set_bit(0, &new->bitmap); 2271da177e4SLinus Torvalds p = new; 2281da177e4SLinus Torvalds } 2291da177e4SLinus Torvalds idp->top = p; 2301da177e4SLinus Torvalds idp->layers = layers; 231e33ac8bdSTejun Heo v = sub_alloc(idp, &id, pa); 232944ca05cSNadia Derbey if (v == IDR_NEED_TO_GROW) 2331da177e4SLinus Torvalds goto build_up; 2341da177e4SLinus Torvalds return(v); 2351da177e4SLinus Torvalds } 2361da177e4SLinus Torvalds 237e33ac8bdSTejun Heo static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) 238e33ac8bdSTejun Heo { 239e33ac8bdSTejun Heo struct idr_layer *pa[MAX_LEVEL]; 240e33ac8bdSTejun Heo int id; 241e33ac8bdSTejun Heo 242e33ac8bdSTejun Heo id = idr_get_empty_slot(idp, starting_id, pa); 243e33ac8bdSTejun Heo if (id >= 0) { 244e33ac8bdSTejun Heo /* 245e33ac8bdSTejun Heo * Successfully found an empty slot. Install the user 246e33ac8bdSTejun Heo * pointer and mark the slot full. 
247e33ac8bdSTejun Heo */ 248e33ac8bdSTejun Heo pa[0]->ary[id & IDR_MASK] = (struct idr_layer *)ptr; 249e33ac8bdSTejun Heo pa[0]->count++; 250e33ac8bdSTejun Heo idr_mark_full(pa, id); 251e33ac8bdSTejun Heo } 252e33ac8bdSTejun Heo 253e33ac8bdSTejun Heo return id; 254e33ac8bdSTejun Heo } 255e33ac8bdSTejun Heo 2561da177e4SLinus Torvalds /** 2577c657f2fSJohn McCutchan * idr_get_new_above - allocate new idr entry above or equal to a start id 2581da177e4SLinus Torvalds * @idp: idr handle 2591da177e4SLinus Torvalds * @ptr: pointer you want associated with the ide 2601da177e4SLinus Torvalds * @start_id: id to start search at 2611da177e4SLinus Torvalds * @id: pointer to the allocated handle 2621da177e4SLinus Torvalds * 2631da177e4SLinus Torvalds * This is the allocate id function. It should be called with any 2641da177e4SLinus Torvalds * required locks. 2651da177e4SLinus Torvalds * 2661da177e4SLinus Torvalds * If memory is required, it will return -EAGAIN, you should unlock 2671da177e4SLinus Torvalds * and go back to the idr_pre_get() call. If the idr is full, it will 2681da177e4SLinus Torvalds * return -ENOSPC. 2691da177e4SLinus Torvalds * 2701da177e4SLinus Torvalds * @id returns a value in the range 0 ... 0x7fffffff 2711da177e4SLinus Torvalds */ 2721da177e4SLinus Torvalds int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) 2731da177e4SLinus Torvalds { 2741da177e4SLinus Torvalds int rv; 275e15ae2ddSJesper Juhl 2761da177e4SLinus Torvalds rv = idr_get_new_above_int(idp, ptr, starting_id); 2771da177e4SLinus Torvalds /* 2781da177e4SLinus Torvalds * This is a cheap hack until the IDR code can be fixed to 2791da177e4SLinus Torvalds * return proper error values. 
2801da177e4SLinus Torvalds */ 281944ca05cSNadia Derbey if (rv < 0) 282944ca05cSNadia Derbey return _idr_rc_to_errno(rv); 2831da177e4SLinus Torvalds *id = rv; 2841da177e4SLinus Torvalds return 0; 2851da177e4SLinus Torvalds } 2861da177e4SLinus Torvalds EXPORT_SYMBOL(idr_get_new_above); 2871da177e4SLinus Torvalds 2881da177e4SLinus Torvalds /** 2891da177e4SLinus Torvalds * idr_get_new - allocate new idr entry 2901da177e4SLinus Torvalds * @idp: idr handle 2911da177e4SLinus Torvalds * @ptr: pointer you want associated with the ide 2921da177e4SLinus Torvalds * @id: pointer to the allocated handle 2931da177e4SLinus Torvalds * 2941da177e4SLinus Torvalds * This is the allocate id function. It should be called with any 2951da177e4SLinus Torvalds * required locks. 2961da177e4SLinus Torvalds * 2971da177e4SLinus Torvalds * If memory is required, it will return -EAGAIN, you should unlock 2981da177e4SLinus Torvalds * and go back to the idr_pre_get() call. If the idr is full, it will 2991da177e4SLinus Torvalds * return -ENOSPC. 3001da177e4SLinus Torvalds * 3011da177e4SLinus Torvalds * @id returns a value in the range 0 ... 0x7fffffff 3021da177e4SLinus Torvalds */ 3031da177e4SLinus Torvalds int idr_get_new(struct idr *idp, void *ptr, int *id) 3041da177e4SLinus Torvalds { 3051da177e4SLinus Torvalds int rv; 306e15ae2ddSJesper Juhl 3071da177e4SLinus Torvalds rv = idr_get_new_above_int(idp, ptr, 0); 3081da177e4SLinus Torvalds /* 3091da177e4SLinus Torvalds * This is a cheap hack until the IDR code can be fixed to 3101da177e4SLinus Torvalds * return proper error values. 
3111da177e4SLinus Torvalds */ 312944ca05cSNadia Derbey if (rv < 0) 313944ca05cSNadia Derbey return _idr_rc_to_errno(rv); 3141da177e4SLinus Torvalds *id = rv; 3151da177e4SLinus Torvalds return 0; 3161da177e4SLinus Torvalds } 3171da177e4SLinus Torvalds EXPORT_SYMBOL(idr_get_new); 3181da177e4SLinus Torvalds 3191da177e4SLinus Torvalds static void idr_remove_warning(int id) 3201da177e4SLinus Torvalds { 321f098ad65SNadia Derbey printk(KERN_WARNING 322f098ad65SNadia Derbey "idr_remove called for id=%d which is not allocated.\n", id); 3231da177e4SLinus Torvalds dump_stack(); 3241da177e4SLinus Torvalds } 3251da177e4SLinus Torvalds 3261da177e4SLinus Torvalds static void sub_remove(struct idr *idp, int shift, int id) 3271da177e4SLinus Torvalds { 3281da177e4SLinus Torvalds struct idr_layer *p = idp->top; 3291da177e4SLinus Torvalds struct idr_layer **pa[MAX_LEVEL]; 3301da177e4SLinus Torvalds struct idr_layer ***paa = &pa[0]; 3311da177e4SLinus Torvalds int n; 3321da177e4SLinus Torvalds 3331da177e4SLinus Torvalds *paa = NULL; 3341da177e4SLinus Torvalds *++paa = &idp->top; 3351da177e4SLinus Torvalds 3361da177e4SLinus Torvalds while ((shift > 0) && p) { 3371da177e4SLinus Torvalds n = (id >> shift) & IDR_MASK; 3381da177e4SLinus Torvalds __clear_bit(n, &p->bitmap); 3391da177e4SLinus Torvalds *++paa = &p->ary[n]; 3401da177e4SLinus Torvalds p = p->ary[n]; 3411da177e4SLinus Torvalds shift -= IDR_BITS; 3421da177e4SLinus Torvalds } 3431da177e4SLinus Torvalds n = id & IDR_MASK; 3441da177e4SLinus Torvalds if (likely(p != NULL && test_bit(n, &p->bitmap))){ 3451da177e4SLinus Torvalds __clear_bit(n, &p->bitmap); 3461da177e4SLinus Torvalds p->ary[n] = NULL; 3471da177e4SLinus Torvalds while(*paa && ! 
--((**paa)->count)){ 3484ae53789SNadia Derbey move_to_free_list(idp, **paa); 3491da177e4SLinus Torvalds **paa-- = NULL; 3501da177e4SLinus Torvalds } 3511da177e4SLinus Torvalds if (!*paa) 3521da177e4SLinus Torvalds idp->layers = 0; 353e15ae2ddSJesper Juhl } else 3541da177e4SLinus Torvalds idr_remove_warning(id); 3551da177e4SLinus Torvalds } 3561da177e4SLinus Torvalds 3571da177e4SLinus Torvalds /** 3581da177e4SLinus Torvalds * idr_remove - remove the given id and free it's slot 35972fd4a35SRobert P. J. Day * @idp: idr handle 36072fd4a35SRobert P. J. Day * @id: unique key 3611da177e4SLinus Torvalds */ 3621da177e4SLinus Torvalds void idr_remove(struct idr *idp, int id) 3631da177e4SLinus Torvalds { 3641da177e4SLinus Torvalds struct idr_layer *p; 3651da177e4SLinus Torvalds 3661da177e4SLinus Torvalds /* Mask off upper bits we don't use for the search. */ 3671da177e4SLinus Torvalds id &= MAX_ID_MASK; 3681da177e4SLinus Torvalds 3691da177e4SLinus Torvalds sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); 370e15ae2ddSJesper Juhl if (idp->top && idp->top->count == 1 && (idp->layers > 1) && 3711da177e4SLinus Torvalds idp->top->ary[0]) { // We can drop a layer 3721da177e4SLinus Torvalds 3731da177e4SLinus Torvalds p = idp->top->ary[0]; 3741da177e4SLinus Torvalds idp->top->bitmap = idp->top->count = 0; 3754ae53789SNadia Derbey move_to_free_list(idp, idp->top); 3761da177e4SLinus Torvalds idp->top = p; 3771da177e4SLinus Torvalds --idp->layers; 3781da177e4SLinus Torvalds } 3791da177e4SLinus Torvalds while (idp->id_free_cnt >= IDR_FREE_MAX) { 3804ae53789SNadia Derbey p = get_from_free_list(idp); 3811da177e4SLinus Torvalds kmem_cache_free(idr_layer_cache, p); 3821da177e4SLinus Torvalds } 383af8e2a4cSNadia Derbey return; 3841da177e4SLinus Torvalds } 3851da177e4SLinus Torvalds EXPORT_SYMBOL(idr_remove); 3861da177e4SLinus Torvalds 3871da177e4SLinus Torvalds /** 38823936cc0SKristian Hoegsberg * idr_remove_all - remove all ids from the given idr tree 38923936cc0SKristian Hoegsberg * @idp: 
idr handle 39023936cc0SKristian Hoegsberg * 39123936cc0SKristian Hoegsberg * idr_destroy() only frees up unused, cached idp_layers, but this 39223936cc0SKristian Hoegsberg * function will remove all id mappings and leave all idp_layers 39323936cc0SKristian Hoegsberg * unused. 39423936cc0SKristian Hoegsberg * 39523936cc0SKristian Hoegsberg * A typical clean-up sequence for objects stored in an idr tree, will 39623936cc0SKristian Hoegsberg * use idr_for_each() to free all objects, if necessay, then 39723936cc0SKristian Hoegsberg * idr_remove_all() to remove all ids, and idr_destroy() to free 39823936cc0SKristian Hoegsberg * up the cached idr_layers. 39923936cc0SKristian Hoegsberg */ 40023936cc0SKristian Hoegsberg void idr_remove_all(struct idr *idp) 40123936cc0SKristian Hoegsberg { 4026ace06dcSOleg Nesterov int n, id, max; 40323936cc0SKristian Hoegsberg struct idr_layer *p; 40423936cc0SKristian Hoegsberg struct idr_layer *pa[MAX_LEVEL]; 40523936cc0SKristian Hoegsberg struct idr_layer **paa = &pa[0]; 40623936cc0SKristian Hoegsberg 40723936cc0SKristian Hoegsberg n = idp->layers * IDR_BITS; 40823936cc0SKristian Hoegsberg p = idp->top; 40923936cc0SKristian Hoegsberg max = 1 << n; 41023936cc0SKristian Hoegsberg 41123936cc0SKristian Hoegsberg id = 0; 4126ace06dcSOleg Nesterov while (id < max) { 41323936cc0SKristian Hoegsberg while (n > IDR_BITS && p) { 41423936cc0SKristian Hoegsberg n -= IDR_BITS; 41523936cc0SKristian Hoegsberg *paa++ = p; 41623936cc0SKristian Hoegsberg p = p->ary[(id >> n) & IDR_MASK]; 41723936cc0SKristian Hoegsberg } 41823936cc0SKristian Hoegsberg 41923936cc0SKristian Hoegsberg id += 1 << n; 42023936cc0SKristian Hoegsberg while (n < fls(id)) { 42123936cc0SKristian Hoegsberg if (p) { 42223936cc0SKristian Hoegsberg memset(p, 0, sizeof *p); 4234ae53789SNadia Derbey move_to_free_list(idp, p); 42423936cc0SKristian Hoegsberg } 42523936cc0SKristian Hoegsberg n += IDR_BITS; 42623936cc0SKristian Hoegsberg p = *--paa; 42723936cc0SKristian Hoegsberg } 
42823936cc0SKristian Hoegsberg } 42923936cc0SKristian Hoegsberg idp->top = NULL; 43023936cc0SKristian Hoegsberg idp->layers = 0; 43123936cc0SKristian Hoegsberg } 43223936cc0SKristian Hoegsberg EXPORT_SYMBOL(idr_remove_all); 43323936cc0SKristian Hoegsberg 43423936cc0SKristian Hoegsberg /** 4358d3b3591SAndrew Morton * idr_destroy - release all cached layers within an idr tree 4368d3b3591SAndrew Morton * idp: idr handle 4378d3b3591SAndrew Morton */ 4388d3b3591SAndrew Morton void idr_destroy(struct idr *idp) 4398d3b3591SAndrew Morton { 4408d3b3591SAndrew Morton while (idp->id_free_cnt) { 4414ae53789SNadia Derbey struct idr_layer *p = get_from_free_list(idp); 4428d3b3591SAndrew Morton kmem_cache_free(idr_layer_cache, p); 4438d3b3591SAndrew Morton } 4448d3b3591SAndrew Morton } 4458d3b3591SAndrew Morton EXPORT_SYMBOL(idr_destroy); 4468d3b3591SAndrew Morton 4478d3b3591SAndrew Morton /** 4481da177e4SLinus Torvalds * idr_find - return pointer for given id 4491da177e4SLinus Torvalds * @idp: idr handle 4501da177e4SLinus Torvalds * @id: lookup key 4511da177e4SLinus Torvalds * 4521da177e4SLinus Torvalds * Return the pointer given the id it has been registered with. A %NULL 4531da177e4SLinus Torvalds * return indicates that @id is not valid or you passed %NULL in 4541da177e4SLinus Torvalds * idr_get_new(). 4551da177e4SLinus Torvalds * 4561da177e4SLinus Torvalds * The caller must serialize idr_find() vs idr_get_new() and idr_remove(). 4571da177e4SLinus Torvalds */ 4581da177e4SLinus Torvalds void *idr_find(struct idr *idp, int id) 4591da177e4SLinus Torvalds { 4601da177e4SLinus Torvalds int n; 4611da177e4SLinus Torvalds struct idr_layer *p; 4621da177e4SLinus Torvalds 4631da177e4SLinus Torvalds n = idp->layers * IDR_BITS; 4641da177e4SLinus Torvalds p = idp->top; 4651da177e4SLinus Torvalds 4661da177e4SLinus Torvalds /* Mask off upper bits we don't use for the search. 
*/ 4671da177e4SLinus Torvalds id &= MAX_ID_MASK; 4681da177e4SLinus Torvalds 4691da177e4SLinus Torvalds if (id >= (1 << n)) 4701da177e4SLinus Torvalds return NULL; 4711da177e4SLinus Torvalds 4721da177e4SLinus Torvalds while (n > 0 && p) { 4731da177e4SLinus Torvalds n -= IDR_BITS; 4741da177e4SLinus Torvalds p = p->ary[(id >> n) & IDR_MASK]; 4751da177e4SLinus Torvalds } 4761da177e4SLinus Torvalds return((void *)p); 4771da177e4SLinus Torvalds } 4781da177e4SLinus Torvalds EXPORT_SYMBOL(idr_find); 4791da177e4SLinus Torvalds 4805806f07cSJeff Mahoney /** 48196d7fa42SKristian Hoegsberg * idr_for_each - iterate through all stored pointers 48296d7fa42SKristian Hoegsberg * @idp: idr handle 48396d7fa42SKristian Hoegsberg * @fn: function to be called for each pointer 48496d7fa42SKristian Hoegsberg * @data: data passed back to callback function 48596d7fa42SKristian Hoegsberg * 48696d7fa42SKristian Hoegsberg * Iterate over the pointers registered with the given idr. The 48796d7fa42SKristian Hoegsberg * callback function will be called for each pointer currently 48896d7fa42SKristian Hoegsberg * registered, passing the id, the pointer and the data pointer passed 48996d7fa42SKristian Hoegsberg * to this function. It is not safe to modify the idr tree while in 49096d7fa42SKristian Hoegsberg * the callback, so functions such as idr_get_new and idr_remove are 49196d7fa42SKristian Hoegsberg * not allowed. 49296d7fa42SKristian Hoegsberg * 49396d7fa42SKristian Hoegsberg * We check the return of @fn each time. If it returns anything other 49496d7fa42SKristian Hoegsberg * than 0, we break out and return that value. 49596d7fa42SKristian Hoegsberg * 49696d7fa42SKristian Hoegsberg * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove(). 
49796d7fa42SKristian Hoegsberg */ 49896d7fa42SKristian Hoegsberg int idr_for_each(struct idr *idp, 49996d7fa42SKristian Hoegsberg int (*fn)(int id, void *p, void *data), void *data) 50096d7fa42SKristian Hoegsberg { 50196d7fa42SKristian Hoegsberg int n, id, max, error = 0; 50296d7fa42SKristian Hoegsberg struct idr_layer *p; 50396d7fa42SKristian Hoegsberg struct idr_layer *pa[MAX_LEVEL]; 50496d7fa42SKristian Hoegsberg struct idr_layer **paa = &pa[0]; 50596d7fa42SKristian Hoegsberg 50696d7fa42SKristian Hoegsberg n = idp->layers * IDR_BITS; 50796d7fa42SKristian Hoegsberg p = idp->top; 50896d7fa42SKristian Hoegsberg max = 1 << n; 50996d7fa42SKristian Hoegsberg 51096d7fa42SKristian Hoegsberg id = 0; 51196d7fa42SKristian Hoegsberg while (id < max) { 51296d7fa42SKristian Hoegsberg while (n > 0 && p) { 51396d7fa42SKristian Hoegsberg n -= IDR_BITS; 51496d7fa42SKristian Hoegsberg *paa++ = p; 51596d7fa42SKristian Hoegsberg p = p->ary[(id >> n) & IDR_MASK]; 51696d7fa42SKristian Hoegsberg } 51796d7fa42SKristian Hoegsberg 51896d7fa42SKristian Hoegsberg if (p) { 51996d7fa42SKristian Hoegsberg error = fn(id, (void *)p, data); 52096d7fa42SKristian Hoegsberg if (error) 52196d7fa42SKristian Hoegsberg break; 52296d7fa42SKristian Hoegsberg } 52396d7fa42SKristian Hoegsberg 52496d7fa42SKristian Hoegsberg id += 1 << n; 52596d7fa42SKristian Hoegsberg while (n < fls(id)) { 52696d7fa42SKristian Hoegsberg n += IDR_BITS; 52796d7fa42SKristian Hoegsberg p = *--paa; 52896d7fa42SKristian Hoegsberg } 52996d7fa42SKristian Hoegsberg } 53096d7fa42SKristian Hoegsberg 53196d7fa42SKristian Hoegsberg return error; 53296d7fa42SKristian Hoegsberg } 53396d7fa42SKristian Hoegsberg EXPORT_SYMBOL(idr_for_each); 53496d7fa42SKristian Hoegsberg 53596d7fa42SKristian Hoegsberg /** 5365806f07cSJeff Mahoney * idr_replace - replace pointer for given id 5375806f07cSJeff Mahoney * @idp: idr handle 5385806f07cSJeff Mahoney * @ptr: pointer you want associated with the id 5395806f07cSJeff Mahoney * @id: lookup key 
5405806f07cSJeff Mahoney * 5415806f07cSJeff Mahoney * Replace the pointer registered with an id and return the old value. 5425806f07cSJeff Mahoney * A -ENOENT return indicates that @id was not found. 5435806f07cSJeff Mahoney * A -EINVAL return indicates that @id was not within valid constraints. 5445806f07cSJeff Mahoney * 5455806f07cSJeff Mahoney * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove(). 5465806f07cSJeff Mahoney */ 5475806f07cSJeff Mahoney void *idr_replace(struct idr *idp, void *ptr, int id) 5485806f07cSJeff Mahoney { 5495806f07cSJeff Mahoney int n; 5505806f07cSJeff Mahoney struct idr_layer *p, *old_p; 5515806f07cSJeff Mahoney 5525806f07cSJeff Mahoney n = idp->layers * IDR_BITS; 5535806f07cSJeff Mahoney p = idp->top; 5545806f07cSJeff Mahoney 5555806f07cSJeff Mahoney id &= MAX_ID_MASK; 5565806f07cSJeff Mahoney 5575806f07cSJeff Mahoney if (id >= (1 << n)) 5585806f07cSJeff Mahoney return ERR_PTR(-EINVAL); 5595806f07cSJeff Mahoney 5605806f07cSJeff Mahoney n -= IDR_BITS; 5615806f07cSJeff Mahoney while ((n > 0) && p) { 5625806f07cSJeff Mahoney p = p->ary[(id >> n) & IDR_MASK]; 5635806f07cSJeff Mahoney n -= IDR_BITS; 5645806f07cSJeff Mahoney } 5655806f07cSJeff Mahoney 5665806f07cSJeff Mahoney n = id & IDR_MASK; 5675806f07cSJeff Mahoney if (unlikely(p == NULL || !test_bit(n, &p->bitmap))) 5685806f07cSJeff Mahoney return ERR_PTR(-ENOENT); 5695806f07cSJeff Mahoney 5705806f07cSJeff Mahoney old_p = p->ary[n]; 5715806f07cSJeff Mahoney p->ary[n] = ptr; 5725806f07cSJeff Mahoney 5735806f07cSJeff Mahoney return old_p; 5745806f07cSJeff Mahoney } 5755806f07cSJeff Mahoney EXPORT_SYMBOL(idr_replace); 5765806f07cSJeff Mahoney 5774ba9b9d0SChristoph Lameter static void idr_cache_ctor(struct kmem_cache *idr_layer_cache, void *idr_layer) 5781da177e4SLinus Torvalds { 5791da177e4SLinus Torvalds memset(idr_layer, 0, sizeof(struct idr_layer)); 5801da177e4SLinus Torvalds } 5811da177e4SLinus Torvalds 582199f0ca5SAkinobu Mita void __init idr_init_cache(void) 
5831da177e4SLinus Torvalds { 5841da177e4SLinus Torvalds idr_layer_cache = kmem_cache_create("idr_layer_cache", 585199f0ca5SAkinobu Mita sizeof(struct idr_layer), 0, SLAB_PANIC, 586199f0ca5SAkinobu Mita idr_cache_ctor); 5871da177e4SLinus Torvalds } 5881da177e4SLinus Torvalds 5891da177e4SLinus Torvalds /** 5901da177e4SLinus Torvalds * idr_init - initialize idr handle 5911da177e4SLinus Torvalds * @idp: idr handle 5921da177e4SLinus Torvalds * 5931da177e4SLinus Torvalds * This function is use to set up the handle (@idp) that you will pass 5941da177e4SLinus Torvalds * to the rest of the functions. 5951da177e4SLinus Torvalds */ 5961da177e4SLinus Torvalds void idr_init(struct idr *idp) 5971da177e4SLinus Torvalds { 5981da177e4SLinus Torvalds memset(idp, 0, sizeof(struct idr)); 5991da177e4SLinus Torvalds spin_lock_init(&idp->lock); 6001da177e4SLinus Torvalds } 6011da177e4SLinus Torvalds EXPORT_SYMBOL(idr_init); 60272dba584STejun Heo 60372dba584STejun Heo 60472dba584STejun Heo /* 60572dba584STejun Heo * IDA - IDR based ID allocator 60672dba584STejun Heo * 60772dba584STejun Heo * this is id allocator without id -> pointer translation. Memory 60872dba584STejun Heo * usage is much lower than full blown idr because each id only 60972dba584STejun Heo * occupies a bit. ida uses a custom leaf node which contains 61072dba584STejun Heo * IDA_BITMAP_BITS slots. 
61172dba584STejun Heo * 61272dba584STejun Heo * 2007-04-25 written by Tejun Heo <htejun@gmail.com> 61372dba584STejun Heo */ 61472dba584STejun Heo 61572dba584STejun Heo static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap) 61672dba584STejun Heo { 61772dba584STejun Heo unsigned long flags; 61872dba584STejun Heo 61972dba584STejun Heo if (!ida->free_bitmap) { 62072dba584STejun Heo spin_lock_irqsave(&ida->idr.lock, flags); 62172dba584STejun Heo if (!ida->free_bitmap) { 62272dba584STejun Heo ida->free_bitmap = bitmap; 62372dba584STejun Heo bitmap = NULL; 62472dba584STejun Heo } 62572dba584STejun Heo spin_unlock_irqrestore(&ida->idr.lock, flags); 62672dba584STejun Heo } 62772dba584STejun Heo 62872dba584STejun Heo kfree(bitmap); 62972dba584STejun Heo } 63072dba584STejun Heo 63172dba584STejun Heo /** 63272dba584STejun Heo * ida_pre_get - reserve resources for ida allocation 63372dba584STejun Heo * @ida: ida handle 63472dba584STejun Heo * @gfp_mask: memory allocation flag 63572dba584STejun Heo * 63672dba584STejun Heo * This function should be called prior to locking and calling the 63772dba584STejun Heo * following function. It preallocates enough memory to satisfy the 63872dba584STejun Heo * worst possible allocation. 63972dba584STejun Heo * 64072dba584STejun Heo * If the system is REALLY out of memory this function returns 0, 64172dba584STejun Heo * otherwise 1. 
64272dba584STejun Heo */ 64372dba584STejun Heo int ida_pre_get(struct ida *ida, gfp_t gfp_mask) 64472dba584STejun Heo { 64572dba584STejun Heo /* allocate idr_layers */ 64672dba584STejun Heo if (!idr_pre_get(&ida->idr, gfp_mask)) 64772dba584STejun Heo return 0; 64872dba584STejun Heo 64972dba584STejun Heo /* allocate free_bitmap */ 65072dba584STejun Heo if (!ida->free_bitmap) { 65172dba584STejun Heo struct ida_bitmap *bitmap; 65272dba584STejun Heo 65372dba584STejun Heo bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask); 65472dba584STejun Heo if (!bitmap) 65572dba584STejun Heo return 0; 65672dba584STejun Heo 65772dba584STejun Heo free_bitmap(ida, bitmap); 65872dba584STejun Heo } 65972dba584STejun Heo 66072dba584STejun Heo return 1; 66172dba584STejun Heo } 66272dba584STejun Heo EXPORT_SYMBOL(ida_pre_get); 66372dba584STejun Heo 66472dba584STejun Heo /** 66572dba584STejun Heo * ida_get_new_above - allocate new ID above or equal to a start id 66672dba584STejun Heo * @ida: ida handle 66772dba584STejun Heo * @staring_id: id to start search at 66872dba584STejun Heo * @p_id: pointer to the allocated handle 66972dba584STejun Heo * 67072dba584STejun Heo * Allocate new ID above or equal to @ida. It should be called with 67172dba584STejun Heo * any required locks. 67272dba584STejun Heo * 67372dba584STejun Heo * If memory is required, it will return -EAGAIN, you should unlock 67472dba584STejun Heo * and go back to the ida_pre_get() call. If the ida is full, it will 67572dba584STejun Heo * return -ENOSPC. 67672dba584STejun Heo * 67772dba584STejun Heo * @p_id returns a value in the range 0 ... 0x7fffffff. 
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;	/* which bitmap chunk */
	int offset = starting_id % IDA_BITMAP_BITS;	/* bit within the chunk */
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
	if (t < 0)
		return _idr_rc_to_errno(t);

	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
		return -ENOSPC;

	/* landed past the requested chunk - scan the new chunk from bit 0 */
	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		/* grab the bitmap preallocated by ida_pre_get(), if any */
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;	/* caller must ida_pre_get() again */

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		pa[0]->ary[idr_id & IDR_MASK] = (void *)bitmap;
		pa[0]->count++;
	}

	/* lookup for empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_ID_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);	/* chunk full: let idr skip it */

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have small memory foot print.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);

/**
 * ida_get_new - allocate new ID
 * @ida: idr handle
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff.
76372dba584STejun Heo */ 76472dba584STejun Heo int ida_get_new(struct ida *ida, int *p_id) 76572dba584STejun Heo { 76672dba584STejun Heo return ida_get_new_above(ida, 0, p_id); 76772dba584STejun Heo } 76872dba584STejun Heo EXPORT_SYMBOL(ida_get_new); 76972dba584STejun Heo 77072dba584STejun Heo /** 77172dba584STejun Heo * ida_remove - remove the given ID 77272dba584STejun Heo * @ida: ida handle 77372dba584STejun Heo * @id: ID to free 77472dba584STejun Heo */ 77572dba584STejun Heo void ida_remove(struct ida *ida, int id) 77672dba584STejun Heo { 77772dba584STejun Heo struct idr_layer *p = ida->idr.top; 77872dba584STejun Heo int shift = (ida->idr.layers - 1) * IDR_BITS; 77972dba584STejun Heo int idr_id = id / IDA_BITMAP_BITS; 78072dba584STejun Heo int offset = id % IDA_BITMAP_BITS; 78172dba584STejun Heo int n; 78272dba584STejun Heo struct ida_bitmap *bitmap; 78372dba584STejun Heo 78472dba584STejun Heo /* clear full bits while looking up the leaf idr_layer */ 78572dba584STejun Heo while ((shift > 0) && p) { 78672dba584STejun Heo n = (idr_id >> shift) & IDR_MASK; 78772dba584STejun Heo __clear_bit(n, &p->bitmap); 78872dba584STejun Heo p = p->ary[n]; 78972dba584STejun Heo shift -= IDR_BITS; 79072dba584STejun Heo } 79172dba584STejun Heo 79272dba584STejun Heo if (p == NULL) 79372dba584STejun Heo goto err; 79472dba584STejun Heo 79572dba584STejun Heo n = idr_id & IDR_MASK; 79672dba584STejun Heo __clear_bit(n, &p->bitmap); 79772dba584STejun Heo 79872dba584STejun Heo bitmap = (void *)p->ary[n]; 79972dba584STejun Heo if (!test_bit(offset, bitmap->bitmap)) 80072dba584STejun Heo goto err; 80172dba584STejun Heo 80272dba584STejun Heo /* update bitmap and remove it if empty */ 80372dba584STejun Heo __clear_bit(offset, bitmap->bitmap); 80472dba584STejun Heo if (--bitmap->nr_busy == 0) { 80572dba584STejun Heo __set_bit(n, &p->bitmap); /* to please idr_remove() */ 80672dba584STejun Heo idr_remove(&ida->idr, idr_id); 80772dba584STejun Heo free_bitmap(ida, bitmap); 80872dba584STejun Heo } 
80972dba584STejun Heo 81072dba584STejun Heo return; 81172dba584STejun Heo 81272dba584STejun Heo err: 81372dba584STejun Heo printk(KERN_WARNING 81472dba584STejun Heo "ida_remove called for id=%d which is not allocated.\n", id); 81572dba584STejun Heo } 81672dba584STejun Heo EXPORT_SYMBOL(ida_remove); 81772dba584STejun Heo 81872dba584STejun Heo /** 81972dba584STejun Heo * ida_destroy - release all cached layers within an ida tree 82072dba584STejun Heo * ida: ida handle 82172dba584STejun Heo */ 82272dba584STejun Heo void ida_destroy(struct ida *ida) 82372dba584STejun Heo { 82472dba584STejun Heo idr_destroy(&ida->idr); 82572dba584STejun Heo kfree(ida->free_bitmap); 82672dba584STejun Heo } 82772dba584STejun Heo EXPORT_SYMBOL(ida_destroy); 82872dba584STejun Heo 82972dba584STejun Heo /** 83072dba584STejun Heo * ida_init - initialize ida handle 83172dba584STejun Heo * @ida: ida handle 83272dba584STejun Heo * 83372dba584STejun Heo * This function is use to set up the handle (@ida) that you will pass 83472dba584STejun Heo * to the rest of the functions. 83572dba584STejun Heo */ 83672dba584STejun Heo void ida_init(struct ida *ida) 83772dba584STejun Heo { 83872dba584STejun Heo memset(ida, 0, sizeof(struct ida)); 83972dba584STejun Heo idr_init(&ida->idr); 84072dba584STejun Heo 84172dba584STejun Heo } 84272dba584STejun Heo EXPORT_SYMBOL(ida_init); 843