/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix-tree-like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer or whatever; we treat it as a (void *).  You can pass this
 * id to a user for them to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep MAX_IDR_FREE) in a local pool so we
 * don't need to go to the memory "store" during an id allocate, just
 * so you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
 */

#ifndef TEST			// to test in user space...
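/*
 * A minimal usage sketch (illustrative only, not part of this file;
 * "my_idr", "my_lock" and the "fail" label are made-up names):
 *
 *	struct idr my_idr;
 *
 *	idr_init(&my_idr);
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&my_lock);
 *	id = idr_alloc(&my_idr, ptr, 0, 0, GFP_NOWAIT);
 *	spin_unlock(&my_lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		goto fail;
 *
 *	obj = idr_find(&my_idr, id);	// lookup returns the pointer
 *	idr_remove(&my_idr, id);	// release the id again
 */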
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/export.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>

#define MAX_IDR_SHIFT		(sizeof(int) * 8 - 1)
#define MAX_IDR_BIT		(1U << MAX_IDR_SHIFT)

/* Leave the possibility of an incomplete final layer */
#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)

/* Number of id_layer structs to leave in free list */
#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)

static struct kmem_cache *idr_layer_cache;
static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
static DEFINE_PER_CPU(int, idr_preload_cnt);
static DEFINE_SPINLOCK(simple_ida_lock);

/* the maximum ID which can be allocated given idr->layers */
static int idr_max(int layers)
{
	int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);

	return (1 << bits) - 1;
}

/*
 * Prefix mask for an idr_layer at @layer.  For layer 0, the prefix mask is
 * all bits except for the lower IDR_BITS.  For layer 1, 2 * IDR_BITS, and
 * so on.
 */
static int idr_layer_prefix_mask(int layer)
{
	return ~idr_max(layer + 1);
}

static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return p;
}

/**
 * idr_layer_alloc - allocate a new idr_layer
 * @gfp_mask: allocation mask
 * @layer_idr: optional idr to allocate from
 *
 * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
 * one from the per-cpu preload buffer.  If @layer_idr is not %NULL, fetch
 * an idr_layer from @idr->id_free.
 *
 * @layer_idr is to maintain backward compatibility with the old alloc
 * interface - idr_pre_get() and idr_get_new*() - and will be removed
 * together with per-pool preload buffer.
 */
static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
{
	struct idr_layer *new;

	/* this is the old path, bypass to get_from_free_list() */
	if (layer_idr)
		return get_from_free_list(layer_idr);

	/*
	 * Try to allocate directly from kmem_cache.  We want to try this
	 * before preload buffer; otherwise, non-preloading idr_alloc()
	 * users will end up taking advantage of preloading ones.  As the
	 * following is allowed to fail for preloaded cases, suppress
	 * warning this time.
	 */
	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
	if (new)
		return new;

	/*
	 * Try to fetch one from the per-cpu preload buffer if in process
	 * context.  See idr_preload() for details.
	 */
	if (!in_interrupt()) {
		preempt_disable();
		new = __this_cpu_read(idr_preload_head);
		if (new) {
			__this_cpu_write(idr_preload_head, new->ary[0]);
			__this_cpu_dec(idr_preload_cnt);
			new->ary[0] = NULL;
		}
		preempt_enable();
		if (new)
			return new;
	}

	/*
	 * Both failed.  Try kmem_cache again w/o adding __GFP_NOWARN so
	 * that memory allocation failure warning is printed as intended.
	 */
	return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
	struct idr_layer *layer;

	layer = container_of(head, struct idr_layer, rcu_head);
	kmem_cache_free(idr_layer_cache, layer);
}

static inline void free_layer(struct idr *idr, struct idr_layer *p)
{
	if (idr->hint && idr->hint == p)
		RCU_INIT_POINTER(idr->hint, NULL);
	call_rcu(&p->rcu_head, idr_layer_rcu_free);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (bitmap_full(p->bitmap, IDR_SIZE)) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), p->bitmap);
	}
}

int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < MAX_IDR_FREE) {
		struct idr_layer *new;
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return 0;
		move_to_free_list(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(__idr_pre_get);

/**
 * sub_alloc - try to allocate an id without growing the tree depth
 * @idp: idr handle
 * @starting_id: id to start search at
 * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
 * @gfp_mask: allocation mask for idr_layer_alloc()
 * @layer_idr: optional idr passed to idr_layer_alloc()
 *
 * Allocate an id in range [@starting_id, INT_MAX] from @idp without
 * growing its depth.  Returns
 *
 *  the allocated id >= 0 if successful,
 *  -EAGAIN if the tree needs to grow for allocation to succeed,
 *  -ENOSPC if the id space is exhausted,
 *  -ENOMEM if more idr_layers need to be allocated.
 */
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
		     gfp_t gfp_mask, struct idr *layer_idr)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;

	id = *starting_id;
restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
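		 *
		 * For example (assuming IDR_BITS == 8, so each layer
		 * indexes 8 bits of the id): looking for a free slot
		 * near id 0x10203 with idp->layers == 3, we take
		 * n = 0x01 at l == 2, n = 0x02 at l == 1, and finally
		 * scan the leaf bitmap from bit 0x03 at l == 0.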
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (id >= 1 << (idp->layers * IDR_BITS)) {
				*starting_id = id;
				return -EAGAIN;
			}
			p = pa[l];
			BUG_ON(!p);

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_IDR_BIT) || (id < 0))
			return -ENOSPC;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = idr_layer_alloc(gfp_mask, layer_idr);
			if (!new)
				return -ENOMEM;
			new->layer = l-1;
			new->prefix = id & idr_layer_prefix_mask(new->layer);
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa, gfp_t gfp_mask,
			      struct idr *layer_idr)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
			return -ENOMEM;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
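	 *
	 * For example (assuming IDR_BITS == 8): a one-layer tree covers
	 * ids 0..255; asking for id 300 pushes a new layer on top so the
	 * tree covers 0..65535, with the old top as the leftmost child.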
	 */
	while (id > idr_max(layers)) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			WARN_ON_ONCE(p->prefix);
			continue;
		}
		if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->count = 0;
				bitmap_clear(new->bitmap, 0, IDR_SIZE);
				__move_to_free_list(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -ENOMEM;
		}
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers-1;
		new->prefix = id & idr_layer_prefix_mask(new->layer);
		if (bitmap_full(p->bitmap, IDR_SIZE))
			__set_bit(0, new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
	if (v == -EAGAIN)
		goto build_up;
	return v;
}

/*
 * @id and @pa are from a successful allocation from idr_get_empty_slot().
 * Install the user pointer @ptr and mark the slot full.
 */
static void idr_fill_slot(struct idr *idr, void *ptr, int id,
			  struct idr_layer **pa)
{
	/* update hint used for lookup, cleared from free_layer() */
	rcu_assign_pointer(idr->hint, pa[0]);

	rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
	pa[0]->count++;
	idr_mark_full(pa, id);
}

int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	int rv;

	rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp);
	if (rv < 0)
		return rv == -ENOMEM ?
			-EAGAIN : rv;

	idr_fill_slot(idp, ptr, rv, pa);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(__idr_get_new_above);

/**
 * idr_preload - preload for idr_alloc()
 * @gfp_mask: allocation mask to use for preloading
 *
 * Preload per-cpu layer buffer for idr_alloc().  Can only be used from
 * process context and each idr_preload() invocation should be matched with
 * idr_preload_end().  Note that preemption is disabled while preloaded.
 *
 * The first idr_alloc() in the preloaded section can be treated as if it
 * were invoked with @gfp_mask used for preloading.  This allows using more
 * permissive allocation masks for idrs protected by spinlocks.
 *
 * For example, if idr_alloc() below fails, the failure can be treated as
 * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(lock);
 *
 *	id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
 *
 *	spin_unlock(lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		error;
 */
void idr_preload(gfp_t gfp_mask)
{
	/*
	 * Consuming preload buffer from non-process context breaks preload
	 * allocation guarantee.  Disallow usage from those contexts.
	 */
	WARN_ON_ONCE(in_interrupt());
	might_sleep_if(gfp_mask & __GFP_WAIT);

	preempt_disable();

	/*
	 * idr_alloc() is likely to succeed w/o full idr_layer buffer and
	 * return value from idr_alloc() needs to be checked for failure
	 * anyway.  Silently give up if allocation fails.  The caller can
	 * treat failures from idr_alloc() as if idr_alloc() were called
	 * with @gfp_mask which should be enough.
	 */
	while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
		struct idr_layer *new;

		preempt_enable();
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		preempt_disable();
		if (!new)
			break;

		/* link the new one to per-cpu preload list */
		new->ary[0] = __this_cpu_read(idr_preload_head);
		__this_cpu_write(idr_preload_head, new);
		__this_cpu_inc(idr_preload_cnt);
	}
}
EXPORT_SYMBOL(idr_preload);

/**
 * idr_alloc - allocate new idr entry
 * @idr: the (initialized) idr
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive, <= 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Allocate an id in [start, end) and associate it with @ptr.  If no ID is
 * available in the specified range, returns -ENOSPC.  On memory allocation
 * failure, returns -ENOMEM.
 *
 * Note that @end is treated as max when <= 0.  This is to always allow
 * using @start + N as @end as long as N is inside integer range.
 *
 * The user is responsible for exclusively synchronizing all operations
 * which may modify @idr.  However, read-only accesses such as idr_find()
 * or iteration can be performed under RCU read lock provided the user
 * destroys @ptr in RCU-safe way after removal from idr.
 */
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int max = end > 0 ?
		end - 1 : INT_MAX;	/* inclusive upper limit */
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	int id;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	/* sanity checks */
	if (WARN_ON_ONCE(start < 0))
		return -EINVAL;
	if (unlikely(max < start))
		return -ENOSPC;

	/* allocate id */
	id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL);
	if (unlikely(id < 0))
		return id;
	if (unlikely(id > max))
		return -ENOSPC;

	idr_fill_slot(idr, ptr, id, pa);
	return id;
}
EXPORT_SYMBOL_GPL(idr_alloc);

static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
	       "idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_IDR_LEVEL + 1];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, p->bitmap))) {
		__clear_bit(n, p->bitmap);
		rcu_assign_pointer(p->ary[n], NULL);
		to_free = NULL;
		while (*paa && !--((**paa)->count)) {
			if (to_free)
				free_layer(idp, to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(idp, to_free);
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	if (id < 0)
		return;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->count = 0;
		bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
		free_layer(idp, to_free);
	}
	while (idp->id_free_cnt >= MAX_IDR_FREE) {
		p = get_from_free_list(idp);
		/*
		 * Note: we don't call the rcu callback here, since the only
		 * layers that fall into the freelist are those that have been
		 * preallocated.
		 */
		kmem_cache_free(idr_layer_cache, p);
	}
	return;
}
EXPORT_SYMBOL(idr_remove);

void __idr_remove_all(struct idr *idp)
{
	int n, id, max;
	int bt_mask;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	rcu_assign_pointer(idp->top, NULL);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		bt_mask = id;
		id += 1 << n;
		/* Get the highest bit that the above add changed from 0->1.
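		 *
		 * For example (assuming IDR_BITS == 8, so the leaf shift
		 * n is 8): stepping from id 0xff00 to 0x10000 gives
		 * fls(id ^ bt_mask) == 17, so the loop below pops back up
		 * two levels (n: 8 -> 16 -> 24) before descending again.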
		 */
		while (n < fls(id ^ bt_mask)) {
			if (p)
				free_layer(idp, p);
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->layers = 0;
}
EXPORT_SYMBOL(__idr_remove_all);

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 *
 * Free all id mappings and all idp_layers.  After this function, @idp is
 * completely unused and can be freed / recycled.  The caller is
 * responsible for ensuring that no one else accesses @idp during or after
 * idr_destroy().
 *
 * A typical clean-up sequence for objects stored in an idr tree will use
 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
 * free up the id mappings and cached idr_layers.
 */
void idr_destroy(struct idr *idp)
{
	__idr_remove_all(idp);

	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);

void *idr_find_slowpath(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	if (id < 0)
		return NULL;

	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer+1) * IDR_BITS;

	if (id > idr_max(p->layer + 1))
		return NULL;
	BUG_ON(n == 0);

	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
	}
	return (void *)p;
}
EXPORT_SYMBOL(idr_find_slowpath);

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.
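 *
 * For example, a minimal callback sketch (illustrative only; the
 * "print_entry" name is made up):
 *
 *	static int print_entry(int id, void *p, void *data)
 *	{
 *		pr_info("id %d -> %p\n", id, p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&my_idr, print_entry, NULL);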
 * It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference_raw(idp->top);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);

/**
 * idr_get_next - look up the next object with an id >= the given id
 * @idp: idr handle
 * @nextidp: pointer to lookup key
 *
 * Returns a pointer to the registered object with the smallest id that is
 * greater than or equal to *@nextidp.  On success, *@nextidp is updated to
 * that id for the next iteration.
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers lifetimes are correctly managed.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
	struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];
	int id = *nextidp;
	int n, max;

	/* find first ent */
	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer + 1) * IDR_BITS;
	max = idr_max(p->layer + 1);

	while (id >= 0 && id <= max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			*nextidp = id;
			return p;
		}

		/*
		 * Proceed to the next layer at the current level.  Unlike
		 * idr_for_each(), @id isn't guaranteed to be aligned to
		 * layer boundary at this point and adding 1 << n may
		 * incorrectly skip IDs.  Make sure we jump to the
		 * beginning of the next layer using round_up().
		 */
		id = round_up(id + 1, 1 << n);
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(idr_get_next);


/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
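 *
 * For example (illustrative only; "my_idr" and the ownership of the old
 * object are made-up assumptions):
 *
 *	old = idr_replace(&my_idr, new_ptr, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 *	kfree(old);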
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	if (id < 0)
		return ERR_PTR(-EINVAL);

	p = idp->top;
	if (!p)
		return ERR_PTR(-EINVAL);

	n = (p->layer+1) * IDR_BITS;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);

void __init idr_init_cache(void)
{
	idr_layer_cache = kmem_cache_create("idr_layer_cache",
				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);


/**
 * DOC: IDA description
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flag
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!__idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
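 *
 * A typical call sequence (illustrative only; "my_ida", "my_lock" and
 * the "again" label are made-up names):
 *
 *	again:
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		ret = ida_get_new_above(&my_ida, 0, &id);
 *		spin_unlock(&my_lock);
 *		if (ret == -EAGAIN)
 *			goto again;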
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
	if (t < 0)
		return t == -ENOMEM ? -EAGAIN : t;

	if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* lookup for empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_IDR_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have small memory foot print.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);

/**
 * ida_remove - remove the given ID
 * @ida: ida handle
 * @id: ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

err:
	printk(KERN_WARNING
	       "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < 0x8000000)
 * @end: the maximum id (exclusive, < 0x8000000 or 0)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.
 *
 * Use ida_simple_remove() to get rid of an id.
 */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask)
{
	int ret, id;
	unsigned int max;
	unsigned long flags;

	BUG_ON((int)start < 0);
	BUG_ON((int)end < 0);

	if (end == 0)
		max = 0x80000000;
	else {
		BUG_ON(end < start);
		max = end - 1;
	}

again:
	if (!ida_pre_get(ida, gfp_mask))
		return -ENOMEM;

	spin_lock_irqsave(&simple_ida_lock, flags);
	ret = ida_get_new_above(ida, start, &id);
	if (!ret) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	spin_unlock_irqrestore(&simple_ida_lock, flags);

	if (unlikely(ret == -EAGAIN))
		goto again;

	return ret;
}
EXPORT_SYMBOL(ida_simple_get);

/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
{
	unsigned long flags;

	BUG_ON((int)id < 0);
	spin_lock_irqsave(&simple_ida_lock, flags);
	ida_remove(ida, id);
	spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);

/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);
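/*
 * A minimal ida usage sketch (illustrative only; "my_ida" is a made-up
 * name).  ida_simple_get()/ida_simple_remove() wrap the pre-get/retry
 * dance and the locking shown above:
 *
 *	static DEFINE_IDA(my_ida);
 *
 *	id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return id;
 *	...
 *	ida_simple_remove(&my_ida, id);
 */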