/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer or whatever; we treat it as a (void *).  You can pass this
 * id to a user for them to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep MAX_IDR_FREE idr_layers) in a local
 * pool so we don't need to go to the memory "store" during an id
 * allocate, and you don't need to be too concerned about locking and
 * conflicts with the slab allocator.
 */
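
/*
 * A minimal usage sketch of the whole service ("my_idr", "my_obj" and
 * "fail" are hypothetical names, not part of this file):
 *
 *	struct idr my_idr;
 *	int id;
 *
 *	idr_init(&my_idr);
 *	id = idr_alloc(&my_idr, my_obj, 0, 0, GFP_KERNEL);
 *	if (id < 0)
 *		goto fail;
 *	...
 *	my_obj = idr_find(&my_idr, id);		(id -> pointer)
 *	...
 *	idr_remove(&my_idr, id);
 *	idr_destroy(&my_idr);
 */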

#ifndef TEST			// to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/export.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>

#define MAX_IDR_SHIFT		(sizeof(int) * 8 - 1)
#define MAX_IDR_BIT		(1U << MAX_IDR_SHIFT)

/* Leave the possibility of an incomplete final layer */
#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)

/* Number of id_layer structs to leave in free list */
#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)

static struct kmem_cache *idr_layer_cache;
static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
static DEFINE_PER_CPU(int, idr_preload_cnt);
static DEFINE_SPINLOCK(simple_ida_lock);

/* the maximum ID which can be allocated given idr->layers */
static int idr_max(int layers)
{
	int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);

	return (1 << bits) - 1;
}

/*
 * Prefix mask for an idr_layer at @layer. For layer 0, the prefix mask is
 * all bits except for the lower IDR_BITS. For layer 1, 2 * IDR_BITS, and
 * so on.
 */
static int idr_layer_prefix_mask(int layer)
{
	return ~idr_max(layer + 1);
}
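
/*
 * A worked example, assuming the common IDR_BITS == 8 configuration
 * (IDR_BITS comes from <linux/idr.h> and may differ):
 *
 *	idr_max(1) == (1 <<  8) - 1 == 0xff
 *	idr_max(2) == (1 << 16) - 1 == 0xffff
 *
 *	idr_layer_prefix_mask(0) == ~0xff	(all id bits above the
 *	idr_layer_prefix_mask(1) == ~0xffff	 layer's own index bits)
 */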

static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return p;
}

/**
 * idr_layer_alloc - allocate a new idr_layer
 * @gfp_mask: allocation mask
 * @layer_idr: optional idr to allocate from
 *
 * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
 * one from the per-cpu preload buffer.  If @layer_idr is not %NULL, fetch
 * an idr_layer from @layer_idr->id_free.
 *
 * @layer_idr is to maintain backward compatibility with the old alloc
 * interface - idr_pre_get() and idr_get_new*() - and will be removed
 * together with per-pool preload buffer.
 */
static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
{
	struct idr_layer *new;

	/* this is the old path, bypass to get_from_free_list() */
	if (layer_idr)
		return get_from_free_list(layer_idr);

	/*
	 * Try to allocate directly from kmem_cache.  We want to try this
	 * before preload buffer; otherwise, non-preloading idr_alloc()
	 * users will end up taking advantage of preloading ones.  As the
	 * following is allowed to fail for preloaded cases, suppress
	 * warning this time.
	 */
	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
	if (new)
		return new;

	/*
	 * Try to fetch one from the per-cpu preload buffer if in process
	 * context.  See idr_preload() for details.
	 */
	if (!in_interrupt()) {
		preempt_disable();
		new = __this_cpu_read(idr_preload_head);
		if (new) {
			__this_cpu_write(idr_preload_head, new->ary[0]);
			__this_cpu_dec(idr_preload_cnt);
			new->ary[0] = NULL;
		}
		preempt_enable();
		if (new)
			return new;
	}

	/*
	 * Both failed.  Try kmem_cache again w/o adding __GFP_NOWARN so
	 * that memory allocation failure warning is printed as intended.
	 */
	return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
	struct idr_layer *layer;

	layer = container_of(head, struct idr_layer, rcu_head);
	kmem_cache_free(idr_layer_cache, layer);
}

static inline void free_layer(struct idr *idr, struct idr_layer *p)
{
	if (idr->hint && idr->hint == p)
		RCU_INIT_POINTER(idr->hint, NULL);
	call_rcu(&p->rcu_head, idr_layer_rcu_free);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (bitmap_full(p->bitmap, IDR_SIZE)) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), p->bitmap);
	}
}

static int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < MAX_IDR_FREE) {
		struct idr_layer *new;
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return 0;
		move_to_free_list(idp, new);
	}
	return 1;
}

/**
 * sub_alloc - try to allocate an id without growing the tree depth
 * @idp: idr handle
 * @starting_id: id to start search at
 * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
 * @gfp_mask: allocation mask for idr_layer_alloc()
 * @layer_idr: optional idr passed to idr_layer_alloc()
 *
 * Allocate an id in range [@starting_id, INT_MAX] from @idp without
 * growing its depth.  Returns
 *
 *  the allocated id >= 0 if successful,
 *  -EAGAIN if the tree needs to grow for allocation to succeed,
 *  -ENOSPC if the id space is exhausted,
 *  -ENOMEM if more idr_layers need to be allocated.
 */
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
		     gfp_t gfp_mask, struct idr *layer_idr)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while loop until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (id >= 1 << (idp->layers * IDR_BITS)) {
				*starting_id = id;
				return -EAGAIN;
			}
			p = pa[l];
			BUG_ON(!p);

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_IDR_BIT) || (id < 0))
			return -ENOSPC;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = idr_layer_alloc(gfp_mask, layer_idr);
			if (!new)
				return -ENOMEM;
			new->layer = l-1;
			new->prefix = id & idr_layer_prefix_mask(new->layer);
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa, gfp_t gfp_mask,
			      struct idr *layer_idr)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
			return -ENOMEM;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while (id > idr_max(layers)) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			WARN_ON_ONCE(p->prefix);
			continue;
		}
		if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->count = 0;
				bitmap_clear(new->bitmap, 0, IDR_SIZE);
				__move_to_free_list(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -ENOMEM;
		}
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers-1;
		new->prefix = id & idr_layer_prefix_mask(new->layer);
		if (bitmap_full(p->bitmap, IDR_SIZE))
			__set_bit(0, new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
	if (v == -EAGAIN)
		goto build_up;
	return v;
}

/*
 * @id and @pa are from a successful allocation from idr_get_empty_slot().
 * Install the user pointer @ptr and mark the slot full.
 */
static void idr_fill_slot(struct idr *idr, void *ptr, int id,
			  struct idr_layer **pa)
{
	/* update hint used for lookup, cleared from free_layer() */
	rcu_assign_pointer(idr->hint, pa[0]);

	rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
	pa[0]->count++;
	idr_mark_full(pa, id);
}

/**
 * idr_preload - preload for idr_alloc()
 * @gfp_mask: allocation mask to use for preloading
 *
 * Preload per-cpu layer buffer for idr_alloc().  Can only be used from
 * process context and each idr_preload() invocation should be matched with
 * idr_preload_end().  Note that preemption is disabled while preloaded.
 *
 * The first idr_alloc() in the preloaded section can be treated as if it
 * were invoked with @gfp_mask used for preloading.  This allows using more
 * permissive allocation masks for idrs protected by spinlocks.
 *
 * For example, if idr_alloc() below fails, the failure can be treated as
 * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(lock);
 *
 *	id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
 *
 *	spin_unlock(lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		error;
 */
void idr_preload(gfp_t gfp_mask)
{
	/*
	 * Consuming preload buffer from non-process context breaks preload
	 * allocation guarantee.  Disallow usage from those contexts.
	 */
	WARN_ON_ONCE(in_interrupt());
	might_sleep_if(gfp_mask & __GFP_WAIT);

	preempt_disable();

	/*
	 * idr_alloc() is likely to succeed w/o full idr_layer buffer and
	 * return value from idr_alloc() needs to be checked for failure
	 * anyway.  Silently give up if allocation fails.  The caller can
	 * treat failures from idr_alloc() as if idr_alloc() were called
	 * with @gfp_mask which should be enough.
	 */
	while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
		struct idr_layer *new;

		preempt_enable();
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		preempt_disable();
		if (!new)
			break;

		/* link the new one to per-cpu preload list */
		new->ary[0] = __this_cpu_read(idr_preload_head);
		__this_cpu_write(idr_preload_head, new);
		__this_cpu_inc(idr_preload_cnt);
	}
}
EXPORT_SYMBOL(idr_preload);

/**
 * idr_alloc - allocate new idr entry
 * @idr: the (initialized) idr
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive, <= 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Allocate an id in [start, end) and associate it with @ptr.  If no ID is
 * available in the specified range, returns -ENOSPC.  On memory allocation
 * failure, returns -ENOMEM.
 *
 * Note that @end is treated as max when <= 0.  This is to always allow
 * using @start + N as @end as long as N is inside integer range.
 *
 * The user is responsible for exclusively synchronizing all operations
 * which may modify @idr.  However, read-only accesses such as idr_find()
 * or iteration can be performed under RCU read lock provided the user
 * destroys @ptr in RCU-safe way after removal from idr.
 */
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int max = end > 0 ? end - 1 : INT_MAX;	/* inclusive upper limit */
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	int id;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	/* sanity checks */
	if (WARN_ON_ONCE(start < 0))
		return -EINVAL;
	if (unlikely(max < start))
		return -ENOSPC;

	/* allocate id */
	id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL);
	if (unlikely(id < 0))
		return id;
	if (unlikely(id > max))
		return -ENOSPC;

	idr_fill_slot(idr, ptr, id, pa);
	return id;
}
EXPORT_SYMBOL_GPL(idr_alloc);
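
/*
 * A minimal sketch of allocating in a bounded range ("ctx_idr" and "ctx"
 * are hypothetical names, not part of this file).  ids are constrained
 * to [1, 255] because @end is exclusive:
 *
 *	id = idr_alloc(ctx_idr, ctx, 1, 256, GFP_KERNEL);
 *	if (id < 0)
 *		return id;	(-ENOSPC if the range is full, -ENOMEM on OOM)
 *	ctx->id = id;
 */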

/**
 * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
 * @idr: the (initialized) idr
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive, <= 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Essentially the same as idr_alloc, but prefers to allocate progressively
 * higher ids if it can.  If the "cur" counter wraps, then it will start
 * again at the "start" end of the range and allocate an id that has
 * already been used and freed.
 */
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end,
			gfp_t gfp_mask)
{
	int id;

	id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask);
	if (id == -ENOSPC)
		id = idr_alloc(idr, ptr, start, end, gfp_mask);

	if (likely(id >= 0))
		idr->cur = id + 1;
	return id;
}
EXPORT_SYMBOL(idr_alloc_cyclic);
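
/*
 * Illustrative behaviour for a hypothetical idr with @start == 0 and
 * @end == 4 (ids 0..3):
 *
 *	idr_alloc_cyclic() returns 0, 1, 2, 3 on successive calls;
 *	after idr_remove(idr, 1), the next call wraps past "cur" == 4
 *	and returns 1 again.
 *
 * Preferring fresh ids makes it less likely that a stale id held
 * elsewhere will alias a newly allocated object.
 */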

static void idr_remove_warning(int id)
{
	WARN(1, "idr_remove called for id=%d which is not allocated.\n", id);
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_IDR_LEVEL + 1];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, p->bitmap))) {
		__clear_bit(n, p->bitmap);
		RCU_INIT_POINTER(p->ary[n], NULL);
		to_free = NULL;
		while (*paa && !--((**paa)->count)) {
			if (to_free)
				free_layer(idp, to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(idp, to_free);
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	if (id < 0)
		return;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->count = 0;
		bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
		free_layer(idp, to_free);
	}
	while (idp->id_free_cnt >= MAX_IDR_FREE) {
		p = get_from_free_list(idp);
		/*
		 * Note: we don't call the rcu callback here, since the only
		 * layers that fall into the freelist are those that have been
		 * preallocated.
		 */
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_remove);

static void __idr_remove_all(struct idr *idp)
{
	int n, id, max;
	int bt_mask;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	RCU_INIT_POINTER(idp->top, NULL);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		bt_mask = id;
		id += 1 << n;
		/* Get the highest bit that the above add changed from 0->1. */
		while (n < fls(id ^ bt_mask)) {
			if (p)
				free_layer(idp, p);
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->layers = 0;
}

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 *
 * Free all id mappings and all idp_layers.  After this function, @idp is
 * completely unused and can be freed / recycled.  The caller is
 * responsible for ensuring that no one else accesses @idp during or after
 * idr_destroy().
 *
 * A typical clean-up sequence for objects stored in an idr tree will use
 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
 * free up the id mappings and cached idr_layers.
 */
void idr_destroy(struct idr *idp)
{
	__idr_remove_all(idp);

	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);

void *idr_find_slowpath(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	if (id < 0)
		return NULL;

	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer+1) * IDR_BITS;

	if (id > idr_max(p->layer + 1))
		return NULL;
	BUG_ON(n == 0);

	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
	}
	return (void *)p;
}
EXPORT_SYMBOL(idr_find_slowpath);
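
/*
 * A minimal lookup sketch.  idr_find() is the fast path declared in
 * <linux/idr.h> which checks ->hint and falls back to idr_find_slowpath();
 * "my_idr", "obj" and use() are hypothetical:
 *
 *	rcu_read_lock();
 *	obj = idr_find(&my_idr, id);
 *	if (obj)
 *		use(obj);	(valid until rcu_read_unlock(), provided the
 *				 writer frees it RCU-safely after idr_remove())
 *	rcu_read_unlock();
 */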

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_alloc() and idr_remove() are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_alloc() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference_raw(idp->top);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);
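
/*
 * A sketch of the "typical clean-up sequence" mentioned above
 * idr_destroy() (the callback and "my_idr" are hypothetical):
 *
 *	static int free_entry(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&my_idr, free_entry, NULL);
 *	idr_destroy(&my_idr);
 */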

/**
 * idr_get_next - lookup next object starting from the given id
 * @idp: idr handle
 * @nextidp: pointer to lookup key
 *
 * Returns a pointer to the first registered object whose id is greater
 * than or equal to *@nextidp.  On success, *@nextidp is updated to that
 * object's id so the caller can advance it for the next iteration.
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers lifetimes are correctly managed.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
	struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];
	int id = *nextidp;
	int n, max;

	/* find first ent */
	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer + 1) * IDR_BITS;
	max = idr_max(p->layer + 1);

	while (id >= 0 && id <= max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			*nextidp = id;
			return p;
		}

		/*
		 * Proceed to the next layer at the current level.  Unlike
		 * idr_for_each(), @id isn't guaranteed to be aligned to
		 * layer boundary at this point and adding 1 << n may
		 * incorrectly skip IDs.  Make sure we jump to the
		 * beginning of the next layer using round_up().
		 */
		id = round_up(id + 1, 1 << n);
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(idr_get_next);
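
/*
 * A minimal iteration sketch built on idr_get_next(); this is the pattern
 * the idr_for_each_entry() helper in <linux/idr.h> wraps ("my_idr" and
 * use() are hypothetical):
 *
 *	int id;
 *	void *p;
 *
 *	for (id = 0; (p = idr_get_next(&my_idr, &id)) != NULL; id++)
 *		use(p);
 */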

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	if (id < 0)
		return ERR_PTR(-EINVAL);

	p = idp->top;
	if (!p)
		return ERR_PTR(-EINVAL);

	n = (p->layer+1) * IDR_BITS;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);
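
/*
 * A usage sketch.  Errors are encoded in the returned pointer, so check
 * with IS_ERR() rather than against NULL ("my_idr" and "new_obj" are
 * hypothetical):
 *
 *	old = idr_replace(&my_idr, new_obj, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);	(-ENOENT or -EINVAL)
 *	(then free old RCU-safely once readers can no longer see it)
 */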

void __init idr_init_cache(void)
{
	idr_layer_cache = kmem_cache_create("idr_layer_cache",
				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);

static int idr_has_entry(int id, void *p, void *data)
{
	return 1;
}

bool idr_is_empty(struct idr *idp)
{
	return !idr_for_each(idp, idr_has_entry, NULL);
}
EXPORT_SYMBOL(idr_is_empty);

/**
 * DOC: IDA description
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flag
 *
 * This function should be called prior to taking locks and calling
 * ida_get_new_above().  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!__idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
	if (t < 0)
		return t == -ENOMEM ? -EAGAIN : t;

	if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* lookup for empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_IDR_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have small memory foot print.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);
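
/*
 * The typical retry loop for the two-step IDA interface (the lock and
 * "my_ida" are hypothetical; ida_simple_get() below wraps this pattern):
 *
 *	int id, ret;
 * again:
 *	if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	ret = ida_get_new_above(&my_ida, 0, &id);
 *	spin_unlock(&my_lock);
 *	if (ret == -EAGAIN)
 *		goto again;
 *	(ret is 0 on success with the new id in "id", or -ENOSPC)
 */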

/**
 * ida_remove - remove the given ID
 * @ida: ida handle
 * @id: ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < 0x80000000)
 * @end: the maximum id (exclusive, < 0x80000000 or 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.
 *
 * Use ida_simple_remove() to get rid of an id.
 */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask)
{
	int ret, id;
	unsigned int max;
	unsigned long flags;

	BUG_ON((int)start < 0);
	BUG_ON((int)end < 0);

	if (end == 0)
		max = 0x80000000;
	else {
		BUG_ON(end < start);
		max = end - 1;
	}

again:
	if (!ida_pre_get(ida, gfp_mask))
		return -ENOMEM;

	spin_lock_irqsave(&simple_ida_lock, flags);
	ret = ida_get_new_above(ida, start, &id);
	if (!ret) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	spin_unlock_irqrestore(&simple_ida_lock, flags);

	if (unlikely(ret == -EAGAIN))
		goto again;

	return ret;
}
EXPORT_SYMBOL(ida_simple_get);

/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
{
	unsigned long flags;

	BUG_ON((int)id < 0);
	spin_lock_irqsave(&simple_ida_lock, flags);
	ida_remove(ida, id);
	spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);
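
/*
 * A minimal sketch of the simple interface ("my_ida" is hypothetical;
 * DEFINE_IDA() comes from <linux/idr.h>):
 *
 *	static DEFINE_IDA(my_ida);
 *
 *	int id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return id;	(-ENOMEM or -ENOSPC)
 *	...
 *	ida_simple_remove(&my_ida, id);
 */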

/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);