xref: /openbmc/linux/lib/idr.c (revision 6a6d6681ac1add9655b7ab5dd0b46b54aeb1b44f)
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

/**
 * idr_alloc_u32() - Allocate an ID.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @nextid: Pointer to an ID.
 * @max: The maximum ID to allocate (inclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @nextid and @max.
 * Note that @max is inclusive whereas the @end parameter to idr_alloc()
 * is exclusive.  The new ID is assigned to @nextid before the pointer
 * is inserted into the IDR, so if @nextid points into the object pointed
 * to by @ptr, a concurrent lookup will not find an uninitialised ID.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible.  Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: 0 if an ID was allocated, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found.  If an error occurred,
 * @nextid is unchanged.
 */
int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
			unsigned long max, gfp_t gfp)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	unsigned int base = idr->idr_base;
	unsigned int id = *nextid;

	if (WARN_ON_ONCE(!(idr->idr_rt.xa_flags & ROOT_IS_IDR)))
		idr->idr_rt.xa_flags |= IDR_RT_MARKER;

	id = (id < base) ? 0 : id - base;
	radix_tree_iter_init(&iter, id);
	slot = idr_get_free(&idr->idr_rt, &iter, gfp, max - base);
	if (IS_ERR(slot))
		return PTR_ERR(slot);

	*nextid = iter.index + base;
	/* there is a memory barrier inside radix_tree_iter_replace() */
	radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
	radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);

	return 0;
}
EXPORT_SYMBOL_GPL(idr_alloc_u32);
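
/*
 * Example (editorial sketch, not part of the original file): because
 * idr_alloc_u32() assigns the new ID through @nextid before the pointer
 * becomes visible in the IDR, @nextid may point at a field inside the
 * object being inserted.  The structure, IDR and function below are
 * hypothetical; the caller is still expected to provide its own locking.
 */
struct example_item {
	u32 id;
	/* ... payload ... */
};

static DEFINE_IDR(example_item_idr);

static int example_item_insert(struct example_item *item, gfp_t gfp)
{
	item->id = 0;	/* start searching from the lowest free ID */
	/* on success, item->id holds the ID before idr_find() can see item */
	return idr_alloc_u32(&example_item_idr, item, &item->id, U32_MAX, gfp);
}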

/**
 * idr_alloc() - Allocate an ID.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @start: The minimum ID (inclusive).
 * @end: The maximum ID (exclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @start and @end.  If
 * @end is <= 0, it is treated as one larger than %INT_MAX.  This allows
 * callers to use @start + N as @end as long as N is within integer range.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible.  Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found.
 */
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
	u32 id = start;
	int ret;

	if (WARN_ON_ONCE(start < 0))
		return -EINVAL;

	ret = idr_alloc_u32(idr, ptr, &id, end > 0 ? end - 1 : INT_MAX, gfp);
	if (ret)
		return ret;

	return id;
}
EXPORT_SYMBOL_GPL(idr_alloc);
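
/*
 * Example (editorial sketch, not part of the original file): the common
 * idr_alloc() calling pattern -- preload outside the lock, then allocate
 * with GFP_NOWAIT while holding it.  example_idr, example_lock and the
 * wrapper function are hypothetical.
 */
static DEFINE_IDR(example_idr);
static DEFINE_SPINLOCK(example_lock);

static int example_obj_register(void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock(&example_lock);
	/* start at 1; an end of 0 is treated as one larger than INT_MAX */
	id = idr_alloc(&example_idr, obj, 1, 0, GFP_NOWAIT);
	spin_unlock(&example_lock);
	idr_preload_end();

	return id;	/* the new ID, or -ENOMEM / -ENOSPC */
}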

/**
 * idr_alloc_cyclic() - Allocate an ID cyclically.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @start: The minimum ID (inclusive).
 * @end: The maximum ID (exclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @start and @end.  If
 * @end is <= 0, it is treated as one larger than %INT_MAX.  This allows
 * callers to use @start + N as @end as long as N is within integer range.
 * The search for an unused ID will start at the last ID allocated and will
 * wrap around to @start if no free IDs are found before reaching @end.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible.  Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found.
 */
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
	u32 id = idr->idr_next;
	int err, max = end > 0 ? end - 1 : INT_MAX;

	if ((int)id < start)
		id = start;

	err = idr_alloc_u32(idr, ptr, &id, max, gfp);
	if ((err == -ENOSPC) && (id > start)) {
		id = start;
		err = idr_alloc_u32(idr, ptr, &id, max, gfp);
	}
	if (err)
		return err;

	idr->idr_next = id + 1;
	return id;
}
EXPORT_SYMBOL(idr_alloc_cyclic);
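
/*
 * Example (editorial sketch, not part of the original file): cyclic
 * allocation keeps handing out increasing IDs and only reuses freed IDs
 * after wrapping past @end, which helps callers that do not want a just
 * freed ID to be recycled immediately.  The IDR and function names are
 * hypothetical; locking is the caller's responsibility and omitted here.
 */
static DEFINE_IDR(example_session_idr);

static int example_session_create(void *session)
{
	/* allocate in [1, INT_MAX], continuing after the last allocated ID */
	return idr_alloc_cyclic(&example_session_idr, session, 1, 0, GFP_KERNEL);
}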

/**
 * idr_remove() - Remove an ID from the IDR.
 * @idr: IDR handle.
 * @id: Pointer ID.
 *
 * Removes this ID from the IDR.  If the ID was not previously in the IDR,
 * this function returns %NULL.
 *
 * Since this function modifies the IDR, the caller should provide their
 * own locking to ensure that concurrent modification of the same IDR is
 * not possible.
 *
 * Return: The pointer formerly associated with this ID.
 */
void *idr_remove(struct idr *idr, unsigned long id)
{
	return radix_tree_delete_item(&idr->idr_rt, id - idr->idr_base, NULL);
}
EXPORT_SYMBOL_GPL(idr_remove);

/**
 * idr_find() - Return pointer for given ID.
 * @idr: IDR handle.
 * @id: Pointer ID.
 *
 * Looks up the pointer associated with this ID.  A %NULL pointer may
 * indicate that @id is not allocated or that the %NULL pointer was
 * associated with this ID.
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 *
 * Return: The pointer associated with this ID.
 */
void *idr_find(const struct idr *idr, unsigned long id)
{
	return radix_tree_lookup(&idr->idr_rt, id - idr->idr_base);
}
EXPORT_SYMBOL_GPL(idr_find);
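
/*
 * Example (editorial sketch, not part of the original file): a read-side
 * lookup under the RCU read lock, reusing the hypothetical example_idr
 * defined above.  This assumes stored objects are freed only after an
 * RCU grace period (e.g. via kfree_rcu()), and a real caller would take
 * a reference on the object before dropping the read lock.
 */
static void *example_obj_lookup(int id)
{
	void *obj;

	rcu_read_lock();
	obj = idr_find(&example_idr, id);
	/* take a reference on obj here before unlocking in real code */
	rcu_read_unlock();

	return obj;
}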

/**
 * idr_for_each() - Iterate through all stored pointers.
 * @idr: IDR handle.
 * @fn: Function to be called for each pointer.
 * @data: Data passed to callback function.
 *
 * The callback function will be called for each entry in @idr, passing
 * the ID, the entry and @data.
 *
 * If @fn returns anything other than %0, the iteration stops and that
 * value is returned from this function.
 *
 * idr_for_each() can be called concurrently with idr_alloc() and
 * idr_remove() if protected by RCU.  Newly added entries may not be
 * seen and deleted entries may be seen, but adding and removing entries
 * will not cause other entries to be skipped, nor spurious ones to be seen.
 */
int idr_for_each(const struct idr *idr,
		int (*fn)(int id, void *p, void *data), void *data)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	int base = idr->idr_base;

	radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
		int ret;
		unsigned long id = iter.index + base;

		if (WARN_ON_ONCE(id > INT_MAX))
			break;
		ret = fn(id, rcu_dereference_raw(*slot), data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(idr_for_each);
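
/*
 * Example (editorial sketch, not part of the original file): an
 * idr_for_each() callback.  Returning non-zero stops the walk and that
 * value is propagated, so the hypothetical helper below reports whether
 * a given pointer is present in example_idr.
 */
static int example_match_fn(int id, void *p, void *data)
{
	return p == data;	/* non-zero terminates the iteration */
}

static bool example_idr_contains(void *needle)
{
	return idr_for_each(&example_idr, example_match_fn, needle) != 0;
}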

/**
 * idr_get_next() - Find next populated entry.
 * @idr: IDR handle.
 * @nextid: Pointer to an ID.
 *
 * Returns the next populated entry in the tree with an ID greater than
 * or equal to the value pointed to by @nextid.  On exit, @nextid is updated
 * to the ID of the found value.  To use in a loop, the value pointed to by
 * @nextid must be incremented by the user.
 */
void *idr_get_next(struct idr *idr, int *nextid)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	unsigned long base = idr->idr_base;
	unsigned long id = *nextid;

	id = (id < base) ? 0 : id - base;
	slot = radix_tree_iter_find(&idr->idr_rt, &iter, id);
	if (!slot)
		return NULL;
	id = iter.index + base;

	if (WARN_ON_ONCE(id > INT_MAX))
		return NULL;

	*nextid = id;
	return rcu_dereference_raw(*slot);
}
EXPORT_SYMBOL(idr_get_next);
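
/*
 * Example (editorial sketch, not part of the original file): the manual
 * cursor loop around idr_get_next(); this is essentially what the
 * idr_for_each_entry() macro wraps.  Note that the caller advances the
 * cursor itself after handling each entry.
 */
static unsigned int example_count_entries(void)
{
	unsigned int count = 0;
	void *entry;
	int id;

	for (id = 0; (entry = idr_get_next(&example_idr, &id)) != NULL; id++)
		count++;

	return count;
}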

/**
 * idr_get_next_ul() - Find next populated entry.
 * @idr: IDR handle.
 * @nextid: Pointer to an ID.
 *
 * Returns the next populated entry in the tree with an ID greater than
 * or equal to the value pointed to by @nextid.  On exit, @nextid is updated
 * to the ID of the found value.  To use in a loop, the value pointed to by
 * @nextid must be incremented by the user.
 */
void *idr_get_next_ul(struct idr *idr, unsigned long *nextid)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	unsigned long base = idr->idr_base;
	unsigned long id = *nextid;

	id = (id < base) ? 0 : id - base;
	slot = radix_tree_iter_find(&idr->idr_rt, &iter, id);
	if (!slot)
		return NULL;

	*nextid = iter.index + base;
	return rcu_dereference_raw(*slot);
}
EXPORT_SYMBOL(idr_get_next_ul);

/**
 * idr_replace() - replace pointer for given ID.
 * @idr: IDR handle.
 * @ptr: New pointer to associate with the ID.
 * @id: ID to change.
 *
 * Replace the pointer registered with an ID and return the old value.
 * This function can be called under the RCU read lock concurrently with
 * idr_alloc() and idr_remove() (as long as the ID being removed is not
 * the one being replaced!).
 *
 * Returns: the old value on success.  %-ENOENT indicates that @id was not
 * found.  %-EINVAL indicates that @ptr was not valid.
 */
void *idr_replace(struct idr *idr, void *ptr, unsigned long id)
{
	struct radix_tree_node *node;
	void __rcu **slot = NULL;
	void *entry;

	id -= idr->idr_base;

	entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
	if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
		return ERR_PTR(-ENOENT);

	__radix_tree_replace(&idr->idr_rt, node, slot, ptr);

	return entry;
}
EXPORT_SYMBOL(idr_replace);
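
/*
 * Example (editorial sketch, not part of the original file): swapping the
 * object registered under an existing ID.  idr_replace() returns either
 * the old pointer or an ERR_PTR() value, so the result must be checked
 * with IS_ERR().  The helper assumes the old object was kmalloc()ed.
 */
static int example_obj_update(int id, void *new_obj)
{
	void *old;

	old = idr_replace(&example_idr, new_obj, id);
	if (IS_ERR(old))
		return PTR_ERR(old);	/* -ENOENT if @id was never allocated */

	kfree(old);
	return 0;
}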

/**
 * DOC: IDA description
 *
 * The IDA is an ID allocator which does not provide the ability to
 * associate an ID with a pointer.  As such, it only needs to store one
 * bit per ID, and so is more space efficient than an IDR.  To use an IDA,
 * define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
 * then initialise it using ida_init()).  To allocate a new ID, call
 * ida_alloc(), ida_alloc_min(), ida_alloc_max() or ida_alloc_range().
 * To free an ID, call ida_free().
 *
 * ida_destroy() can be used to dispose of an IDA without needing to
 * free the individual IDs in it.  You can use ida_is_empty() to find
 * out whether the IDA has any IDs currently allocated.
 *
 * The IDA handles its own locking.  It is safe to call any of the IDA
 * functions without synchronisation in your code.
 *
 * IDs are currently limited to the range [0-INT_MAX].  If this is an awkward
 * limitation, it should be quite straightforward to raise the maximum.
 */

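/*
 * Example (editorial sketch, not part of the original file): typical IDA
 * usage.  The IDA performs its own locking, so the hypothetical helpers
 * below need no external synchronisation.
 */
static DEFINE_IDA(example_ida);

static int example_instance_create(void)
{
	return ida_alloc(&example_ida, GFP_KERNEL);	/* >= 0, or -errno */
}

static void example_instance_destroy(int nr)
{
	ida_free(&example_ida, nr);
}
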
/*
 * Developer's notes:
 *
 * The IDA uses the functionality provided by the XArray to store bitmaps in
 * each entry.  The XA_FREE_MARK is only cleared when all bits in the bitmap
 * have been set.
 *
 * I considered telling the XArray that each slot is an order-10 node
 * and indexing by bit number, but the XArray can't allow a single multi-index
 * entry in the head, which would significantly increase memory consumption
 * for the IDA.  So instead we divide the index by the number of bits in the
 * leaf bitmap before doing a radix tree lookup.
 *
 * As an optimisation, if there are only a few low bits set in any given
 * leaf, instead of allocating a 128-byte bitmap, we store the bits
 * as a value entry.  Value entries never have the XA_FREE_MARK cleared
 * because we can always convert them into a bitmap entry.
 *
 * It would be possible to optimise further; once we've run out of a
 * single 128-byte bitmap, we currently switch to a 576-byte node, put
 * the 128-byte bitmap in the first entry and then start allocating extra
 * 128-byte entries.  We could instead use the 512 bytes of the node's
 * data as a bitmap before moving to that scheme.  I do not believe this
 * is a worthwhile optimisation; Rasmus Villemoes surveyed the current
 * users of the IDA and almost none of them use more than 1024 entries.
 * Those that do need more than 1024 entries also use more than the 8192
 * IDs that the 512 bytes would provide.
 *
 * The IDA always uses a lock to alloc/free.  If we add a 'test_bit'
 * equivalent, it will still need locking.  Going to RCU lookup would require
 * using RCU to free bitmaps, and that's not trivial without embedding an
 * RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte
 * bitmap, which is excessive.
 */

/**
 * ida_alloc_range() - Allocate an unused ID.
 * @ida: IDA handle.
 * @min: Lowest ID to allocate.
 * @max: Highest ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between @min and @max, inclusive.  The allocated ID will
 * not exceed %INT_MAX, even if @max is larger.
 *
 * Context: Any context.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max,
			gfp_t gfp)
{
	XA_STATE(xas, &ida->xa, min / IDA_BITMAP_BITS);
	unsigned bit = min % IDA_BITMAP_BITS;
	unsigned long flags;
	struct ida_bitmap *bitmap, *alloc = NULL;

	if ((int)min < 0)
		return -ENOSPC;

	if ((int)max < 0)
		max = INT_MAX;

retry:
	xas_lock_irqsave(&xas, flags);
next:
	bitmap = xas_find_marked(&xas, max / IDA_BITMAP_BITS, XA_FREE_MARK);
	if (xas.xa_index > min / IDA_BITMAP_BITS)
		bit = 0;
	if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
		goto nospc;

	if (xa_is_value(bitmap)) {
		unsigned long tmp = xa_to_value(bitmap);

		if (bit < BITS_PER_XA_VALUE) {
			bit = find_next_zero_bit(&tmp, BITS_PER_XA_VALUE, bit);
			if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
				goto nospc;
			if (bit < BITS_PER_XA_VALUE) {
				tmp |= 1UL << bit;
				xas_store(&xas, xa_mk_value(tmp));
				goto out;
			}
		}
		bitmap = alloc;
		if (!bitmap)
			bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
		if (!bitmap)
			goto alloc;
		bitmap->bitmap[0] = tmp;
		xas_store(&xas, bitmap);
		if (xas_error(&xas)) {
			bitmap->bitmap[0] = 0;
			goto out;
		}
	}

	if (bitmap) {
		bit = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, bit);
		if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
			goto nospc;
		if (bit == IDA_BITMAP_BITS)
			goto next;

		__set_bit(bit, bitmap->bitmap);
		if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
			xas_clear_mark(&xas, XA_FREE_MARK);
	} else {
		if (bit < BITS_PER_XA_VALUE) {
			bitmap = xa_mk_value(1UL << bit);
		} else {
			bitmap = alloc;
			if (!bitmap)
				bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
			if (!bitmap)
				goto alloc;
			__set_bit(bit, bitmap->bitmap);
		}
		xas_store(&xas, bitmap);
	}
out:
	xas_unlock_irqrestore(&xas, flags);
	if (xas_nomem(&xas, gfp)) {
		xas.xa_index = min / IDA_BITMAP_BITS;
		bit = min % IDA_BITMAP_BITS;
		goto retry;
	}
	if (bitmap != alloc)
		kfree(alloc);
	if (xas_error(&xas))
		return xas_error(&xas);
	return xas.xa_index * IDA_BITMAP_BITS + bit;
alloc:
	xas_unlock_irqrestore(&xas, flags);
	alloc = kzalloc(sizeof(*bitmap), gfp);
	if (!alloc)
		return -ENOMEM;
	xas_set(&xas, min / IDA_BITMAP_BITS);
	bit = min % IDA_BITMAP_BITS;
	goto retry;
nospc:
	xas_unlock_irqrestore(&xas, flags);
	return -ENOSPC;
}
EXPORT_SYMBOL(ida_alloc_range);
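
/*
 * Example (editorial sketch, not part of the original file): constraining
 * the allocated range, e.g. to fit a fixed-width hardware ID field.
 * Reuses the hypothetical example_ida defined above; returns an ID in
 * [8, 15] or a negative errno.
 */
static int example_alloc_small_id(void)
{
	return ida_alloc_range(&example_ida, 8, 15, GFP_KERNEL);
}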

/**
 * ida_free() - Release an allocated ID.
 * @ida: IDA handle.
 * @id: Previously allocated ID.
 *
 * Context: Any context.
 */
void ida_free(struct ida *ida, unsigned int id)
{
	XA_STATE(xas, &ida->xa, id / IDA_BITMAP_BITS);
	unsigned bit = id % IDA_BITMAP_BITS;
	struct ida_bitmap *bitmap;
	unsigned long flags;

	BUG_ON((int)id < 0);

	xas_lock_irqsave(&xas, flags);
	bitmap = xas_load(&xas);

	if (xa_is_value(bitmap)) {
		unsigned long v = xa_to_value(bitmap);
		if (bit >= BITS_PER_XA_VALUE)
			goto err;
		if (!(v & (1UL << bit)))
			goto err;
		v &= ~(1UL << bit);
		if (!v)
			goto delete;
		xas_store(&xas, xa_mk_value(v));
	} else {
		if (!test_bit(bit, bitmap->bitmap))
			goto err;
		__clear_bit(bit, bitmap->bitmap);
		xas_set_mark(&xas, XA_FREE_MARK);
		if (bitmap_empty(bitmap->bitmap, IDA_BITMAP_BITS)) {
			kfree(bitmap);
delete:
			xas_store(&xas, NULL);
		}
	}
	xas_unlock_irqrestore(&xas, flags);
	return;
 err:
	xas_unlock_irqrestore(&xas, flags);
	WARN(1, "ida_free called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_free);

/**
 * ida_destroy() - Free all IDs.
 * @ida: IDA handle.
 *
 * Calling this function frees all IDs and releases all resources used
 * by an IDA.  When this call returns, the IDA is empty and can be reused
 * or freed.  If the IDA is already empty, there is no need to call this
 * function.
 *
 * Context: Any context.
 */
void ida_destroy(struct ida *ida)
{
	XA_STATE(xas, &ida->xa, 0);
	struct ida_bitmap *bitmap;
	unsigned long flags;

	xas_lock_irqsave(&xas, flags);
	xas_for_each(&xas, bitmap, ULONG_MAX) {
		if (!xa_is_value(bitmap))
			kfree(bitmap);
		xas_store(&xas, NULL);
	}
	xas_unlock_irqrestore(&xas, flags);
}
EXPORT_SYMBOL(ida_destroy);

#ifndef __KERNEL__
extern void xa_dump_index(unsigned long index, unsigned int shift);
#define IDA_CHUNK_SHIFT		ilog2(IDA_BITMAP_BITS)

static void ida_dump_entry(void *entry, unsigned long index)
{
	unsigned long i;

	if (!entry)
		return;

	if (xa_is_node(entry)) {
		struct xa_node *node = xa_to_node(entry);
		unsigned int shift = node->shift + IDA_CHUNK_SHIFT +
			XA_CHUNK_SHIFT;

		xa_dump_index(index * IDA_BITMAP_BITS, shift);
		xa_dump_node(node);
		for (i = 0; i < XA_CHUNK_SIZE; i++)
			ida_dump_entry(node->slots[i],
					index | (i << node->shift));
	} else if (xa_is_value(entry)) {
		xa_dump_index(index * IDA_BITMAP_BITS, ilog2(BITS_PER_LONG));
		pr_cont("value: data %lx [%px]\n", xa_to_value(entry), entry);
	} else {
		struct ida_bitmap *bitmap = entry;

		xa_dump_index(index * IDA_BITMAP_BITS, IDA_CHUNK_SHIFT);
		pr_cont("bitmap: %p data", bitmap);
		for (i = 0; i < IDA_BITMAP_LONGS; i++)
			pr_cont(" %lx", bitmap->bitmap[i]);
		pr_cont("\n");
	}
}

static void ida_dump(struct ida *ida)
{
	struct xarray *xa = &ida->xa;
	pr_debug("ida: %p node %p free %d\n", ida, xa->xa_head,
				xa->xa_flags >> ROOT_TAG_SHIFT);
	ida_dump_entry(xa->xa_head, 0);
}
#endif