/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * include/linux/idr.h
 *
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *
 * Small id to pointer translation service avoiding fixed sized
 * tables.
 */

#ifndef __IDR_H__
#define __IDR_H__

#include <linux/radix-tree.h>
#include <linux/gfp.h>
#include <linux/percpu.h>

struct idr {
	struct radix_tree_root	idr_rt;
	unsigned int		idr_base;
	unsigned int		idr_next;
};

/*
 * The IDR API does not expose the tagging functionality of the radix tree
 * to users.  Use tag 0 to track whether a node has free space below it.
 */
#define IDR_FREE	0

/* Set the IDR flag and the IDR_FREE tag */
#define IDR_RT_MARKER	(ROOT_IS_IDR | (__force gfp_t)			\
					(1 << (ROOT_TAG_SHIFT + IDR_FREE)))

#define IDR_INIT_BASE(name, base) {					\
	.idr_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER),			\
	.idr_base = (base),						\
	.idr_next = 0,							\
}

/**
 * IDR_INIT() - Initialise an IDR.
 * @name: Name of IDR.
 *
 * A freshly-initialised IDR contains no IDs.
 */
#define IDR_INIT(name)	IDR_INIT_BASE(name, 0)

/**
 * DEFINE_IDR() - Define a statically-allocated IDR.
 * @name: Name of IDR.
 *
 * An IDR defined using this macro is ready for use with no additional
 * initialisation required.  It contains no IDs.
 */
#define DEFINE_IDR(name)	struct idr name = IDR_INIT(name)
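
/*
 * Example (illustrative sketch only, not part of the API): there is no
 * DEFINE_IDR_BASE() convenience macro, so a static IDR whose IDs start
 * at 1 can be defined with IDR_INIT_BASE() directly.  The name "foo_idr"
 * is hypothetical.
 *
 *	static struct idr foo_idr = IDR_INIT_BASE(foo_idr, 1);
 */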

/**
 * idr_get_cursor - Return the current position of the cyclic allocator
 * @idr: idr handle
 *
 * The value returned is the value that will be next returned from
 * idr_alloc_cyclic() if it is free (otherwise the search will start from
 * this position).
 */
static inline unsigned int idr_get_cursor(const struct idr *idr)
{
	return READ_ONCE(idr->idr_next);
}

/**
 * idr_set_cursor - Set the current position of the cyclic allocator
 * @idr: idr handle
 * @val: new position
 *
 * The next call to idr_alloc_cyclic() will return @val if it is free
 * (otherwise the search will start from this position).
 */
static inline void idr_set_cursor(struct idr *idr, unsigned int val)
{
	WRITE_ONCE(idr->idr_next, val);
}
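
/*
 * Example (illustrative sketch only): checkpoint/restore-style code can
 * record the cyclic allocator's position and put it back later.  The
 * name "foo_idr" is hypothetical.
 *
 *	unsigned int cursor = idr_get_cursor(&foo_idr);
 *	...
 *	idr_set_cursor(&foo_idr, cursor);
 */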

/**
 * DOC: idr sync
 * idr synchronization (stolen from radix-tree.h)
 *
 * idr_find() can be called locklessly, using RCU. The caller must
 * ensure calls to this function are made within rcu_read_lock() regions.
 * Other readers (lock-free or otherwise) and modifications may be running
 * concurrently.
 *
 * It is still required that the caller manage the synchronization and
 * lifetimes of the items. So if RCU lock-free lookups are used, typically
 * this would mean that the items have their own locks, or are amenable to
 * lock-free access; and that the items are freed by RCU (or only freed after
 * having been deleted from the idr tree *and* a synchronize_rcu() grace
 * period).
 */
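
/*
 * Example (illustrative sketch only): a lockless lookup as described above.
 * "foo_idr", "struct foo" and foo_get() are hypothetical; the object must
 * carry its own reference count (taken here by foo_get() before leaving
 * the read-side critical section) and be freed only after an RCU grace
 * period.
 *
 *	rcu_read_lock();
 *	foo = idr_find(&foo_idr, id);
 *	if (foo)
 *		foo_get(foo);
 *	rcu_read_unlock();
 */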

#define idr_lock(idr)		xa_lock(&(idr)->idr_rt)
#define idr_unlock(idr)		xa_unlock(&(idr)->idr_rt)
#define idr_lock_bh(idr)	xa_lock_bh(&(idr)->idr_rt)
#define idr_unlock_bh(idr)	xa_unlock_bh(&(idr)->idr_rt)
#define idr_lock_irq(idr)	xa_lock_irq(&(idr)->idr_rt)
#define idr_unlock_irq(idr)	xa_unlock_irq(&(idr)->idr_rt)
#define idr_lock_irqsave(idr, flags) \
				xa_lock_irqsave(&(idr)->idr_rt, flags)
#define idr_unlock_irqrestore(idr, flags) \
				xa_unlock_irqrestore(&(idr)->idr_rt, flags)

void idr_preload(gfp_t gfp_mask);

int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t);
int __must_check idr_alloc_u32(struct idr *, void *ptr, u32 *id,
				unsigned long max, gfp_t);
int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t);
void *idr_remove(struct idr *, unsigned long id);
void *idr_find(const struct idr *, unsigned long id);
int idr_for_each(const struct idr *,
		 int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *, int *nextid);
void *idr_get_next_ul(struct idr *, unsigned long *nextid);
void *idr_replace(struct idr *, void *, unsigned long id);
void idr_destroy(struct idr *);
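
/*
 * Example (illustrative sketch only): typical allocate/lookup/remove usage
 * of the functions declared above.  "foo_idr" and "foo" are hypothetical.
 * An @end of 0 passed to idr_alloc() means there is no upper limit, and a
 * negative return value is -ENOMEM or -ENOSPC.
 *
 *	static DEFINE_IDR(foo_idr);
 *
 *	id = idr_alloc(&foo_idr, foo, 1, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return id;
 *	...
 *	foo = idr_find(&foo_idr, id);
 *	...
 *	idr_remove(&foo_idr, id);
 */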

/**
 * idr_init_base() - Initialise an IDR.
 * @idr: IDR handle.
 * @base: The base value for the IDR.
 *
 * This variation of idr_init() creates an IDR which will allocate IDs
 * starting at @base.
 */
static inline void idr_init_base(struct idr *idr, int base)
{
	INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
	idr->idr_base = base;
	idr->idr_next = 0;
}

/**
 * idr_init() - Initialise an IDR.
 * @idr: IDR handle.
 *
 * Initialise a dynamically allocated IDR.  To initialise a
 * statically allocated IDR, use DEFINE_IDR().
 */
static inline void idr_init(struct idr *idr)
{
	idr_init_base(idr, 0);
}

/**
 * idr_is_empty() - Are there any IDs allocated?
 * @idr: IDR handle.
 *
 * Return: %true if no IDs are currently allocated in this IDR.
 */
static inline bool idr_is_empty(const struct idr *idr)
{
	return radix_tree_empty(&idr->idr_rt) &&
		radix_tree_tagged(&idr->idr_rt, IDR_FREE);
}

/**
 * idr_preload_end - end preload section started with idr_preload()
 *
 * Each idr_preload() should be matched with an invocation of this
 * function.  See idr_preload() for details.
 */
static inline void idr_preload_end(void)
{
	local_unlock(&radix_tree_preloads.lock);
}
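
/*
 * Example (illustrative sketch only): the idr_preload()/idr_preload_end()
 * pattern lets the allocation itself run under a spinlock with GFP_NOWAIT
 * because the node memory was preloaded in a sleepable context.  "foo_idr"
 * and "foo_lock" are hypothetical.
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&foo_lock);
 *	id = idr_alloc(&foo_idr, foo, 0, 0, GFP_NOWAIT);
 *	spin_unlock(&foo_lock);
 *	idr_preload_end();
 */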

/**
 * idr_for_each_entry() - Iterate over an IDR's elements of a given type.
 * @idr: IDR handle.
 * @entry: The type * to use as cursor
 * @id: Entry ID.
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL.  This
 * is convenient for a "not found" value.
 */
#define idr_for_each_entry(idr, entry, id)			\
	for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; id += 1U)
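
/*
 * Example (illustrative sketch only): visiting every entry.  "foo_idr" and
 * "struct foo" are hypothetical; @id is a plain int here.
 *
 *	struct foo *foo;
 *	int id;
 *
 *	idr_for_each_entry(&foo_idr, foo, id)
 *		pr_info("id %d -> %p\n", id, foo);
 */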

/**
 * idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type.
 * @idr: IDR handle.
 * @entry: The type * to use as cursor.
 * @tmp: A temporary placeholder for ID.
 * @id: Entry ID.
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL.  This
 * is convenient for a "not found" value.
 */
#define idr_for_each_entry_ul(idr, entry, tmp, id)			\
	for (tmp = 0, id = 0;						\
	     ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
	     tmp = id, ++id)
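
/*
 * Example (illustrative sketch only): the _ul variant uses an unsigned long
 * ID, and @tmp makes the loop terminate cleanly if ++id wraps around after
 * an entry at ULONG_MAX.  "foo_idr" and "struct foo" are hypothetical.
 *
 *	struct foo *foo;
 *	unsigned long tmp, id;
 *
 *	idr_for_each_entry_ul(&foo_idr, foo, tmp, id)
 *		pr_info("id %lu -> %p\n", id, foo);
 */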

/**
 * idr_for_each_entry_continue() - Continue iteration over an IDR's elements of a given type
 * @idr: IDR handle.
 * @entry: The type * to use as a cursor.
 * @id: Entry ID.
 *
 * Continue to iterate over entries, continuing after the current position.
 */
#define idr_for_each_entry_continue(idr, entry, id)			\
	for ((entry) = idr_get_next((idr), &(id));			\
	     entry;							\
	     ++id, (entry) = idr_get_next((idr), &(id)))

/**
 * idr_for_each_entry_continue_ul() - Continue iteration over an IDR's elements of a given type
 * @idr: IDR handle.
 * @entry: The type * to use as a cursor.
 * @tmp: A temporary placeholder for ID.
 * @id: Entry ID.
 *
 * Continue to iterate over entries, continuing after the current position.
 * After normal termination @entry is left with the value NULL.  This
 * is convenient for a "not found" value.
 */
#define idr_for_each_entry_continue_ul(idr, entry, tmp, id)		\
	for (tmp = id;							\
	     ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
	     tmp = id, ++id)

/*
 * IDA - ID Allocator, use when translation from id to pointer isn't necessary.
 */
#define IDA_CHUNK_SIZE		128	/* 128 bytes per chunk */
#define IDA_BITMAP_LONGS	(IDA_CHUNK_SIZE / sizeof(long))
#define IDA_BITMAP_BITS		(IDA_BITMAP_LONGS * sizeof(long) * 8)

struct ida_bitmap {
	unsigned long		bitmap[IDA_BITMAP_LONGS];
};

struct ida {
	struct xarray xa;
};

#define IDA_INIT_FLAGS	(XA_FLAGS_LOCK_IRQ | XA_FLAGS_ALLOC)

#define IDA_INIT(name)	{						\
	.xa = XARRAY_INIT(name, IDA_INIT_FLAGS)				\
}
#define DEFINE_IDA(name)	struct ida name = IDA_INIT(name)

int ida_alloc_range(struct ida *, unsigned int min, unsigned int max, gfp_t);
void ida_free(struct ida *, unsigned int id);
void ida_destroy(struct ida *ida);

/**
 * ida_alloc() - Allocate an unused ID.
 * @ida: IDA handle.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between 0 and %INT_MAX, inclusive.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
static inline int ida_alloc(struct ida *ida, gfp_t gfp)
{
	return ida_alloc_range(ida, 0, ~0, gfp);
}

/**
 * ida_alloc_min() - Allocate an unused ID.
 * @ida: IDA handle.
 * @min: Lowest ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between @min and %INT_MAX, inclusive.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
{
	return ida_alloc_range(ida, min, ~0, gfp);
}

/**
 * ida_alloc_max() - Allocate an unused ID.
 * @ida: IDA handle.
 * @max: Highest ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between 0 and @max, inclusive.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
{
	return ida_alloc_range(ida, 0, max, gfp);
}

static inline void ida_init(struct ida *ida)
{
	xa_init_flags(&ida->xa, IDA_INIT_FLAGS);
}
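
/*
 * Example (illustrative sketch only): typical IDA usage.  ida_alloc()
 * returns the lowest free ID, which must later be given back with
 * ida_free().  "foo_ida" is hypothetical.
 *
 *	static DEFINE_IDA(foo_ida);
 *
 *	id = ida_alloc(&foo_ida, GFP_KERNEL);
 *	if (id < 0)
 *		return id;
 *	...
 *	ida_free(&foo_ida, id);
 */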

/*
 * ida_simple_get() and ida_simple_remove() are deprecated. Use
 * ida_alloc() and ida_free() instead respectively.
 */
#define ida_simple_get(ida, start, end, gfp)	\
			ida_alloc_range(ida, start, (end) - 1, gfp)
#define ida_simple_remove(ida, id)	ida_free(ida, id)
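
/*
 * Example (illustrative sketch only): converting away from the deprecated
 * wrappers.  ida_simple_get() takes an exclusive @end while
 * ida_alloc_range() takes an inclusive @max, hence the "- 1" above.
 * "foo_ida" is hypothetical.
 *
 *	id = ida_simple_get(&foo_ida, 0, 100, GFP_KERNEL);
 * becomes
 *	id = ida_alloc_range(&foo_ida, 0, 99, GFP_KERNEL);
 */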

static inline bool ida_is_empty(const struct ida *ida)
{
	return xa_empty(&ida->xa);
}
#endif /* __IDR_H__ */