xref: /openbmc/linux/lib/test_xarray.c (revision 7521a97b)
1ad3d6c72SMatthew Wilcox // SPDX-License-Identifier: GPL-2.0+
2ad3d6c72SMatthew Wilcox /*
3ad3d6c72SMatthew Wilcox  * test_xarray.c: Test the XArray API
4ad3d6c72SMatthew Wilcox  * Copyright (c) 2017-2018 Microsoft Corporation
5c44aa5e8SMatthew Wilcox (Oracle)  * Copyright (c) 2019-2020 Oracle
6ad3d6c72SMatthew Wilcox  * Author: Matthew Wilcox <willy@infradead.org>
7ad3d6c72SMatthew Wilcox  */
8ad3d6c72SMatthew Wilcox 
9ad3d6c72SMatthew Wilcox #include <linux/xarray.h>
10ad3d6c72SMatthew Wilcox #include <linux/module.h>
11ad3d6c72SMatthew Wilcox 
12ad3d6c72SMatthew Wilcox static unsigned int tests_run;
13ad3d6c72SMatthew Wilcox static unsigned int tests_passed;
14ad3d6c72SMatthew Wilcox 
15bd40b17cSMatthew Wilcox (Oracle) static const unsigned int order_limit =
16bd40b17cSMatthew Wilcox (Oracle) 		IS_ENABLED(CONFIG_XARRAY_MULTI) ? BITS_PER_LONG : 1;
17bd40b17cSMatthew Wilcox (Oracle) 
#ifndef XA_DEBUG
# ifdef __KERNEL__
/* Stub out the debug dumper when the XArray debug build is disabled. */
void xa_dump(const struct xarray *xa) { }
# endif
#undef XA_BUG_ON
/*
 * Count every check; on failure report the location, dump the array and
 * the stack, but keep running so later checks still execute.
 */
#define XA_BUG_ON(xa, x) do {					\
	tests_run++;						\
	if (x) {						\
		printk("BUG at %s:%d\n", __func__, __LINE__);	\
		xa_dump(xa);					\
		dump_stack();					\
	} else {						\
		tests_passed++;					\
	}							\
} while (0)
#endif
34ad3d6c72SMatthew Wilcox 
xa_mk_index(unsigned long index)35b7677a13SMatthew Wilcox static void *xa_mk_index(unsigned long index)
36b7677a13SMatthew Wilcox {
37b7677a13SMatthew Wilcox 	return xa_mk_value(index & LONG_MAX);
38b7677a13SMatthew Wilcox }
39b7677a13SMatthew Wilcox 
/* Store the value entry derived from @index at @index. */
static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_store(xa, index, xa_mk_index(index), gfp);
}
44ad3d6c72SMatthew Wilcox 
xa_insert_index(struct xarray * xa,unsigned long index)4512fd2aeeSMatthew Wilcox static void xa_insert_index(struct xarray *xa, unsigned long index)
4612fd2aeeSMatthew Wilcox {
4712fd2aeeSMatthew Wilcox 	XA_BUG_ON(xa, xa_insert(xa, index, xa_mk_index(index),
4812fd2aeeSMatthew Wilcox 				GFP_KERNEL) != 0);
4912fd2aeeSMatthew Wilcox }
5012fd2aeeSMatthew Wilcox 
/* Allocate an id and assert the allocator handed back exactly @index. */
static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	u32 id;

	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(index), xa_limit_32b,
				gfp) != 0);
	XA_BUG_ON(xa, id != index);
}
59371c752dSMatthew Wilcox 
xa_erase_index(struct xarray * xa,unsigned long index)60ad3d6c72SMatthew Wilcox static void xa_erase_index(struct xarray *xa, unsigned long index)
61ad3d6c72SMatthew Wilcox {
62b7677a13SMatthew Wilcox 	XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index));
6358d6ea30SMatthew Wilcox 	XA_BUG_ON(xa, xa_load(xa, index) != NULL);
6458d6ea30SMatthew Wilcox }
6558d6ea30SMatthew Wilcox 
6658d6ea30SMatthew Wilcox /*
6758d6ea30SMatthew Wilcox  * If anyone needs this, please move it to xarray.c.  We have no current
6858d6ea30SMatthew Wilcox  * users outside the test suite because all current multislot users want
6958d6ea30SMatthew Wilcox  * to use the advanced API.
7058d6ea30SMatthew Wilcox  */
xa_store_order(struct xarray * xa,unsigned long index,unsigned order,void * entry,gfp_t gfp)7158d6ea30SMatthew Wilcox static void *xa_store_order(struct xarray *xa, unsigned long index,
7258d6ea30SMatthew Wilcox 		unsigned order, void *entry, gfp_t gfp)
7358d6ea30SMatthew Wilcox {
7458d6ea30SMatthew Wilcox 	XA_STATE_ORDER(xas, xa, index, order);
7558d6ea30SMatthew Wilcox 	void *curr;
7658d6ea30SMatthew Wilcox 
7758d6ea30SMatthew Wilcox 	do {
7858d6ea30SMatthew Wilcox 		xas_lock(&xas);
7958d6ea30SMatthew Wilcox 		curr = xas_store(&xas, entry);
8058d6ea30SMatthew Wilcox 		xas_unlock(&xas);
8158d6ea30SMatthew Wilcox 	} while (xas_nomem(&xas, gfp));
8258d6ea30SMatthew Wilcox 
8358d6ea30SMatthew Wilcox 	return curr;
8458d6ea30SMatthew Wilcox }
8558d6ea30SMatthew Wilcox 
check_xa_err(struct xarray * xa)8658d6ea30SMatthew Wilcox static noinline void check_xa_err(struct xarray *xa)
8758d6ea30SMatthew Wilcox {
8858d6ea30SMatthew Wilcox 	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0);
8958d6ea30SMatthew Wilcox 	XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0);
9058d6ea30SMatthew Wilcox #ifndef __KERNEL__
9158d6ea30SMatthew Wilcox 	/* The kernel does not fail GFP_NOWAIT allocations */
9258d6ea30SMatthew Wilcox 	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
9358d6ea30SMatthew Wilcox 	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
9458d6ea30SMatthew Wilcox #endif
9558d6ea30SMatthew Wilcox 	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0);
9658d6ea30SMatthew Wilcox 	XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0);
9758d6ea30SMatthew Wilcox 	XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0);
9858d6ea30SMatthew Wilcox // kills the test-suite :-(
9958d6ea30SMatthew Wilcox //	XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
100ad3d6c72SMatthew Wilcox }
101ad3d6c72SMatthew Wilcox 
check_xas_retry(struct xarray * xa)102b803b428SMatthew Wilcox static noinline void check_xas_retry(struct xarray *xa)
103b803b428SMatthew Wilcox {
104b803b428SMatthew Wilcox 	XA_STATE(xas, xa, 0);
105b803b428SMatthew Wilcox 	void *entry;
106b803b428SMatthew Wilcox 
107b803b428SMatthew Wilcox 	xa_store_index(xa, 0, GFP_KERNEL);
108b803b428SMatthew Wilcox 	xa_store_index(xa, 1, GFP_KERNEL);
109b803b428SMatthew Wilcox 
110b803b428SMatthew Wilcox 	rcu_read_lock();
111b803b428SMatthew Wilcox 	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
112b803b428SMatthew Wilcox 	xa_erase_index(xa, 1);
113b803b428SMatthew Wilcox 	XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
114b803b428SMatthew Wilcox 	XA_BUG_ON(xa, xas_retry(&xas, NULL));
115b803b428SMatthew Wilcox 	XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
116b803b428SMatthew Wilcox 	xas_reset(&xas);
117b803b428SMatthew Wilcox 	XA_BUG_ON(xa, xas.xa_node != XAS_RESTART);
118b803b428SMatthew Wilcox 	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
119b803b428SMatthew Wilcox 	XA_BUG_ON(xa, xas.xa_node != NULL);
120bd54211bSMatthew Wilcox 	rcu_read_unlock();
121b803b428SMatthew Wilcox 
122b803b428SMatthew Wilcox 	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);
123bd54211bSMatthew Wilcox 
124bd54211bSMatthew Wilcox 	rcu_read_lock();
125b803b428SMatthew Wilcox 	XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas)));
126b803b428SMatthew Wilcox 	xas.xa_node = XAS_RESTART;
127b803b428SMatthew Wilcox 	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
128b803b428SMatthew Wilcox 	rcu_read_unlock();
129b803b428SMatthew Wilcox 
130b803b428SMatthew Wilcox 	/* Make sure we can iterate through retry entries */
131b803b428SMatthew Wilcox 	xas_lock(&xas);
132b803b428SMatthew Wilcox 	xas_set(&xas, 0);
133b803b428SMatthew Wilcox 	xas_store(&xas, XA_RETRY_ENTRY);
134b803b428SMatthew Wilcox 	xas_set(&xas, 1);
135b803b428SMatthew Wilcox 	xas_store(&xas, XA_RETRY_ENTRY);
136b803b428SMatthew Wilcox 
137b803b428SMatthew Wilcox 	xas_set(&xas, 0);
138b803b428SMatthew Wilcox 	xas_for_each(&xas, entry, ULONG_MAX) {
139b7677a13SMatthew Wilcox 		xas_store(&xas, xa_mk_index(xas.xa_index));
140b803b428SMatthew Wilcox 	}
141b803b428SMatthew Wilcox 	xas_unlock(&xas);
142b803b428SMatthew Wilcox 
143b803b428SMatthew Wilcox 	xa_erase_index(xa, 0);
144b803b428SMatthew Wilcox 	xa_erase_index(xa, 1);
145b803b428SMatthew Wilcox }
146b803b428SMatthew Wilcox 
check_xa_load(struct xarray * xa)147ad3d6c72SMatthew Wilcox static noinline void check_xa_load(struct xarray *xa)
148ad3d6c72SMatthew Wilcox {
149ad3d6c72SMatthew Wilcox 	unsigned long i, j;
150ad3d6c72SMatthew Wilcox 
151ad3d6c72SMatthew Wilcox 	for (i = 0; i < 1024; i++) {
152ad3d6c72SMatthew Wilcox 		for (j = 0; j < 1024; j++) {
153ad3d6c72SMatthew Wilcox 			void *entry = xa_load(xa, j);
154ad3d6c72SMatthew Wilcox 			if (j < i)
155ad3d6c72SMatthew Wilcox 				XA_BUG_ON(xa, xa_to_value(entry) != j);
156ad3d6c72SMatthew Wilcox 			else
157ad3d6c72SMatthew Wilcox 				XA_BUG_ON(xa, entry);
158ad3d6c72SMatthew Wilcox 		}
159ad3d6c72SMatthew Wilcox 		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
160ad3d6c72SMatthew Wilcox 	}
161ad3d6c72SMatthew Wilcox 
162ad3d6c72SMatthew Wilcox 	for (i = 0; i < 1024; i++) {
163ad3d6c72SMatthew Wilcox 		for (j = 0; j < 1024; j++) {
164ad3d6c72SMatthew Wilcox 			void *entry = xa_load(xa, j);
165ad3d6c72SMatthew Wilcox 			if (j >= i)
166ad3d6c72SMatthew Wilcox 				XA_BUG_ON(xa, xa_to_value(entry) != j);
167ad3d6c72SMatthew Wilcox 			else
168ad3d6c72SMatthew Wilcox 				XA_BUG_ON(xa, entry);
169ad3d6c72SMatthew Wilcox 		}
170ad3d6c72SMatthew Wilcox 		xa_erase_index(xa, i);
171ad3d6c72SMatthew Wilcox 	}
172ad3d6c72SMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
173ad3d6c72SMatthew Wilcox }
174ad3d6c72SMatthew Wilcox 
check_xa_mark_1(struct xarray * xa,unsigned long index)1759b89a035SMatthew Wilcox static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
1769b89a035SMatthew Wilcox {
17758d6ea30SMatthew Wilcox 	unsigned int order;
17858d6ea30SMatthew Wilcox 	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1;
17958d6ea30SMatthew Wilcox 
1809b89a035SMatthew Wilcox 	/* NULL elements have no marks set */
1819b89a035SMatthew Wilcox 	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
1829b89a035SMatthew Wilcox 	xa_set_mark(xa, index, XA_MARK_0);
1839b89a035SMatthew Wilcox 	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
1849b89a035SMatthew Wilcox 
1859b89a035SMatthew Wilcox 	/* Storing a pointer will not make a mark appear */
1869b89a035SMatthew Wilcox 	XA_BUG_ON(xa, xa_store_index(xa, index, GFP_KERNEL) != NULL);
1879b89a035SMatthew Wilcox 	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
1889b89a035SMatthew Wilcox 	xa_set_mark(xa, index, XA_MARK_0);
1899b89a035SMatthew Wilcox 	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
1909b89a035SMatthew Wilcox 
1919b89a035SMatthew Wilcox 	/* Setting one mark will not set another mark */
1929b89a035SMatthew Wilcox 	XA_BUG_ON(xa, xa_get_mark(xa, index + 1, XA_MARK_0));
1939b89a035SMatthew Wilcox 	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1));
1949b89a035SMatthew Wilcox 
1959b89a035SMatthew Wilcox 	/* Storing NULL clears marks, and they can't be set again */
1969b89a035SMatthew Wilcox 	xa_erase_index(xa, index);
1979b89a035SMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
1989b89a035SMatthew Wilcox 	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
1999b89a035SMatthew Wilcox 	xa_set_mark(xa, index, XA_MARK_0);
2009b89a035SMatthew Wilcox 	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
20158d6ea30SMatthew Wilcox 
20258d6ea30SMatthew Wilcox 	/*
20358d6ea30SMatthew Wilcox 	 * Storing a multi-index entry over entries with marks gives the
20458d6ea30SMatthew Wilcox 	 * entire entry the union of the marks
20558d6ea30SMatthew Wilcox 	 */
20658d6ea30SMatthew Wilcox 	BUG_ON((index % 4) != 0);
20758d6ea30SMatthew Wilcox 	for (order = 2; order < max_order; order++) {
20858d6ea30SMatthew Wilcox 		unsigned long base = round_down(index, 1UL << order);
20958d6ea30SMatthew Wilcox 		unsigned long next = base + (1UL << order);
21058d6ea30SMatthew Wilcox 		unsigned long i;
21158d6ea30SMatthew Wilcox 
21258d6ea30SMatthew Wilcox 		XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
21358d6ea30SMatthew Wilcox 		xa_set_mark(xa, index + 1, XA_MARK_0);
21458d6ea30SMatthew Wilcox 		XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
215d69d287aSMatthew Wilcox 		xa_set_mark(xa, index + 2, XA_MARK_2);
21658d6ea30SMatthew Wilcox 		XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
217b7677a13SMatthew Wilcox 		xa_store_order(xa, index, order, xa_mk_index(index),
21858d6ea30SMatthew Wilcox 				GFP_KERNEL);
21958d6ea30SMatthew Wilcox 		for (i = base; i < next; i++) {
22093eb07f7SMatthew Wilcox 			XA_STATE(xas, xa, i);
22193eb07f7SMatthew Wilcox 			unsigned int seen = 0;
22293eb07f7SMatthew Wilcox 			void *entry;
22393eb07f7SMatthew Wilcox 
22458d6ea30SMatthew Wilcox 			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
225d69d287aSMatthew Wilcox 			XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1));
226d69d287aSMatthew Wilcox 			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2));
22793eb07f7SMatthew Wilcox 
22893eb07f7SMatthew Wilcox 			/* We should see two elements in the array */
229fffc9a26SMatthew Wilcox 			rcu_read_lock();
23093eb07f7SMatthew Wilcox 			xas_for_each(&xas, entry, ULONG_MAX)
23193eb07f7SMatthew Wilcox 				seen++;
232fffc9a26SMatthew Wilcox 			rcu_read_unlock();
23393eb07f7SMatthew Wilcox 			XA_BUG_ON(xa, seen != 2);
23493eb07f7SMatthew Wilcox 
23593eb07f7SMatthew Wilcox 			/* One of which is marked */
23693eb07f7SMatthew Wilcox 			xas_set(&xas, 0);
23793eb07f7SMatthew Wilcox 			seen = 0;
238fffc9a26SMatthew Wilcox 			rcu_read_lock();
23993eb07f7SMatthew Wilcox 			xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
24093eb07f7SMatthew Wilcox 				seen++;
241fffc9a26SMatthew Wilcox 			rcu_read_unlock();
24293eb07f7SMatthew Wilcox 			XA_BUG_ON(xa, seen != 1);
24358d6ea30SMatthew Wilcox 		}
24458d6ea30SMatthew Wilcox 		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
24558d6ea30SMatthew Wilcox 		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1));
24658d6ea30SMatthew Wilcox 		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2));
24758d6ea30SMatthew Wilcox 		xa_erase_index(xa, index);
24858d6ea30SMatthew Wilcox 		xa_erase_index(xa, next);
24958d6ea30SMatthew Wilcox 		XA_BUG_ON(xa, !xa_empty(xa));
25058d6ea30SMatthew Wilcox 	}
25158d6ea30SMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
2529b89a035SMatthew Wilcox }
2539b89a035SMatthew Wilcox 
check_xa_mark_2(struct xarray * xa)254adb9d9c4SMatthew Wilcox static noinline void check_xa_mark_2(struct xarray *xa)
255adb9d9c4SMatthew Wilcox {
256adb9d9c4SMatthew Wilcox 	XA_STATE(xas, xa, 0);
257adb9d9c4SMatthew Wilcox 	unsigned long index;
258adb9d9c4SMatthew Wilcox 	unsigned int count = 0;
259adb9d9c4SMatthew Wilcox 	void *entry;
260adb9d9c4SMatthew Wilcox 
261adb9d9c4SMatthew Wilcox 	xa_store_index(xa, 0, GFP_KERNEL);
262adb9d9c4SMatthew Wilcox 	xa_set_mark(xa, 0, XA_MARK_0);
263adb9d9c4SMatthew Wilcox 	xas_lock(&xas);
264adb9d9c4SMatthew Wilcox 	xas_load(&xas);
265adb9d9c4SMatthew Wilcox 	xas_init_marks(&xas);
266adb9d9c4SMatthew Wilcox 	xas_unlock(&xas);
267adb9d9c4SMatthew Wilcox 	XA_BUG_ON(xa, !xa_get_mark(xa, 0, XA_MARK_0) == 0);
268adb9d9c4SMatthew Wilcox 
269adb9d9c4SMatthew Wilcox 	for (index = 3500; index < 4500; index++) {
270adb9d9c4SMatthew Wilcox 		xa_store_index(xa, index, GFP_KERNEL);
271adb9d9c4SMatthew Wilcox 		xa_set_mark(xa, index, XA_MARK_0);
272adb9d9c4SMatthew Wilcox 	}
273adb9d9c4SMatthew Wilcox 
274adb9d9c4SMatthew Wilcox 	xas_reset(&xas);
275adb9d9c4SMatthew Wilcox 	rcu_read_lock();
276adb9d9c4SMatthew Wilcox 	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
277adb9d9c4SMatthew Wilcox 		count++;
278adb9d9c4SMatthew Wilcox 	rcu_read_unlock();
279adb9d9c4SMatthew Wilcox 	XA_BUG_ON(xa, count != 1000);
280adb9d9c4SMatthew Wilcox 
281adb9d9c4SMatthew Wilcox 	xas_lock(&xas);
282adb9d9c4SMatthew Wilcox 	xas_for_each(&xas, entry, ULONG_MAX) {
283adb9d9c4SMatthew Wilcox 		xas_init_marks(&xas);
284adb9d9c4SMatthew Wilcox 		XA_BUG_ON(xa, !xa_get_mark(xa, xas.xa_index, XA_MARK_0));
285adb9d9c4SMatthew Wilcox 		XA_BUG_ON(xa, !xas_get_mark(&xas, XA_MARK_0));
286adb9d9c4SMatthew Wilcox 	}
287adb9d9c4SMatthew Wilcox 	xas_unlock(&xas);
288adb9d9c4SMatthew Wilcox 
289adb9d9c4SMatthew Wilcox 	xa_destroy(xa);
290adb9d9c4SMatthew Wilcox }
291adb9d9c4SMatthew Wilcox 
check_xa_mark_3(struct xarray * xa)29204e9e9bbSMatthew Wilcox (Oracle) static noinline void check_xa_mark_3(struct xarray *xa)
29304e9e9bbSMatthew Wilcox (Oracle) {
29404e9e9bbSMatthew Wilcox (Oracle) #ifdef CONFIG_XARRAY_MULTI
29504e9e9bbSMatthew Wilcox (Oracle) 	XA_STATE(xas, xa, 0x41);
29604e9e9bbSMatthew Wilcox (Oracle) 	void *entry;
29704e9e9bbSMatthew Wilcox (Oracle) 	int count = 0;
29804e9e9bbSMatthew Wilcox (Oracle) 
29904e9e9bbSMatthew Wilcox (Oracle) 	xa_store_order(xa, 0x40, 2, xa_mk_index(0x40), GFP_KERNEL);
30004e9e9bbSMatthew Wilcox (Oracle) 	xa_set_mark(xa, 0x41, XA_MARK_0);
30104e9e9bbSMatthew Wilcox (Oracle) 
30204e9e9bbSMatthew Wilcox (Oracle) 	rcu_read_lock();
30304e9e9bbSMatthew Wilcox (Oracle) 	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
30404e9e9bbSMatthew Wilcox (Oracle) 		count++;
30504e9e9bbSMatthew Wilcox (Oracle) 		XA_BUG_ON(xa, entry != xa_mk_index(0x40));
30604e9e9bbSMatthew Wilcox (Oracle) 	}
30704e9e9bbSMatthew Wilcox (Oracle) 	XA_BUG_ON(xa, count != 1);
30804e9e9bbSMatthew Wilcox (Oracle) 	rcu_read_unlock();
30904e9e9bbSMatthew Wilcox (Oracle) 	xa_destroy(xa);
31004e9e9bbSMatthew Wilcox (Oracle) #endif
31104e9e9bbSMatthew Wilcox (Oracle) }
31204e9e9bbSMatthew Wilcox (Oracle) 
check_xa_mark(struct xarray * xa)3139b89a035SMatthew Wilcox static noinline void check_xa_mark(struct xarray *xa)
3149b89a035SMatthew Wilcox {
3159b89a035SMatthew Wilcox 	unsigned long index;
3169b89a035SMatthew Wilcox 
3179b89a035SMatthew Wilcox 	for (index = 0; index < 16384; index += 4)
3189b89a035SMatthew Wilcox 		check_xa_mark_1(xa, index);
319adb9d9c4SMatthew Wilcox 
320adb9d9c4SMatthew Wilcox 	check_xa_mark_2(xa);
32104e9e9bbSMatthew Wilcox (Oracle) 	check_xa_mark_3(xa);
3229b89a035SMatthew Wilcox }
3239b89a035SMatthew Wilcox 
check_xa_shrink(struct xarray * xa)32458d6ea30SMatthew Wilcox static noinline void check_xa_shrink(struct xarray *xa)
32558d6ea30SMatthew Wilcox {
32658d6ea30SMatthew Wilcox 	XA_STATE(xas, xa, 1);
32758d6ea30SMatthew Wilcox 	struct xa_node *node;
32893eb07f7SMatthew Wilcox 	unsigned int order;
32993eb07f7SMatthew Wilcox 	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 15 : 1;
33058d6ea30SMatthew Wilcox 
33158d6ea30SMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
33258d6ea30SMatthew Wilcox 	XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL);
33358d6ea30SMatthew Wilcox 	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);
33458d6ea30SMatthew Wilcox 
33558d6ea30SMatthew Wilcox 	/*
33658d6ea30SMatthew Wilcox 	 * Check that erasing the entry at 1 shrinks the tree and properly
33758d6ea30SMatthew Wilcox 	 * marks the node as being deleted.
33858d6ea30SMatthew Wilcox 	 */
33958d6ea30SMatthew Wilcox 	xas_lock(&xas);
34058d6ea30SMatthew Wilcox 	XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1));
34158d6ea30SMatthew Wilcox 	node = xas.xa_node;
34258d6ea30SMatthew Wilcox 	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0));
34358d6ea30SMatthew Wilcox 	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
34458d6ea30SMatthew Wilcox 	XA_BUG_ON(xa, xa_load(xa, 1) != NULL);
34558d6ea30SMatthew Wilcox 	XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS);
34658d6ea30SMatthew Wilcox 	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY);
34758d6ea30SMatthew Wilcox 	XA_BUG_ON(xa, xas_load(&xas) != NULL);
34858d6ea30SMatthew Wilcox 	xas_unlock(&xas);
34958d6ea30SMatthew Wilcox 	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
35058d6ea30SMatthew Wilcox 	xa_erase_index(xa, 0);
35158d6ea30SMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
35293eb07f7SMatthew Wilcox 
35393eb07f7SMatthew Wilcox 	for (order = 0; order < max_order; order++) {
35493eb07f7SMatthew Wilcox 		unsigned long max = (1UL << order) - 1;
35593eb07f7SMatthew Wilcox 		xa_store_order(xa, 0, order, xa_mk_value(0), GFP_KERNEL);
35693eb07f7SMatthew Wilcox 		XA_BUG_ON(xa, xa_load(xa, max) != xa_mk_value(0));
35793eb07f7SMatthew Wilcox 		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
35893eb07f7SMatthew Wilcox 		rcu_read_lock();
35993eb07f7SMatthew Wilcox 		node = xa_head(xa);
36093eb07f7SMatthew Wilcox 		rcu_read_unlock();
36193eb07f7SMatthew Wilcox 		XA_BUG_ON(xa, xa_store_index(xa, ULONG_MAX, GFP_KERNEL) !=
36293eb07f7SMatthew Wilcox 				NULL);
36393eb07f7SMatthew Wilcox 		rcu_read_lock();
36493eb07f7SMatthew Wilcox 		XA_BUG_ON(xa, xa_head(xa) == node);
36593eb07f7SMatthew Wilcox 		rcu_read_unlock();
36693eb07f7SMatthew Wilcox 		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
36793eb07f7SMatthew Wilcox 		xa_erase_index(xa, ULONG_MAX);
36893eb07f7SMatthew Wilcox 		XA_BUG_ON(xa, xa->xa_head != node);
36993eb07f7SMatthew Wilcox 		xa_erase_index(xa, 0);
37093eb07f7SMatthew Wilcox 	}
37158d6ea30SMatthew Wilcox }
37258d6ea30SMatthew Wilcox 
check_insert(struct xarray * xa)37312fd2aeeSMatthew Wilcox static noinline void check_insert(struct xarray *xa)
37412fd2aeeSMatthew Wilcox {
37512fd2aeeSMatthew Wilcox 	unsigned long i;
37612fd2aeeSMatthew Wilcox 
37712fd2aeeSMatthew Wilcox 	for (i = 0; i < 1024; i++) {
37812fd2aeeSMatthew Wilcox 		xa_insert_index(xa, i);
37912fd2aeeSMatthew Wilcox 		XA_BUG_ON(xa, xa_load(xa, i - 1) != NULL);
38012fd2aeeSMatthew Wilcox 		XA_BUG_ON(xa, xa_load(xa, i + 1) != NULL);
38112fd2aeeSMatthew Wilcox 		xa_erase_index(xa, i);
38212fd2aeeSMatthew Wilcox 	}
38312fd2aeeSMatthew Wilcox 
38412fd2aeeSMatthew Wilcox 	for (i = 10; i < BITS_PER_LONG; i++) {
38512fd2aeeSMatthew Wilcox 		xa_insert_index(xa, 1UL << i);
38612fd2aeeSMatthew Wilcox 		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 1) != NULL);
38712fd2aeeSMatthew Wilcox 		XA_BUG_ON(xa, xa_load(xa, (1UL << i) + 1) != NULL);
38812fd2aeeSMatthew Wilcox 		xa_erase_index(xa, 1UL << i);
38912fd2aeeSMatthew Wilcox 
39012fd2aeeSMatthew Wilcox 		xa_insert_index(xa, (1UL << i) - 1);
39112fd2aeeSMatthew Wilcox 		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 2) != NULL);
39212fd2aeeSMatthew Wilcox 		XA_BUG_ON(xa, xa_load(xa, 1UL << i) != NULL);
39312fd2aeeSMatthew Wilcox 		xa_erase_index(xa, (1UL << i) - 1);
39412fd2aeeSMatthew Wilcox 	}
39512fd2aeeSMatthew Wilcox 
39612fd2aeeSMatthew Wilcox 	xa_insert_index(xa, ~0UL);
39712fd2aeeSMatthew Wilcox 	XA_BUG_ON(xa, xa_load(xa, 0UL) != NULL);
39812fd2aeeSMatthew Wilcox 	XA_BUG_ON(xa, xa_load(xa, ~1UL) != NULL);
39912fd2aeeSMatthew Wilcox 	xa_erase_index(xa, ~0UL);
40012fd2aeeSMatthew Wilcox 
40112fd2aeeSMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
40212fd2aeeSMatthew Wilcox }
40312fd2aeeSMatthew Wilcox 
check_cmpxchg(struct xarray * xa)40441aec91fSMatthew Wilcox static noinline void check_cmpxchg(struct xarray *xa)
40541aec91fSMatthew Wilcox {
40641aec91fSMatthew Wilcox 	void *FIVE = xa_mk_value(5);
40741aec91fSMatthew Wilcox 	void *SIX = xa_mk_value(6);
40841aec91fSMatthew Wilcox 	void *LOTS = xa_mk_value(12345678);
40941aec91fSMatthew Wilcox 
41041aec91fSMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
41141aec91fSMatthew Wilcox 	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL);
412fd9dc93eSMatthew Wilcox 	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EBUSY);
41341aec91fSMatthew Wilcox 	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS);
41441aec91fSMatthew Wilcox 	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS);
41541aec91fSMatthew Wilcox 	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
41641aec91fSMatthew Wilcox 	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
41741aec91fSMatthew Wilcox 	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
418062b7359SMatthew Wilcox (Oracle) 	XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) != -EBUSY);
419062b7359SMatthew Wilcox (Oracle) 	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != FIVE);
420062b7359SMatthew Wilcox (Oracle) 	XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) == -EBUSY);
42141aec91fSMatthew Wilcox 	xa_erase_index(xa, 12345678);
42241aec91fSMatthew Wilcox 	xa_erase_index(xa, 5);
42341aec91fSMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
42441aec91fSMatthew Wilcox }
42541aec91fSMatthew Wilcox 
check_reserve(struct xarray * xa)4269f14d4f1SMatthew Wilcox static noinline void check_reserve(struct xarray *xa)
4279f14d4f1SMatthew Wilcox {
4289f14d4f1SMatthew Wilcox 	void *entry;
4294a31896cSMatthew Wilcox 	unsigned long index;
430b38f6c50SMatthew Wilcox 	int count;
4319f14d4f1SMatthew Wilcox 
4329f14d4f1SMatthew Wilcox 	/* An array with a reserved entry is not empty */
4339f14d4f1SMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
434f818b82bSMatthew Wilcox 	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
4359f14d4f1SMatthew Wilcox 	XA_BUG_ON(xa, xa_empty(xa));
4369f14d4f1SMatthew Wilcox 	XA_BUG_ON(xa, xa_load(xa, 12345678));
4379f14d4f1SMatthew Wilcox 	xa_release(xa, 12345678);
4389f14d4f1SMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
4399f14d4f1SMatthew Wilcox 
4409f14d4f1SMatthew Wilcox 	/* Releasing a used entry does nothing */
441f818b82bSMatthew Wilcox 	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
4429f14d4f1SMatthew Wilcox 	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL);
4439f14d4f1SMatthew Wilcox 	xa_release(xa, 12345678);
4449f14d4f1SMatthew Wilcox 	xa_erase_index(xa, 12345678);
4459f14d4f1SMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
4469f14d4f1SMatthew Wilcox 
447b38f6c50SMatthew Wilcox 	/* cmpxchg sees a reserved entry as ZERO */
448f818b82bSMatthew Wilcox 	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
449b38f6c50SMatthew Wilcox 	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, XA_ZERO_ENTRY,
450b38f6c50SMatthew Wilcox 				xa_mk_value(12345678), GFP_NOWAIT) != NULL);
4519f14d4f1SMatthew Wilcox 	xa_release(xa, 12345678);
4529f14d4f1SMatthew Wilcox 	xa_erase_index(xa, 12345678);
4539f14d4f1SMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
4549f14d4f1SMatthew Wilcox 
455b38f6c50SMatthew Wilcox 	/* xa_insert treats it as busy */
456f818b82bSMatthew Wilcox 	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
457b0606fedSMatthew Wilcox 	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) !=
458fd9dc93eSMatthew Wilcox 			-EBUSY);
459b0606fedSMatthew Wilcox 	XA_BUG_ON(xa, xa_empty(xa));
460b0606fedSMatthew Wilcox 	XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL);
4614c0608f4SMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
4624c0608f4SMatthew Wilcox 
4639f14d4f1SMatthew Wilcox 	/* Can iterate through a reserved entry */
4649f14d4f1SMatthew Wilcox 	xa_store_index(xa, 5, GFP_KERNEL);
465f818b82bSMatthew Wilcox 	XA_BUG_ON(xa, xa_reserve(xa, 6, GFP_KERNEL) != 0);
4669f14d4f1SMatthew Wilcox 	xa_store_index(xa, 7, GFP_KERNEL);
4679f14d4f1SMatthew Wilcox 
468b38f6c50SMatthew Wilcox 	count = 0;
4694a31896cSMatthew Wilcox 	xa_for_each(xa, index, entry) {
4709f14d4f1SMatthew Wilcox 		XA_BUG_ON(xa, index != 5 && index != 7);
471b38f6c50SMatthew Wilcox 		count++;
4729f14d4f1SMatthew Wilcox 	}
473b38f6c50SMatthew Wilcox 	XA_BUG_ON(xa, count != 2);
474b38f6c50SMatthew Wilcox 
475b38f6c50SMatthew Wilcox 	/* If we free a reserved entry, we should be able to allocate it */
476b38f6c50SMatthew Wilcox 	if (xa->xa_flags & XA_FLAGS_ALLOC) {
477b38f6c50SMatthew Wilcox 		u32 id;
478b38f6c50SMatthew Wilcox 
479b38f6c50SMatthew Wilcox 		XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(8),
480b38f6c50SMatthew Wilcox 					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
481b38f6c50SMatthew Wilcox 		XA_BUG_ON(xa, id != 8);
482b38f6c50SMatthew Wilcox 
483b38f6c50SMatthew Wilcox 		xa_release(xa, 6);
484b38f6c50SMatthew Wilcox 		XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(6),
485b38f6c50SMatthew Wilcox 					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
486b38f6c50SMatthew Wilcox 		XA_BUG_ON(xa, id != 6);
487b38f6c50SMatthew Wilcox 	}
488b38f6c50SMatthew Wilcox 
4899f14d4f1SMatthew Wilcox 	xa_destroy(xa);
4909f14d4f1SMatthew Wilcox }
4919f14d4f1SMatthew Wilcox 
check_xas_erase(struct xarray * xa)492b803b428SMatthew Wilcox static noinline void check_xas_erase(struct xarray *xa)
493b803b428SMatthew Wilcox {
494b803b428SMatthew Wilcox 	XA_STATE(xas, xa, 0);
495b803b428SMatthew Wilcox 	void *entry;
496b803b428SMatthew Wilcox 	unsigned long i, j;
497b803b428SMatthew Wilcox 
498b803b428SMatthew Wilcox 	for (i = 0; i < 200; i++) {
499b803b428SMatthew Wilcox 		for (j = i; j < 2 * i + 17; j++) {
500b803b428SMatthew Wilcox 			xas_set(&xas, j);
501b803b428SMatthew Wilcox 			do {
502b803b428SMatthew Wilcox 				xas_lock(&xas);
503b7677a13SMatthew Wilcox 				xas_store(&xas, xa_mk_index(j));
504b803b428SMatthew Wilcox 				xas_unlock(&xas);
505b803b428SMatthew Wilcox 			} while (xas_nomem(&xas, GFP_KERNEL));
506b803b428SMatthew Wilcox 		}
507b803b428SMatthew Wilcox 
508b803b428SMatthew Wilcox 		xas_set(&xas, ULONG_MAX);
509b803b428SMatthew Wilcox 		do {
510b803b428SMatthew Wilcox 			xas_lock(&xas);
511b803b428SMatthew Wilcox 			xas_store(&xas, xa_mk_value(0));
512b803b428SMatthew Wilcox 			xas_unlock(&xas);
513b803b428SMatthew Wilcox 		} while (xas_nomem(&xas, GFP_KERNEL));
514b803b428SMatthew Wilcox 
515b803b428SMatthew Wilcox 		xas_lock(&xas);
516b803b428SMatthew Wilcox 		xas_store(&xas, NULL);
517b803b428SMatthew Wilcox 
518b803b428SMatthew Wilcox 		xas_set(&xas, 0);
519b803b428SMatthew Wilcox 		j = i;
520b803b428SMatthew Wilcox 		xas_for_each(&xas, entry, ULONG_MAX) {
521b7677a13SMatthew Wilcox 			XA_BUG_ON(xa, entry != xa_mk_index(j));
522b803b428SMatthew Wilcox 			xas_store(&xas, NULL);
523b803b428SMatthew Wilcox 			j++;
524b803b428SMatthew Wilcox 		}
525b803b428SMatthew Wilcox 		xas_unlock(&xas);
526b803b428SMatthew Wilcox 		XA_BUG_ON(xa, !xa_empty(xa));
527b803b428SMatthew Wilcox 	}
528b803b428SMatthew Wilcox }
529b803b428SMatthew Wilcox 
5304f06d630SMatthew Wilcox #ifdef CONFIG_XARRAY_MULTI
check_multi_store_1(struct xarray * xa,unsigned long index,unsigned int order)5314f06d630SMatthew Wilcox static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
5324f06d630SMatthew Wilcox 		unsigned int order)
5334f06d630SMatthew Wilcox {
5344f06d630SMatthew Wilcox 	XA_STATE(xas, xa, index);
5354f06d630SMatthew Wilcox 	unsigned long min = index & ~((1UL << order) - 1);
5364f06d630SMatthew Wilcox 	unsigned long max = min + (1UL << order);
5374f06d630SMatthew Wilcox 
538b7677a13SMatthew Wilcox 	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
539b7677a13SMatthew Wilcox 	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index));
540b7677a13SMatthew Wilcox 	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index));
5414f06d630SMatthew Wilcox 	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
5424f06d630SMatthew Wilcox 	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
5434f06d630SMatthew Wilcox 
544fffc9a26SMatthew Wilcox 	xas_lock(&xas);
545b7677a13SMatthew Wilcox 	XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index));
546fffc9a26SMatthew Wilcox 	xas_unlock(&xas);
547b7677a13SMatthew Wilcox 	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min));
548b7677a13SMatthew Wilcox 	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min));
5494f06d630SMatthew Wilcox 	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
5504f06d630SMatthew Wilcox 	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
5514f06d630SMatthew Wilcox 
5524f06d630SMatthew Wilcox 	xa_erase_index(xa, min);
5534f06d630SMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
5544f06d630SMatthew Wilcox }
5554f06d630SMatthew Wilcox 
/*
 * Check that xas_store() on a multi-index entry leaves the xa_state's
 * index unchanged, and that storing NULL through the same state erases
 * the entire multi-index entry.
 */
static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, index);
	xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);

	xas_lock(&xas);
	/* Replacing the entry returns the old value ... */
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0));
	/* ... and must not move the cursor off @index */
	XA_BUG_ON(xa, xas.xa_index != index);
	/* Storing NULL erases the whole multi-index entry */
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	xas_unlock(&xas);
	XA_BUG_ON(xa, !xa_empty(xa));
}
5694f145cd6SMatthew Wilcox 
/*
 * A multi-index entry must be seen exactly once by an iteration that
 * starts before it, and exactly once more by an iteration that starts
 * in the middle of it (at index + 1).
 */
static noinline void check_multi_store_3(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	int n = 0;		/* number of entries the iterations saw */

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);

	xas_lock(&xas);
	/* Full iteration from 0: the entry appears exactly once */
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 1);
	/* Restart inside the entry's range: it is still found once */
	xas_set(&xas, index + 1);
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 2);
	xas_unlock(&xas);

	xa_destroy(xa);
}
5954f06d630SMatthew Wilcox #endif
5964f06d630SMatthew Wilcox 
/*
 * Exercise storage of multi-index (multi-order) entries: loads from every
 * covered slot, overwrites of different orders on top of each other, and
 * erasure of a whole range with a single NULL store.  Also peeks at the
 * internal node to verify slot/value accounting.  Compiled out when
 * CONFIG_XARRAY_MULTI is disabled.
 */
static noinline void check_multi_store(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long i, j, k;
	/* Highest order that still fits in a positive long index */
	unsigned int max_order = (sizeof(long) == 4) ? 30 : 60;

	/* Loading from any position returns the same value */
	xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	/* Peek inside the node: the order-1 entry occupies two slots */
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Storing adjacent to the value does not alter the value */
	xa_store(xa, 3, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	/* One more slot used; the pointer at 3 is not a value entry */
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Overwriting multiple indexes works */
	xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 4) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4);
	rcu_read_unlock();

	/* We can erase multiple values with a single store */
	xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Even when the first slot is empty but the others aren't */
	xa_store_index(xa, 1, GFP_KERNEL);
	xa_store_index(xa, 2, GFP_KERNEL);
	xa_store_order(xa, 0, 2, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/*
	 * Store an order-i entry at 0, overwrite it with an order-j entry,
	 * then check which power-of-two-minus-one indices are covered:
	 * only those inside the larger of the two ranges.
	 */
	for (i = 0; i < max_order; i++) {
		for (j = 0; j < max_order; j++) {
			xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL);
			xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL);

			for (k = 0; k < max_order; k++) {
				void *entry = xa_load(xa, (1UL << k) - 1);
				if ((i < k) && (j < k))
					XA_BUG_ON(xa, entry != NULL);
				else
					XA_BUG_ON(xa, entry != xa_mk_index(j));
			}

			xa_erase(xa, 0);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}

	for (i = 0; i < 20; i++) {
		check_multi_store_1(xa, 200, i);
		check_multi_store_1(xa, 0, i);
		check_multi_store_1(xa, (1UL << i) + 1, i);
	}
	check_multi_store_2(xa, 4095, 9);

	for (i = 1; i < 20; i++) {
		check_multi_store_3(xa, 0, i);
		check_multi_store_3(xa, 1UL << i, i);
	}
#endif
}
67658d6ea30SMatthew Wilcox 
/*
 * Exercise ID allocation on an allocating XArray whose lowest allocatable
 * ID is @base (0 for DEFINE_XARRAY_ALLOC, 1 for DEFINE_XARRAY_ALLOC1):
 * reuse of freed IDs, interaction with plain stores, and failure with
 * -EBUSY once the requested limit range is exhausted.
 */
static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base)
{
	int i;
	u32 id;

	XA_BUG_ON(xa, !xa_empty(xa));
	/* An empty array should assign %base to the first alloc */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Erasing it should make the array empty again */
	xa_erase_index(xa, base);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* And it should assign %base again */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Allocating and then erasing a lot should not lose base */
	for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++)
		xa_alloc_index(xa, i, GFP_KERNEL);
	for (i = base; i < 2 * XA_CHUNK_SIZE; i++)
		xa_erase_index(xa, i);
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Destroying the array should do the same as erasing */
	xa_destroy(xa);

	/* And it should assign %base again */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* The next assigned ID should be base+1 */
	xa_alloc_index(xa, base + 1, GFP_KERNEL);
	xa_erase_index(xa, base + 1);

	/* Storing a value should mark it used */
	xa_store_index(xa, base + 1, GFP_KERNEL);
	xa_alloc_index(xa, base + 2, GFP_KERNEL);

	/* If we then erase base, it should be free */
	xa_erase_index(xa, base);
	xa_alloc_index(xa, base, GFP_KERNEL);

	xa_erase_index(xa, base + 1);
	xa_erase_index(xa, base + 2);

	/* Allocate enough IDs to force the tree to grow several levels */
	for (i = 1; i < 5000; i++) {
		xa_alloc_index(xa, base + i, GFP_KERNEL);
	}

	xa_destroy(xa);

	/* Check that we fail properly at the limit of allocation */
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX - 1),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0xfffffffeU);
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0xffffffffU);
	/* Range now full: alloc must fail and must not touch @id */
	id = 3;
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(0),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, id != 3);
	xa_destroy(xa);

	/* An inverted limit (min > max) can never be satisfied */
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
	xa_erase_index(xa, 3);
	XA_BUG_ON(xa, !xa_empty(xa));
}
7513ccaf57aSMatthew Wilcox 
/*
 * Check allocation of NULL entries: an allocated NULL occupies its ID
 * (the array is not empty) and the ID is skipped by later allocations,
 * while erase/destroy reclaim it as usual.
 */
static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base)
{
	unsigned int i, id;
	unsigned long index;
	void *entry;

	/* Allocate and free a NULL and check xa_empty() behaves */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, id) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Ditto, but check destroy instead of erase */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Consecutive allocations of NULL get consecutive IDs */
	for (i = base; i < base + 10; i++) {
		XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b,
					GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != i);
	}

	/*
	 * Overwriting an allocated-NULL slot with a value, storing NULL
	 * over a value, or erasing a NULL entry all free the ID for the
	 * next allocation (5 becomes the lowest free ID here).
	 */
	XA_BUG_ON(xa, xa_store(xa, 3, xa_mk_index(3), GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store(xa, 4, xa_mk_index(4), GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store(xa, 4, NULL, GFP_KERNEL) != xa_mk_index(4));
	XA_BUG_ON(xa, xa_erase(xa, 5) != NULL);
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 5);

	/* NULL entries are invisible to iteration, so erase by hand */
	xa_for_each(xa, index, entry) {
		xa_erase_index(xa, index);
	}

	/* The remaining allocated-NULL IDs still hold the array non-empty */
	for (i = base; i < base + 9; i++) {
		XA_BUG_ON(xa, xa_erase(xa, i) != NULL);
		XA_BUG_ON(xa, xa_empty(xa));
	}
	XA_BUG_ON(xa, xa_erase(xa, 8) != NULL);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, base + 9) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	xa_destroy(xa);
}
802a3e4d3f9SMatthew Wilcox 
/*
 * Exercise xa_alloc_cyclic(): allocation starts after *next, skips busy
 * IDs, and wraps back to the bottom of the limit when the top is reached
 * (returning 1 instead of 0 on the allocation that wraps).
 */
static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
{
	struct xa_limit limit = XA_LIMIT(1, 0x3fff);
	u32 next = 0;
	unsigned int i, id;
	unsigned long index;
	void *entry;

	/* First cyclic alloc with next == 0 assigns the limit's minimum */
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 1);

	/* Allocation resumes from *next, not from the lowest free ID */
	next = 0x3ffd;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0x3ffd);
	xa_erase_index(xa, 0x3ffd);
	xa_erase_index(xa, 1);
	XA_BUG_ON(xa, !xa_empty(xa));

	/*
	 * Walk across the top of the limit: IDs 0x3ffe and 0x3fff are
	 * handed out, then the counter wraps to 1, 2, 3.  The wrapping
	 * allocation (id == 1) returns 1 rather than 0.
	 */
	for (i = 0x3ffe; i < 0x4003; i++) {
		if (i < 0x4000)
			entry = xa_mk_index(i);
		else
			entry = xa_mk_index(i - 0x3fff);
		XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit,
					&next, GFP_KERNEL) != (id == 1));
		XA_BUG_ON(xa, xa_mk_index(id) != entry);
	}

	/* Check wrap-around is handled correctly */
	if (base != 0)
		xa_erase_index(xa, base);
	xa_erase_index(xa, base + 1);
	next = UINT_MAX;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != UINT_MAX);
	/* Wrapping past UINT_MAX lands on %base and returns 1 */
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base),
				xa_limit_32b, &next, GFP_KERNEL) != 1);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base + 1);

	xa_for_each(xa, index, entry)
		xa_erase_index(xa, index);

	XA_BUG_ON(xa, !xa_empty(xa));
}
8532fa044e5SMatthew Wilcox 
/* Allocating arrays for check_xa_alloc*(): xa0 hands out IDs from 0, xa1 from 1 */
static DEFINE_XARRAY_ALLOC(xa0);
static DEFINE_XARRAY_ALLOC1(xa1);
8563ccaf57aSMatthew Wilcox 
check_xa_alloc(void)8573ccaf57aSMatthew Wilcox static noinline void check_xa_alloc(void)
8583ccaf57aSMatthew Wilcox {
8593ccaf57aSMatthew Wilcox 	check_xa_alloc_1(&xa0, 0);
8603ccaf57aSMatthew Wilcox 	check_xa_alloc_1(&xa1, 1);
861a3e4d3f9SMatthew Wilcox 	check_xa_alloc_2(&xa0, 0);
862a3e4d3f9SMatthew Wilcox 	check_xa_alloc_2(&xa1, 1);
8632fa044e5SMatthew Wilcox 	check_xa_alloc_3(&xa0, 0);
8642fa044e5SMatthew Wilcox 	check_xa_alloc_3(&xa1, 1);
865371c752dSMatthew Wilcox }
866371c752dSMatthew Wilcox 
/*
 * Store an order-@order entry at @start using xas_for_each_conflict() to
 * walk the entries it displaces.  Verifies that exactly @present
 * conflicting entries were seen and that the new entry covers the whole
 * range afterwards.
 */
static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
			unsigned int order, unsigned int present)
{
	XA_STATE_ORDER(xas, xa, start, order);
	void *entry;
	unsigned int count = 0;	/* conflicting entries encountered */

retry:
	xas_lock(&xas);
	/* Every conflict must be a value entry lying inside our range */
	xas_for_each_conflict(&xas, entry) {
		XA_BUG_ON(xa, !xa_is_value(entry));
		XA_BUG_ON(xa, entry < xa_mk_index(start));
		XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1));
		count++;
	}
	xas_store(&xas, xa_mk_index(start));
	xas_unlock(&xas);
	/* On allocation retry the walk restarts, so reset the count too */
	if (xas_nomem(&xas, GFP_KERNEL)) {
		count = 0;
		goto retry;
	}
	XA_BUG_ON(xa, xas_error(&xas));
	XA_BUG_ON(xa, count != present);
	/* The new entry is visible across its whole range */
	XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start));
	XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
			xa_mk_index(start));
	xa_erase_index(xa, start);
}
8954e99d4e9SMatthew Wilcox 
/*
 * Drive __check_store_iter() over a range of orders with 0, 1 or many
 * pre-existing entries inside the target range.
 */
static noinline void check_store_iter(struct xarray *xa)
{
	unsigned int i, j;
	/* Multi-order entries only exist with CONFIG_XARRAY_MULTI */
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;

	for (i = 0; i < max_order; i++) {
		/* [min, max] is the aligned order-i range starting at 1 << i */
		unsigned int min = 1 << i;
		unsigned int max = (2 << i) - 1;
		/* No conflicts when the range is empty */
		__check_store_iter(xa, 0, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
		__check_store_iter(xa, min, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));

		/* One conflict at either end of the range */
		xa_store_index(xa, min, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_index(xa, max, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));

		/* A fully-populated range yields one conflict per slot */
		for (j = 0; j < min; j++)
			xa_store_index(xa, j, GFP_KERNEL);
		__check_store_iter(xa, 0, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
		for (j = 0; j < min; j++)
			xa_store_index(xa, min + j, GFP_KERNEL);
		__check_store_iter(xa, min, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#ifdef CONFIG_XARRAY_MULTI
	/* Entry at 65 conflicts with the order-2 store at 64; 63 does not */
	xa_store_index(xa, 63, GFP_KERNEL);
	xa_store_index(xa, 65, GFP_KERNEL);
	__check_store_iter(xa, 64, 2, 1);
	xa_erase_index(xa, 63);
#endif
	XA_BUG_ON(xa, !xa_empty(xa));
}
9334e99d4e9SMatthew Wilcox 
/*
 * Check xa_find()/xa_find_after() around a multi-index entry of @order:
 * a search starting inside the entry still finds it, find_after skips
 * past it to the next entry, and find_after bounded at @next finds
 * nothing beyond the bound.
 */
static noinline void check_multi_find_1(struct xarray *xa, unsigned order)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long multi = 3 << order;	/* aligned multi-index entry */
	unsigned long next = 4 << order;	/* first index after it */
	unsigned long index;

	xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL);
	XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL);

	/* Searching from 0 lands on the multi-index entry */
	index = 0;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(multi));
	XA_BUG_ON(xa, index != multi);
	/* Searching from inside the entry's range also finds it */
	index = multi + 1;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(multi));
	XA_BUG_ON(xa, (index < multi) || (index >= next));
	/* find_after steps over the multi-index entry to @next */
	XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(next));
	XA_BUG_ON(xa, index != next);
	/* With max == next there is nothing after @next; index unchanged */
	XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL);
	XA_BUG_ON(xa, index != next);

	xa_erase_index(xa, multi);
	xa_erase_index(xa, next);
	xa_erase_index(xa, next + 1);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}
965b803b428SMatthew Wilcox 
/*
 * Iterate over a multi-index entry and erase it from inside the
 * iteration, with a neighbouring entry present, checking the iterator
 * copes with the entry disappearing under it.
 */
static noinline void check_multi_find_2(struct xarray *xa)
{
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 10 : 1;
	unsigned int i, j;
	void *entry;

	for (i = 0; i < max_order; i++) {
		unsigned long index = 1UL << i;
		/* Start the iteration at every offset within the entry */
		for (j = 0; j < index; j++) {
			XA_STATE(xas, xa, j + index);
			xa_store_index(xa, index - 1, GFP_KERNEL);
			xa_store_order(xa, index, i, xa_mk_index(index),
					GFP_KERNEL);
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX) {
				/* Erase the entry we are standing on */
				xa_erase_index(xa, index);
			}
			rcu_read_unlock();
			xa_erase_index(xa, index - 1);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}
}
989b803b428SMatthew Wilcox 
check_multi_find_3(struct xarray * xa)990bd40b17cSMatthew Wilcox (Oracle) static noinline void check_multi_find_3(struct xarray *xa)
991bd40b17cSMatthew Wilcox (Oracle) {
992bd40b17cSMatthew Wilcox (Oracle) 	unsigned int order;
993bd40b17cSMatthew Wilcox (Oracle) 
994bd40b17cSMatthew Wilcox (Oracle) 	for (order = 5; order < order_limit; order++) {
995bd40b17cSMatthew Wilcox (Oracle) 		unsigned long index = 1UL << (order - 5);
996bd40b17cSMatthew Wilcox (Oracle) 
997bd40b17cSMatthew Wilcox (Oracle) 		XA_BUG_ON(xa, !xa_empty(xa));
998bd40b17cSMatthew Wilcox (Oracle) 		xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL);
999bd40b17cSMatthew Wilcox (Oracle) 		XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT));
1000bd40b17cSMatthew Wilcox (Oracle) 		xa_erase_index(xa, 0);
1001bd40b17cSMatthew Wilcox (Oracle) 	}
1002bd40b17cSMatthew Wilcox (Oracle) }
1003bd40b17cSMatthew Wilcox (Oracle) 
/*
 * Exhaustively check xa_find() (both XA_PRESENT and XA_MARK_0 filters)
 * on a two-entry array {j, i} with j < i, from every starting index k.
 */
static noinline void check_find_1(struct xarray *xa)
{
	unsigned long i, j, k;

	XA_BUG_ON(xa, !xa_empty(xa));

	/*
	 * Check xa_find with all pairs between 0 and 99 inclusive,
	 * starting at every index between 0 and 99
	 */
	for (i = 0; i < 100; i++) {
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
		xa_set_mark(xa, i, XA_MARK_0);
		for (j = 0; j < i; j++) {
			XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) !=
					NULL);
			xa_set_mark(xa, j, XA_MARK_0);
			for (k = 0; k < 100; k++) {
				unsigned long index = k;
				void *entry = xa_find(xa, &index, ULONG_MAX,
								XA_PRESENT);
				/* The first present entry at or above k */
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);

				/* Same search, filtered by mark 0 */
				index = k;
				entry = xa_find(xa, &index, ULONG_MAX,
								XA_MARK_0);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);
			}
			/* Erasing j must clear its mark but leave i's */
			xa_erase_index(xa, j);
			XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
		}
		xa_erase_index(xa, i);
		XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}
10518229706eSMatthew Wilcox 
/*
 * Check that xa_for_each() visits no entries in an empty array, and that
 * repeated store/iterate cycles see entries at consecutive indices with
 * the expected values.
 */
static noinline void check_find_2(struct xarray *xa)
{
	void *entry;
	unsigned long i, j, index;

	/* An empty array must not produce any iterations */
	xa_for_each(xa, index, entry) {
		XA_BUG_ON(xa, true);
	}

	for (i = 0; i < 1024; i++) {
		/*
		 * NOTE(review): this deliberately stores at @index (the
		 * value left behind by the previous xa_for_each), not at
		 * @i — presumably to grow the array one index at a time;
		 * confirm against the iterator's post-loop semantics.
		 */
		xa_store_index(xa, index, GFP_KERNEL);
		j = 0;
		xa_for_each(xa, index, entry) {
			XA_BUG_ON(xa, xa_mk_index(index) != entry);
			XA_BUG_ON(xa, index != j++);
		}
	}

	xa_destroy(xa);
}
10728229706eSMatthew Wilcox 
/*
 * Check that a marked iteration whose range [j, k] ends before any
 * matching entry leaves the xa_state in the XAS_RESTART state.
 */
static noinline void check_find_3(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long i, j, k;
	void *entry;

	for (i = 0; i < 100; i++) {
		for (j = 0; j < 100; j++) {
			rcu_read_lock();
			for (k = 0; k < 100; k++) {
				xas_set(&xas, j);
				/* Walk marked entries in [j, k], discard them */
				xas_for_each_marked(&xas, entry, k, XA_MARK_0)
					;
				/* An empty range (j > k) must leave RESTART */
				if (j > k)
					XA_BUG_ON(xa,
						xas.xa_node != XAS_RESTART);
			}
			rcu_read_unlock();
		}
		/* Grow the marked population one entry per outer pass */
		xa_store_index(xa, i, GFP_KERNEL);
		xa_set_mark(xa, i, XA_MARK_0);
	}
	xa_destroy(xa);
}
109748483614SMatthew Wilcox 
check_find_4(struct xarray * xa)1098430f24f9SMatthew Wilcox (Oracle) static noinline void check_find_4(struct xarray *xa)
1099430f24f9SMatthew Wilcox (Oracle) {
1100430f24f9SMatthew Wilcox (Oracle) 	unsigned long index = 0;
1101430f24f9SMatthew Wilcox (Oracle) 	void *entry;
1102430f24f9SMatthew Wilcox (Oracle) 
1103430f24f9SMatthew Wilcox (Oracle) 	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
1104430f24f9SMatthew Wilcox (Oracle) 
1105430f24f9SMatthew Wilcox (Oracle) 	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
1106430f24f9SMatthew Wilcox (Oracle) 	XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX));
1107430f24f9SMatthew Wilcox (Oracle) 
1108430f24f9SMatthew Wilcox (Oracle) 	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
1109430f24f9SMatthew Wilcox (Oracle) 	XA_BUG_ON(xa, entry);
1110430f24f9SMatthew Wilcox (Oracle) 
1111430f24f9SMatthew Wilcox (Oracle) 	xa_erase_index(xa, ULONG_MAX);
1112430f24f9SMatthew Wilcox (Oracle) }
1113430f24f9SMatthew Wilcox (Oracle) 
check_find(struct xarray * xa)11148229706eSMatthew Wilcox static noinline void check_find(struct xarray *xa)
11158229706eSMatthew Wilcox {
111619c30f4dSMatthew Wilcox (Oracle) 	unsigned i;
111719c30f4dSMatthew Wilcox (Oracle) 
11188229706eSMatthew Wilcox 	check_find_1(xa);
11198229706eSMatthew Wilcox 	check_find_2(xa);
112048483614SMatthew Wilcox 	check_find_3(xa);
1121430f24f9SMatthew Wilcox (Oracle) 	check_find_4(xa);
112219c30f4dSMatthew Wilcox (Oracle) 
112319c30f4dSMatthew Wilcox (Oracle) 	for (i = 2; i < 10; i++)
112419c30f4dSMatthew Wilcox (Oracle) 		check_multi_find_1(xa, i);
1125b803b428SMatthew Wilcox 	check_multi_find_2(xa);
1126bd40b17cSMatthew Wilcox (Oracle) 	check_multi_find_3(xa);
1127b803b428SMatthew Wilcox }
1128b803b428SMatthew Wilcox 
/* See find_swap_entry() in mm/shmem.c */
/*
 * Walk the whole array looking for @item, calling xas_pause() after
 * every fourth entry to exercise the pause/restart path of the
 * iterator.  Returns the index at which @item was found, or -1 if it
 * was not present.
 */
static noinline unsigned long xa_find_entry(struct xarray *xa, void *item)
{
	XA_STATE(xas, xa, 0);
	unsigned int checked = 0;
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (xas_retry(&xas, entry))
			continue;
		if (entry == item)
			break;
		checked++;
		/* Pause every fourth entry to force an iterator restart */
		if ((checked % 4) != 0)
			continue;
		xas_pause(&xas);
	}
	rcu_read_unlock();

	return entry ? xas.xa_index : -1;
}
1151e21a2955SMatthew Wilcox 
/*
 * Check that xa_find_entry() locates multi-order entries stored at a
 * range of orders, offsets and indices, and returns -1 for pointers
 * that were never stored in the array.
 */
static noinline void check_find_entry(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;
	unsigned long offset, index;

	for (order = 0; order < 20; order++) {
		for (offset = 0; offset < (1UL << (order + 3));
		     offset += (1UL << order)) {
			for (index = 0; index < (1UL << (order + 5));
			     index += (1UL << order)) {
				xa_store_order(xa, index, order,
						xa_mk_index(index), GFP_KERNEL);
				XA_BUG_ON(xa, xa_load(xa, index) !=
						xa_mk_index(index));
				XA_BUG_ON(xa, xa_find_entry(xa,
						xa_mk_index(index)) != index);
			}
			/* The xarray pointer itself was never stored */
			XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
			xa_destroy(xa);
		}
	}
#endif

	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	/*
	 * NOTE(review): (unsigned long)-1 == ULONG_MAX, so a hit at index
	 * ULONG_MAX would be indistinguishable from "not found" here.
	 */
	XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1);
	xa_erase_index(xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}
1183e21a2955SMatthew Wilcox 
/*
 * Iterate over a set of multi-order entries twice: once straight
 * through, and once calling xas_pause() after every entry.  Both walks
 * must visit the same order_limit entries in the same order.
 */
static noinline void check_pause(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned int order;
	unsigned long index = 1;
	unsigned int count = 0;

	/*
	 * One entry of each order, packed end to end starting at index 1:
	 * the entries land at indices 1, 2, 4, 8, ... (1UL << n).
	 */
	for (order = 0; order < order_limit; order++) {
		XA_BUG_ON(xa, xa_store_order(xa, index, order,
					xa_mk_index(index), GFP_KERNEL));
		index += 1UL << order;
	}

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
		count++;
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, count != order_limit);

	/* Same walk again, but pausing after every entry */
	count = 0;
	xas_set(&xas, 0);
	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
		count++;
		xas_pause(&xas);
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, count != order_limit);

	xa_destroy(xa);
}
1219c36d451aSMatthew Wilcox (Oracle) 
/*
 * Check xas_next()/xas_prev() on an empty array and on an array with a
 * single entry at index 0; repeated calls past the entry must keep
 * returning NULL.
 */
static noinline void check_move_tiny(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);

	XA_BUG_ON(xa, !xa_empty(xa));
	rcu_read_lock();
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	rcu_read_unlock();
	xa_store_index(xa, 0, GFP_KERNEL);
	rcu_read_lock();
	xas_set(&xas, 0);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_index(0));
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	xas_set(&xas, 0);
	XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0));
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	rcu_read_unlock();
	xa_erase_index(xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));
}
124191abab83SMatthew Wilcox (Oracle) 
/*
 * Check xas_find() against an entry at ULONG_MAX, both continuing past
 * it directly and continuing after an xas_pause(); neither may wrap
 * around or return the entry twice.
 */
static noinline void check_move_max(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);

	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
	rcu_read_unlock();

	/* Same again, but with a pause between the two calls */
	xas_set(&xas, 0);
	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
	xas_pause(&xas);
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
	rcu_read_unlock();

	xa_erase_index(xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}
126282a22311SMatthew Wilcox (Oracle) 
/*
 * With entries at index 0 and @idx, walk forwards well past @idx and
 * then all the way back, checking that the cursor index advances by one
 * on every step and that only the two stored indices return entries.
 * Finally check that stepping off either end wraps the index to/from
 * ULONG_MAX.
 */
static noinline void check_move_small(struct xarray *xa, unsigned long idx)
{
	XA_STATE(xas, xa, 0);
	unsigned long i;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, idx, GFP_KERNEL);

	rcu_read_lock();
	for (i = 0; i < idx * 4; i++) {
		void *entry = xas_next(&xas);
		/* While inside the populated range the state stays walkable */
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	}
	xas_next(&xas);
	XA_BUG_ON(xa, xas.xa_index != i);

	/* Now walk all the way back down to index 0 */
	do {
		void *entry = xas_prev(&xas);
		i--;
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	} while (i > 0);

	/* Stepping past either end wraps the index between 0 and ULONG_MAX */
	xas_set(&xas, ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != 0);
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	rcu_read_unlock();

	xa_erase_index(xa, 0);
	xa_erase_index(xa, idx);
	XA_BUG_ON(xa, !xa_empty(xa));
}
131064d3e9a9SMatthew Wilcox 
/*
 * Fill indices 0..2^16-1, then iterate backwards and forwards over the
 * whole range, checking entry values and cursor indices.  Punch a hole
 * in the middle and repeat, then run the smaller movement tests.
 */
static noinline void check_move(struct xarray *xa)
{
	XA_STATE(xas, xa, (1 << 16) - 1);
	unsigned long i;

	for (i = 0; i < (1 << 16); i++)
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);

	/* Walk backwards from the top to index 0 */
	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	/* One more step wraps the index to ULONG_MAX */
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	/* And forwards again over the full range */
	do {
		void *entry = xas_next(&xas);
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	/* Punch a hole covering [2^8, 2^15) */
	for (i = (1 << 8); i < (1 << 15); i++)
		xa_erase_index(xa, i);

	i = xas.xa_index;

	/* Backwards over the sparse array: NULL inside the hole */
	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	/* And forwards over the sparse array */
	do {
		void *entry = xas_next(&xas);
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	xa_destroy(xa);

	check_move_tiny(xa);
	check_move_max(xa);

	for (i = 0; i < 16; i++)
		check_move_small(xa, 1UL << i);

	for (i = 2; i < 16; i++)
		check_move_small(xa, (1UL << i) - 1);
}
137964d3e9a9SMatthew Wilcox 
/*
 * Store 1 << @order consecutive value entries starting at @index using
 * xas_create_range() and the usual xas_nomem() retry loop.  Asserts
 * that none of the slots was previously occupied.
 */
static noinline void xa_store_many_order(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned int i = 0;

	do {
		xas_lock(&xas);
		XA_BUG_ON(xa, xas_find_conflict(&xas));
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1U << order); i++) {
			/* xas_store() returns the old entry; must be NULL */
			XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i)));
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));
}
14022264f513SMatthew Wilcox 
check_create_range_1(struct xarray * xa,unsigned long index,unsigned order)14032264f513SMatthew Wilcox static noinline void check_create_range_1(struct xarray *xa,
14042264f513SMatthew Wilcox 		unsigned long index, unsigned order)
14052264f513SMatthew Wilcox {
14062264f513SMatthew Wilcox 	unsigned long i;
14072264f513SMatthew Wilcox 
14082264f513SMatthew Wilcox 	xa_store_many_order(xa, index, order);
14092264f513SMatthew Wilcox 	for (i = index; i < index + (1UL << order); i++)
14102264f513SMatthew Wilcox 		xa_erase_index(xa, i);
14112264f513SMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
14122264f513SMatthew Wilcox }
14132264f513SMatthew Wilcox 
/*
 * Tile nr * nr indices with nr-slot ranges, then erase them one index
 * at a time and check the array ends up empty.
 */
static noinline void check_create_range_2(struct xarray *xa, unsigned order)
{
	unsigned long i;
	unsigned long nr = 1UL << order;

	for (i = 0; i < nr * nr; i += nr)
		xa_store_many_order(xa, i, order);
	for (i = 0; i < nr * nr; i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}
14252264f513SMatthew Wilcox 
/* xas_create_range() must not disturb an error already set on the state */
static noinline void check_create_range_3(void)
{
	XA_STATE(xas, NULL, 0);
	xas_set_err(&xas, -EEXIST);
	xas_create_range(&xas);
	XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST);
}
14332264f513SMatthew Wilcox 
/*
 * Like xa_store_many_order(), but with an entry pre-existing at @index
 * inside the range: storing over that slot must return the old entry,
 * every other slot must have been empty.
 */
static noinline void check_create_range_4(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	/* XA_STATE_ORDER rounds xa_index down to the range's first slot */
	unsigned long base = xas.xa_index;
	unsigned long i = 0;

	xa_store_index(xa, index, GFP_KERNEL);
	do {
		xas_lock(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1UL << order); i++) {
			void *old = xas_store(&xas, xa_mk_index(base + i));
			if (xas.xa_index == index)
				XA_BUG_ON(xa, old != xa_mk_index(base + i));
			else
				XA_BUG_ON(xa, old != NULL);
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));

	for (i = base; i < base + (1UL << order); i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}
14652264f513SMatthew Wilcox 
/*
 * Repeatedly call xas_create_range() over an already-present
 * multi-order entry.  There are no assertions inside the loop; the test
 * passes if every iteration completes.
 */
static noinline void check_create_range_5(struct xarray *xa,
		unsigned long index, unsigned int order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned int i;

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);

	for (i = 0; i < order + 10; i++) {
		do {
			xas_lock(&xas);
			xas_create_range(&xas);
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));
	}

	xa_destroy(xa);
}
1484*7521a97bSMatthew Wilcox (Oracle) 
/*
 * Run the xas_create_range() tests at every order, with range-aligned
 * and deliberately misaligned base indices.
 */
static noinline void check_create_range(struct xarray *xa)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 12 : 1;

	for (order = 0; order < max_order; order++) {
		/* Range-aligned bases */
		check_create_range_1(xa, 0, order);
		check_create_range_1(xa, 1U << order, order);
		check_create_range_1(xa, 2U << order, order);
		check_create_range_1(xa, 3U << order, order);
		check_create_range_1(xa, 1U << 24, order);
		if (order < 10)
			check_create_range_2(xa, order);

		check_create_range_4(xa, 0, order);
		check_create_range_4(xa, 1U << order, order);
		check_create_range_4(xa, 2U << order, order);
		check_create_range_4(xa, 3U << order, order);
		check_create_range_4(xa, 1U << 24, order);

		/* Misaligned bases (one above/below an aligned index) */
		check_create_range_4(xa, 1, order);
		check_create_range_4(xa, (1U << order) + 1, order);
		check_create_range_4(xa, (2U << order) + 1, order);
		check_create_range_4(xa, (2U << order) - 1, order);
		check_create_range_4(xa, (3U << order) + 1, order);
		check_create_range_4(xa, (3U << order) - 1, order);
		check_create_range_4(xa, (1U << 24) + 1, order);

		check_create_range_5(xa, 0, order);
		check_create_range_5(xa, (1U << order), order);
	}

	check_create_range_3();
}
15192264f513SMatthew Wilcox 
/*
 * Store one entry covering [first, last], check both boundaries load it
 * back and that the neighbouring indices stayed empty, then remove the
 * range again.  Range stores require CONFIG_XARRAY_MULTI.
 */
static noinline void __check_store_range(struct xarray *xa, unsigned long first,
		unsigned long last)
{
#ifdef CONFIG_XARRAY_MULTI
	xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL);

	XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first));
	XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first));
	XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL);
	XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL);

	xa_store_range(xa, first, last, NULL, GFP_KERNEL);
#endif

	XA_BUG_ON(xa, !xa_empty(xa));
}
15360e9446c3SMatthew Wilcox 
check_store_range(struct xarray * xa)15370e9446c3SMatthew Wilcox static noinline void check_store_range(struct xarray *xa)
15380e9446c3SMatthew Wilcox {
15390e9446c3SMatthew Wilcox 	unsigned long i, j;
15400e9446c3SMatthew Wilcox 
15410e9446c3SMatthew Wilcox 	for (i = 0; i < 128; i++) {
15420e9446c3SMatthew Wilcox 		for (j = i; j < 128; j++) {
15430e9446c3SMatthew Wilcox 			__check_store_range(xa, i, j);
15440e9446c3SMatthew Wilcox 			__check_store_range(xa, 128 + i, 128 + j);
15450e9446c3SMatthew Wilcox 			__check_store_range(xa, 4095 + i, 4095 + j);
15460e9446c3SMatthew Wilcox 			__check_store_range(xa, 4096 + i, 4096 + j);
15470e9446c3SMatthew Wilcox 			__check_store_range(xa, 123456 + i, 123456 + j);
15485404a7f1SMatthew Wilcox 			__check_store_range(xa, (1 << 24) + i, (1 << 24) + j);
15490e9446c3SMatthew Wilcox 		}
15500e9446c3SMatthew Wilcox 	}
15510e9446c3SMatthew Wilcox }
15520e9446c3SMatthew Wilcox 
#ifdef CONFIG_XARRAY_MULTI
/*
 * Split one order-@order entry at @index into entries of @new_order,
 * store a distinct value in each new slot, then verify that every index
 * in the old range loads the value of its new_order-aligned slot and
 * that marks still work afterwards.
 */
static void check_split_1(struct xarray *xa, unsigned long index,
				unsigned int order, unsigned int new_order)
{
	XA_STATE_ORDER(xas, xa, index, new_order);
	unsigned int i;

	xa_store_order(xa, index, order, xa, GFP_KERNEL);

	xas_split_alloc(&xas, xa, order, GFP_KERNEL);
	xas_lock(&xas);
	xas_split(&xas, xa, order);
	/* One store per new_order-sized slot of the old range */
	for (i = 0; i < (1 << order); i += (1 << new_order))
		__xa_store(xa, index + i, xa_mk_index(index + i), 0);
	xas_unlock(&xas);

	for (i = 0; i < (1 << order); i++) {
		/* Round i down to the start of its new_order slot */
		unsigned int val = index + (i & ~((1 << new_order) - 1));
		XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val));
	}

	/* Marks must still be settable on the split entries */
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));

	xa_destroy(xa);
}

/* Try every (order, new_order) combination at a few base alignments */
static noinline void check_split(struct xarray *xa)
{
	unsigned int order, new_order;

	XA_BUG_ON(xa, !xa_empty(xa));

	for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
		for (new_order = 0; new_order < order; new_order++) {
			check_split_1(xa, 0, order, new_order);
			check_split_1(xa, 1UL << order, order, new_order);
			check_split_1(xa, 3UL << order, order, new_order);
		}
	}
}
#else
static void check_split(struct xarray *xa) { }
#endif
15978fc75643SMatthew Wilcox (Oracle) 
/*
 * Allocate eight IDs using pointers at successive byte offsets within
 * @name.  IDs must come back sequentially from 0, and iterating over
 * the array must never yield anything that looks like an error entry.
 */
static void check_align_1(struct xarray *xa, char *name)
{
	int i;
	unsigned int id;
	unsigned long index;
	void *entry;

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_alloc(xa, &id, name + i, xa_limit_32b,
					GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != i);
	}
	xa_for_each(xa, index, entry)
		XA_BUG_ON(xa, xa_is_err(entry));
	xa_destroy(xa);
}
161476b4e529SMatthew Wilcox 
/*
 * We should always be able to store without allocating memory after
 * reserving a slot.
 */
static void check_align_2(struct xarray *xa, char *name)
{
	int i;

	XA_BUG_ON(xa, !xa_empty(xa));

	/* Plain store/erase at index 0 with pointers of varying alignment */
	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_store(xa, 0, name + i, GFP_KERNEL) != NULL);
		xa_erase(xa, 0);
	}

	/* After xa_reserve(), the store is done with gfp == 0 */
	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_reserve(xa, 0, GFP_KERNEL) != 0);
		XA_BUG_ON(xa, xa_store(xa, 0, name + i, 0) != NULL);
		xa_erase(xa, 0);
	}

	XA_BUG_ON(xa, !xa_empty(xa));
}
16382fbe967bSMatthew Wilcox 
check_align(struct xarray * xa)163976b4e529SMatthew Wilcox static noinline void check_align(struct xarray *xa)
164076b4e529SMatthew Wilcox {
164176b4e529SMatthew Wilcox 	char name[] = "Motorola 68000";
164276b4e529SMatthew Wilcox 
164376b4e529SMatthew Wilcox 	check_align_1(xa, name);
164476b4e529SMatthew Wilcox 	check_align_1(xa, name + 1);
164576b4e529SMatthew Wilcox 	check_align_1(xa, name + 2);
164676b4e529SMatthew Wilcox 	check_align_1(xa, name + 3);
16472fbe967bSMatthew Wilcox 	check_align_2(xa, name);
164876b4e529SMatthew Wilcox }
164976b4e529SMatthew Wilcox 
/* List of nodes whose slots are all value entries, mimicking shadow nodes */
static LIST_HEAD(shadow_nodes);

/*
 * Node update callback: a non-empty node containing only value entries
 * goes onto shadow_nodes; any other node is taken off the list.
 *
 * NOTE(review): the list_add() argument order (head first) looks
 * reversed versus the usual list_add(new, head) idiom — it threads the
 * list through each node's private_list; confirm against the upstream
 * shadow-node handling before "fixing" it.
 */
static void test_update_node(struct xa_node *node)
{
	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list))
			list_add(&shadow_nodes, &node->private_list);
	} else {
		if (!list_empty(&node->private_list))
			list_del_init(&node->private_list);
	}
}
1662a97e7904SMatthew Wilcox 
/*
 * Delete every node currently on shadow_nodes from @xa, checking each
 * one actually belongs to @xa.  Runs under the array lock.
 */
static noinline void shadow_remove(struct xarray *xa)
{
	struct xa_node *node;

	xa_lock(xa);
	while ((node = list_first_entry_or_null(&shadow_nodes,
					struct xa_node, private_list))) {
		XA_BUG_ON(xa, node->array != xa);
		list_del_init(&node->private_list);
		xa_delete_node(node, test_update_node);
	}
	xa_unlock(xa);
}
1676a97e7904SMatthew Wilcox 
/*
 * Exercise the shadow-node tracking above: a node holding only value
 * entries must appear on shadow_nodes, storing a non-value pointer must
 * take it off again, and shadow_remove() must leave the array empty.
 */
static noinline void check_workingset(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	xas_set_update(&xas, test_update_node);

	/* Two value entries: the node qualifies as a shadow node */
	do {
		xas_lock(&xas);
		xas_store(&xas, xa_mk_value(0));
		xas_next(&xas);
		xas_store(&xas, xa_mk_value(1));
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	/* Storing a pointer entry disqualifies the node... */
	xas_lock(&xas);
	xas_next(&xas);
	xas_store(&xas, &xas);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));

	/* ...and replacing it with a value entry re-qualifies it */
	xas_store(&xas, xa_mk_value(2));
	xas_unlock(&xas);
	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	shadow_remove(xa);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));
	XA_BUG_ON(xa, !xa_empty(xa));
}
1705a97e7904SMatthew Wilcox 
/*
 * Check that the pointer / value / sibling entries are accounted the
 * way we expect them to be: a node's ->count covers every occupied
 * slot while ->nr_values counts only the slots holding value entries.
 */
static noinline void check_account(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;

	for (order = 1; order < 12; order++) {
		XA_STATE(xas, xa, 1 << order);

		/* A multi-order pointer entry at index 0. */
		xa_store_order(xa, 0, order, xa, GFP_KERNEL);
		rcu_read_lock();
		xas_load(&xas);
		/* Its slots count as entries but not as values. */
		XA_BUG_ON(xa, xas.xa_node->count == 0);
		XA_BUG_ON(xa, xas.xa_node->count > (1 << order));
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
		rcu_read_unlock();

		/* Add a same-sized multi-order value entry alongside it. */
		xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order),
				GFP_KERNEL);
		/* Exactly half the occupied slots are now value entries. */
		XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2);

		/* Erasing the value entry drops nr_values back to zero. */
		xa_erase(xa, 1 << order);
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);

		xa_erase(xa, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#endif
}
1738d6427f81SMatthew Wilcox 
check_get_order(struct xarray * xa)173957417cebSMatthew Wilcox (Oracle) static noinline void check_get_order(struct xarray *xa)
174057417cebSMatthew Wilcox (Oracle) {
174157417cebSMatthew Wilcox (Oracle) 	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
174257417cebSMatthew Wilcox (Oracle) 	unsigned int order;
174357417cebSMatthew Wilcox (Oracle) 	unsigned long i, j;
174457417cebSMatthew Wilcox (Oracle) 
174557417cebSMatthew Wilcox (Oracle) 	for (i = 0; i < 3; i++)
174657417cebSMatthew Wilcox (Oracle) 		XA_BUG_ON(xa, xa_get_order(xa, i) != 0);
174757417cebSMatthew Wilcox (Oracle) 
174857417cebSMatthew Wilcox (Oracle) 	for (order = 0; order < max_order; order++) {
174957417cebSMatthew Wilcox (Oracle) 		for (i = 0; i < 10; i++) {
175057417cebSMatthew Wilcox (Oracle) 			xa_store_order(xa, i << order, order,
175157417cebSMatthew Wilcox (Oracle) 					xa_mk_index(i << order), GFP_KERNEL);
175257417cebSMatthew Wilcox (Oracle) 			for (j = i << order; j < (i + 1) << order; j++)
175357417cebSMatthew Wilcox (Oracle) 				XA_BUG_ON(xa, xa_get_order(xa, j) != order);
175457417cebSMatthew Wilcox (Oracle) 			xa_erase(xa, i << order);
175557417cebSMatthew Wilcox (Oracle) 		}
175657417cebSMatthew Wilcox (Oracle) 	}
175757417cebSMatthew Wilcox (Oracle) }
175857417cebSMatthew Wilcox (Oracle) 
check_destroy(struct xarray * xa)1759687149fcSMatthew Wilcox static noinline void check_destroy(struct xarray *xa)
1760687149fcSMatthew Wilcox {
1761687149fcSMatthew Wilcox 	unsigned long index;
1762687149fcSMatthew Wilcox 
1763687149fcSMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
1764687149fcSMatthew Wilcox 
1765687149fcSMatthew Wilcox 	/* Destroying an empty array is a no-op */
1766687149fcSMatthew Wilcox 	xa_destroy(xa);
1767687149fcSMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
1768687149fcSMatthew Wilcox 
1769687149fcSMatthew Wilcox 	/* Destroying an array with a single entry */
1770687149fcSMatthew Wilcox 	for (index = 0; index < 1000; index++) {
1771687149fcSMatthew Wilcox 		xa_store_index(xa, index, GFP_KERNEL);
1772687149fcSMatthew Wilcox 		XA_BUG_ON(xa, xa_empty(xa));
1773687149fcSMatthew Wilcox 		xa_destroy(xa);
1774687149fcSMatthew Wilcox 		XA_BUG_ON(xa, !xa_empty(xa));
1775687149fcSMatthew Wilcox 	}
1776687149fcSMatthew Wilcox 
1777687149fcSMatthew Wilcox 	/* Destroying an array with a single entry at ULONG_MAX */
1778687149fcSMatthew Wilcox 	xa_store(xa, ULONG_MAX, xa, GFP_KERNEL);
1779687149fcSMatthew Wilcox 	XA_BUG_ON(xa, xa_empty(xa));
1780687149fcSMatthew Wilcox 	xa_destroy(xa);
1781687149fcSMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
1782687149fcSMatthew Wilcox 
1783687149fcSMatthew Wilcox #ifdef CONFIG_XARRAY_MULTI
1784687149fcSMatthew Wilcox 	/* Destroying an array with a multi-index entry */
1785687149fcSMatthew Wilcox 	xa_store_order(xa, 1 << 11, 11, xa, GFP_KERNEL);
1786687149fcSMatthew Wilcox 	XA_BUG_ON(xa, xa_empty(xa));
1787687149fcSMatthew Wilcox 	xa_destroy(xa);
1788687149fcSMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
1789687149fcSMatthew Wilcox #endif
1790687149fcSMatthew Wilcox }
1791687149fcSMatthew Wilcox 
/* The xarray shared by most of the checks in xarray_checks() below. */
static DEFINE_XARRAY(array);
1793ad3d6c72SMatthew Wilcox 
/*
 * Module init: run every test suite in turn.  Each check leaves its
 * array empty on success, so the suites can share &array.  Returns 0
 * only if every XA_BUG_ON() passed; otherwise module load fails with
 * -EINVAL.
 */
static int xarray_checks(void)
{
	check_xa_err(&array);
	check_xas_retry(&array);
	check_xa_load(&array);
	check_xa_mark(&array);
	check_xa_shrink(&array);
	check_xas_erase(&array);
	check_insert(&array);
	check_cmpxchg(&array);
	/* check_reserve runs twice: once on each statically-defined array. */
	check_reserve(&array);
	check_reserve(&xa0);
	check_multi_store(&array);
	check_get_order(&array);
	check_xa_alloc();
	check_find(&array);
	check_find_entry(&array);
	check_pause(&array);
	check_account(&array);
	check_destroy(&array);
	check_move(&array);
	check_create_range(&array);
	check_store_range(&array);
	check_store_iter(&array);
	check_align(&xa0);
	check_split(&array);

	/* Workingset tests at a node boundary and deeper in the tree. */
	check_workingset(&array, 0);
	check_workingset(&array, 64);
	check_workingset(&array, 4096);

	printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
	return (tests_run == tests_passed) ? 0 : -EINVAL;
}
1828ad3d6c72SMatthew Wilcox 
/* Nothing to tear down: the tests leave their arrays empty. */
static void xarray_exit(void)
{
}
1832ad3d6c72SMatthew Wilcox 
/* Tests run at module load; a failing run makes insmod return -EINVAL. */
module_init(xarray_checks);
module_exit(xarray_exit);
MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
MODULE_LICENSE("GPL");
1837