xref: /openbmc/linux/lib/test_xarray.c (revision c44aa5e8)
1ad3d6c72SMatthew Wilcox // SPDX-License-Identifier: GPL-2.0+
2ad3d6c72SMatthew Wilcox /*
3ad3d6c72SMatthew Wilcox  * test_xarray.c: Test the XArray API
4ad3d6c72SMatthew Wilcox  * Copyright (c) 2017-2018 Microsoft Corporation
5c44aa5e8SMatthew Wilcox (Oracle)  * Copyright (c) 2019-2020 Oracle
6ad3d6c72SMatthew Wilcox  * Author: Matthew Wilcox <willy@infradead.org>
7ad3d6c72SMatthew Wilcox  */
8ad3d6c72SMatthew Wilcox 
9ad3d6c72SMatthew Wilcox #include <linux/xarray.h>
10ad3d6c72SMatthew Wilcox #include <linux/module.h>
11ad3d6c72SMatthew Wilcox 
/* Counters maintained by XA_BUG_ON(): total checks run and checks passed. */
static unsigned int tests_run;
static unsigned int tests_passed;

#ifndef XA_DEBUG
# ifdef __KERNEL__
/* Stub: the real xa_dump() is only built when XA_DEBUG is defined. */
void xa_dump(const struct xarray *xa) { }
# endif
#undef XA_BUG_ON
/*
 * Test assertion: count the check and, on failure, print the location,
 * dump the array and the stack rather than BUG()ing, so the rest of the
 * suite keeps running.
 */
#define XA_BUG_ON(xa, x) do {					\
	tests_run++;						\
	if (x) {						\
		printk("BUG at %s:%d\n", __func__, __LINE__);	\
		xa_dump(xa);					\
		dump_stack();					\
	} else {						\
		tests_passed++;					\
	}							\
} while (0)
#endif
31ad3d6c72SMatthew Wilcox 
32b7677a13SMatthew Wilcox static void *xa_mk_index(unsigned long index)
33b7677a13SMatthew Wilcox {
34b7677a13SMatthew Wilcox 	return xa_mk_value(index & LONG_MAX);
35b7677a13SMatthew Wilcox }
36b7677a13SMatthew Wilcox 
37ad3d6c72SMatthew Wilcox static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
38ad3d6c72SMatthew Wilcox {
39b7677a13SMatthew Wilcox 	return xa_store(xa, index, xa_mk_index(index), gfp);
40ad3d6c72SMatthew Wilcox }
41ad3d6c72SMatthew Wilcox 
4212fd2aeeSMatthew Wilcox static void xa_insert_index(struct xarray *xa, unsigned long index)
4312fd2aeeSMatthew Wilcox {
4412fd2aeeSMatthew Wilcox 	XA_BUG_ON(xa, xa_insert(xa, index, xa_mk_index(index),
4512fd2aeeSMatthew Wilcox 				GFP_KERNEL) != 0);
4612fd2aeeSMatthew Wilcox }
4712fd2aeeSMatthew Wilcox 
48371c752dSMatthew Wilcox static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
49371c752dSMatthew Wilcox {
50a3e4d3f9SMatthew Wilcox 	u32 id;
51371c752dSMatthew Wilcox 
52a3e4d3f9SMatthew Wilcox 	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(index), xa_limit_32b,
53371c752dSMatthew Wilcox 				gfp) != 0);
54371c752dSMatthew Wilcox 	XA_BUG_ON(xa, id != index);
55371c752dSMatthew Wilcox }
56371c752dSMatthew Wilcox 
57ad3d6c72SMatthew Wilcox static void xa_erase_index(struct xarray *xa, unsigned long index)
58ad3d6c72SMatthew Wilcox {
59b7677a13SMatthew Wilcox 	XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index));
6058d6ea30SMatthew Wilcox 	XA_BUG_ON(xa, xa_load(xa, index) != NULL);
6158d6ea30SMatthew Wilcox }
6258d6ea30SMatthew Wilcox 
6358d6ea30SMatthew Wilcox /*
6458d6ea30SMatthew Wilcox  * If anyone needs this, please move it to xarray.c.  We have no current
6558d6ea30SMatthew Wilcox  * users outside the test suite because all current multislot users want
6658d6ea30SMatthew Wilcox  * to use the advanced API.
6758d6ea30SMatthew Wilcox  */
6858d6ea30SMatthew Wilcox static void *xa_store_order(struct xarray *xa, unsigned long index,
6958d6ea30SMatthew Wilcox 		unsigned order, void *entry, gfp_t gfp)
7058d6ea30SMatthew Wilcox {
7158d6ea30SMatthew Wilcox 	XA_STATE_ORDER(xas, xa, index, order);
7258d6ea30SMatthew Wilcox 	void *curr;
7358d6ea30SMatthew Wilcox 
7458d6ea30SMatthew Wilcox 	do {
7558d6ea30SMatthew Wilcox 		xas_lock(&xas);
7658d6ea30SMatthew Wilcox 		curr = xas_store(&xas, entry);
7758d6ea30SMatthew Wilcox 		xas_unlock(&xas);
7858d6ea30SMatthew Wilcox 	} while (xas_nomem(&xas, gfp));
7958d6ea30SMatthew Wilcox 
8058d6ea30SMatthew Wilcox 	return curr;
8158d6ea30SMatthew Wilcox }
8258d6ea30SMatthew Wilcox 
/*
 * Check xa_err() extraction on the results of basic store/erase calls:
 * successes must decode to 0 and, in the userspace build only, failed
 * GFP_NOWAIT allocations must decode to -ENOMEM.
 */
static noinline void check_xa_err(struct xarray *xa)
{
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0);
#ifndef __KERNEL__
	/* The kernel does not fail GFP_NOWAIT allocations */
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
#endif
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0);
// kills the test-suite :-(
//	XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
}
98ad3d6c72SMatthew Wilcox 
/*
 * Check handling of retry entries: they are visible to xas_reload(),
 * recognised by xas_retry(), skipped by the iterators, and can be
 * overwritten by a normal store.
 */
static noinline void check_xas_retry(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, 1, GFP_KERNEL);

	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
	/* Erasing index 1 shrinks the tree, leaving a retry entry behind */
	xa_erase_index(xa, 1);
	XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
	/* xas_retry() must be false for anything but a retry entry */
	XA_BUG_ON(xa, xas_retry(&xas, NULL));
	XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
	xas_reset(&xas);
	XA_BUG_ON(xa, xas.xa_node != XAS_RESTART);
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_node != NULL);
	rcu_read_unlock();

	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

	rcu_read_lock();
	XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas)));
	xas.xa_node = XAS_RESTART;
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	rcu_read_unlock();

	/* Make sure we can iterate through retry entries */
	xas_lock(&xas);
	xas_set(&xas, 0);
	xas_store(&xas, XA_RETRY_ENTRY);
	xas_set(&xas, 1);
	xas_store(&xas, XA_RETRY_ENTRY);

	/* Replace the two retry entries with ordinary value entries */
	xas_set(&xas, 0);
	xas_for_each(&xas, entry, ULONG_MAX) {
		xas_store(&xas, xa_mk_index(xas.xa_index));
	}
	xas_unlock(&xas);

	xa_erase_index(xa, 0);
	xa_erase_index(xa, 1);
}
143b803b428SMatthew Wilcox 
144ad3d6c72SMatthew Wilcox static noinline void check_xa_load(struct xarray *xa)
145ad3d6c72SMatthew Wilcox {
146ad3d6c72SMatthew Wilcox 	unsigned long i, j;
147ad3d6c72SMatthew Wilcox 
148ad3d6c72SMatthew Wilcox 	for (i = 0; i < 1024; i++) {
149ad3d6c72SMatthew Wilcox 		for (j = 0; j < 1024; j++) {
150ad3d6c72SMatthew Wilcox 			void *entry = xa_load(xa, j);
151ad3d6c72SMatthew Wilcox 			if (j < i)
152ad3d6c72SMatthew Wilcox 				XA_BUG_ON(xa, xa_to_value(entry) != j);
153ad3d6c72SMatthew Wilcox 			else
154ad3d6c72SMatthew Wilcox 				XA_BUG_ON(xa, entry);
155ad3d6c72SMatthew Wilcox 		}
156ad3d6c72SMatthew Wilcox 		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
157ad3d6c72SMatthew Wilcox 	}
158ad3d6c72SMatthew Wilcox 
159ad3d6c72SMatthew Wilcox 	for (i = 0; i < 1024; i++) {
160ad3d6c72SMatthew Wilcox 		for (j = 0; j < 1024; j++) {
161ad3d6c72SMatthew Wilcox 			void *entry = xa_load(xa, j);
162ad3d6c72SMatthew Wilcox 			if (j >= i)
163ad3d6c72SMatthew Wilcox 				XA_BUG_ON(xa, xa_to_value(entry) != j);
164ad3d6c72SMatthew Wilcox 			else
165ad3d6c72SMatthew Wilcox 				XA_BUG_ON(xa, entry);
166ad3d6c72SMatthew Wilcox 		}
167ad3d6c72SMatthew Wilcox 		xa_erase_index(xa, i);
168ad3d6c72SMatthew Wilcox 	}
169ad3d6c72SMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
170ad3d6c72SMatthew Wilcox }
171ad3d6c72SMatthew Wilcox 
/*
 * Exercise marks on a single index: marks cannot be set on NULL entries,
 * marks are independent of each other, erasing clears them, and (with
 * CONFIG_XARRAY_MULTI) a multi-index entry stored over marked entries
 * takes on the union of their marks.  @index must be a multiple of 4.
 */
static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1;

	/* NULL elements have no marks set */
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/* Storing a pointer will not make a mark appear */
	XA_BUG_ON(xa, xa_store_index(xa, index, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));

	/* Setting one mark will not set another mark */
	XA_BUG_ON(xa, xa_get_mark(xa, index + 1, XA_MARK_0));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1));

	/* Storing NULL clears marks, and they can't be set again */
	xa_erase_index(xa, index);
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/*
	 * Storing a multi-index entry over entries with marks gives the
	 * entire entry the union of the marks
	 */
	BUG_ON((index % 4) != 0);	/* caller contract: multiple of 4 */
	for (order = 2; order < max_order; order++) {
		unsigned long base = round_down(index, 1UL << order);
		unsigned long next = base + (1UL << order);
		unsigned long i;

		/* Two marked entries inside the range, one outside it */
		XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
		xa_set_mark(xa, index + 1, XA_MARK_0);
		XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
		xa_set_mark(xa, index + 2, XA_MARK_2);
		XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
		xa_store_order(xa, index, order, xa_mk_index(index),
				GFP_KERNEL);
		for (i = base; i < next; i++) {
			XA_STATE(xas, xa, i);
			unsigned int seen = 0;
			void *entry;

			/* Whole range carries MARK_0 | MARK_2, never MARK_1 */
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
			XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2));

			/* We should see two elements in the array */
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX)
				seen++;
			rcu_read_unlock();
			XA_BUG_ON(xa, seen != 2);

			/* One of which is marked */
			xas_set(&xas, 0);
			seen = 0;
			rcu_read_lock();
			xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
				seen++;
			rcu_read_unlock();
			XA_BUG_ON(xa, seen != 1);
		}
		/* The entry just past the range must remain unmarked */
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2));
		xa_erase_index(xa, index);
		xa_erase_index(xa, next);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}
2509b89a035SMatthew Wilcox 
/*
 * Check xas_init_marks() on a single entry and marked iteration over a
 * range (3500-4499) spanning multiple nodes.
 */
static noinline void check_xa_mark_2(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long index;
	unsigned int count = 0;
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_set_mark(xa, 0, XA_MARK_0);
	xas_lock(&xas);
	xas_load(&xas);
	xas_init_marks(&xas);
	xas_unlock(&xas);
	/*
	 * Convoluted way of asserting the mark is now clear:
	 * !get == 0 only when xa_get_mark() returned true.
	 */
	XA_BUG_ON(xa, !xa_get_mark(xa, 0, XA_MARK_0) == 0);

	for (index = 3500; index < 4500; index++) {
		xa_store_index(xa, index, GFP_KERNEL);
		xa_set_mark(xa, index, XA_MARK_0);
	}

	xas_reset(&xas);
	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
		count++;
	rcu_read_unlock();
	XA_BUG_ON(xa, count != 1000);

	/*
	 * NOTE(review): xas is not reset before this walk, so it resumes
	 * from wherever the previous iteration left it — verify the body
	 * actually visits entries, and how xas_init_marks() is expected
	 * to leave XA_MARK_0 set here.
	 */
	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		xas_init_marks(&xas);
		XA_BUG_ON(xa, !xa_get_mark(xa, xas.xa_index, XA_MARK_0));
		XA_BUG_ON(xa, !xas_get_mark(&xas, XA_MARK_0));
	}
	xas_unlock(&xas);

	xa_destroy(xa);
}
288adb9d9c4SMatthew Wilcox 
2899b89a035SMatthew Wilcox static noinline void check_xa_mark(struct xarray *xa)
2909b89a035SMatthew Wilcox {
2919b89a035SMatthew Wilcox 	unsigned long index;
2929b89a035SMatthew Wilcox 
2939b89a035SMatthew Wilcox 	for (index = 0; index < 16384; index += 4)
2949b89a035SMatthew Wilcox 		check_xa_mark_1(xa, index);
295adb9d9c4SMatthew Wilcox 
296adb9d9c4SMatthew Wilcox 	check_xa_mark_2(xa);
2979b89a035SMatthew Wilcox }
2989b89a035SMatthew Wilcox 
/*
 * Check that the tree shrinks when entries are erased: the detached node
 * is poisoned with a retry entry, and growing the tree (by storing at
 * ULONG_MAX) then erasing shrinks back to the original head node.
 */
static noinline void check_xa_shrink(struct xarray *xa)
{
	XA_STATE(xas, xa, 1);
	struct xa_node *node;
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 15 : 1;

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

	/*
	 * Check that erasing the entry at 1 shrinks the tree and properly
	 * marks the node as being deleted.
	 */
	xas_lock(&xas);
	XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1));
	node = xas.xa_node;
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != NULL);
	XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS);
	/* Slot 0 of the detached node now holds a retry entry */
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY);
	XA_BUG_ON(xa, xas_load(&xas) != NULL);
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	xa_erase_index(xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Grow above a multi-index head entry, then shrink back */
	for (order = 0; order < max_order; order++) {
		unsigned long max = (1UL << order) - 1;
		xa_store_order(xa, 0, order, xa_mk_value(0), GFP_KERNEL);
		XA_BUG_ON(xa, xa_load(xa, max) != xa_mk_value(0));
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
		rcu_read_lock();
		node = xa_head(xa);
		rcu_read_unlock();
		XA_BUG_ON(xa, xa_store_index(xa, ULONG_MAX, GFP_KERNEL) !=
				NULL);
		rcu_read_lock();
		/* Storing at ULONG_MAX must have replaced the head */
		XA_BUG_ON(xa, xa_head(xa) == node);
		rcu_read_unlock();
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
		xa_erase_index(xa, ULONG_MAX);
		/* Erasing it must shrink back to the saved head node */
		XA_BUG_ON(xa, xa->xa_head != node);
		xa_erase_index(xa, 0);
	}
}
34758d6ea30SMatthew Wilcox 
34812fd2aeeSMatthew Wilcox static noinline void check_insert(struct xarray *xa)
34912fd2aeeSMatthew Wilcox {
35012fd2aeeSMatthew Wilcox 	unsigned long i;
35112fd2aeeSMatthew Wilcox 
35212fd2aeeSMatthew Wilcox 	for (i = 0; i < 1024; i++) {
35312fd2aeeSMatthew Wilcox 		xa_insert_index(xa, i);
35412fd2aeeSMatthew Wilcox 		XA_BUG_ON(xa, xa_load(xa, i - 1) != NULL);
35512fd2aeeSMatthew Wilcox 		XA_BUG_ON(xa, xa_load(xa, i + 1) != NULL);
35612fd2aeeSMatthew Wilcox 		xa_erase_index(xa, i);
35712fd2aeeSMatthew Wilcox 	}
35812fd2aeeSMatthew Wilcox 
35912fd2aeeSMatthew Wilcox 	for (i = 10; i < BITS_PER_LONG; i++) {
36012fd2aeeSMatthew Wilcox 		xa_insert_index(xa, 1UL << i);
36112fd2aeeSMatthew Wilcox 		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 1) != NULL);
36212fd2aeeSMatthew Wilcox 		XA_BUG_ON(xa, xa_load(xa, (1UL << i) + 1) != NULL);
36312fd2aeeSMatthew Wilcox 		xa_erase_index(xa, 1UL << i);
36412fd2aeeSMatthew Wilcox 
36512fd2aeeSMatthew Wilcox 		xa_insert_index(xa, (1UL << i) - 1);
36612fd2aeeSMatthew Wilcox 		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 2) != NULL);
36712fd2aeeSMatthew Wilcox 		XA_BUG_ON(xa, xa_load(xa, 1UL << i) != NULL);
36812fd2aeeSMatthew Wilcox 		xa_erase_index(xa, (1UL << i) - 1);
36912fd2aeeSMatthew Wilcox 	}
37012fd2aeeSMatthew Wilcox 
37112fd2aeeSMatthew Wilcox 	xa_insert_index(xa, ~0UL);
37212fd2aeeSMatthew Wilcox 	XA_BUG_ON(xa, xa_load(xa, 0UL) != NULL);
37312fd2aeeSMatthew Wilcox 	XA_BUG_ON(xa, xa_load(xa, ~1UL) != NULL);
37412fd2aeeSMatthew Wilcox 	xa_erase_index(xa, ~0UL);
37512fd2aeeSMatthew Wilcox 
37612fd2aeeSMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
37712fd2aeeSMatthew Wilcox }
37812fd2aeeSMatthew Wilcox 
37941aec91fSMatthew Wilcox static noinline void check_cmpxchg(struct xarray *xa)
38041aec91fSMatthew Wilcox {
38141aec91fSMatthew Wilcox 	void *FIVE = xa_mk_value(5);
38241aec91fSMatthew Wilcox 	void *SIX = xa_mk_value(6);
38341aec91fSMatthew Wilcox 	void *LOTS = xa_mk_value(12345678);
38441aec91fSMatthew Wilcox 
38541aec91fSMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
38641aec91fSMatthew Wilcox 	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL);
387fd9dc93eSMatthew Wilcox 	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EBUSY);
38841aec91fSMatthew Wilcox 	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS);
38941aec91fSMatthew Wilcox 	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS);
39041aec91fSMatthew Wilcox 	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
39141aec91fSMatthew Wilcox 	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
39241aec91fSMatthew Wilcox 	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
39341aec91fSMatthew Wilcox 	xa_erase_index(xa, 12345678);
39441aec91fSMatthew Wilcox 	xa_erase_index(xa, 5);
39541aec91fSMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
39641aec91fSMatthew Wilcox }
39741aec91fSMatthew Wilcox 
/*
 * Exercise xa_reserve()/xa_release(): a reserved slot makes the array
 * non-empty yet loads as NULL, appears as a ZERO entry to cmpxchg,
 * counts as busy for xa_insert(), and is skipped by iteration.
 */
static noinline void check_reserve(struct xarray *xa)
{
	void *entry;
	unsigned long index;
	int count;

	/* An array with a reserved entry is not empty */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_load(xa, 12345678));
	xa_release(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Releasing a used entry does nothing */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* cmpxchg sees a reserved entry as ZERO */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, XA_ZERO_ENTRY,
				xa_mk_value(12345678), GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* xa_insert treats it as busy */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) !=
			-EBUSY);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Can iterate through a reserved entry */
	xa_store_index(xa, 5, GFP_KERNEL);
	XA_BUG_ON(xa, xa_reserve(xa, 6, GFP_KERNEL) != 0);
	xa_store_index(xa, 7, GFP_KERNEL);

	count = 0;
	xa_for_each(xa, index, entry) {
		/* The reserved slot at 6 must be skipped */
		XA_BUG_ON(xa, index != 5 && index != 7);
		count++;
	}
	XA_BUG_ON(xa, count != 2);

	/* If we free a reserved entry, we should be able to allocate it */
	if (xa->xa_flags & XA_FLAGS_ALLOC) {
		u32 id;

		/* 6 is still reserved, so the allocator hands out 8 */
		XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(8),
					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != 8);

		xa_release(xa, 6);
		XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(6),
					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != 6);
	}

	xa_destroy(xa);
}
4639f14d4f1SMatthew Wilcox 
/*
 * Fill ranges of the array with the advanced API (retrying allocation
 * via xas_nomem()), then erase every entry during an iteration and
 * check the array is empty again each round.
 */
static noinline void check_xas_erase(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned long i, j;

	for (i = 0; i < 200; i++) {
		/* Populate indices i .. 2*i+16 */
		for (j = i; j < 2 * i + 17; j++) {
			xas_set(&xas, j);
			do {
				xas_lock(&xas);
				xas_store(&xas, xa_mk_index(j));
				xas_unlock(&xas);
			} while (xas_nomem(&xas, GFP_KERNEL));
		}

		/* Also store at ULONG_MAX ... */
		xas_set(&xas, ULONG_MAX);
		do {
			xas_lock(&xas);
			xas_store(&xas, xa_mk_value(0));
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));

		/* ... and remove it again under the lock */
		xas_lock(&xas);
		xas_store(&xas, NULL);

		/* Erase each remaining entry as the walk visits it */
		xas_set(&xas, 0);
		j = i;
		xas_for_each(&xas, entry, ULONG_MAX) {
			XA_BUG_ON(xa, entry != xa_mk_index(j));
			xas_store(&xas, NULL);
			j++;
		}
		xas_unlock(&xas);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
}
501b803b428SMatthew Wilcox 
5024f06d630SMatthew Wilcox #ifdef CONFIG_XARRAY_MULTI
/*
 * Store a multi-index entry of @order covering @index: every index in
 * [min, max) must load it, its neighbours must stay empty, and storing
 * through an XA_STATE at @index must replace the whole range.
 */
static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, index);
	unsigned long min = index & ~((1UL << order) - 1);
	unsigned long max = min + (1UL << order);

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);

	xas_lock(&xas);
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index));
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min));
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min));
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);

	/* Erasing via the first index removes the whole range */
	xa_erase_index(xa, min);
	XA_BUG_ON(xa, !xa_empty(xa));
}
5274f06d630SMatthew Wilcox 
/*
 * Check xas_store() on a multi-index entry: it returns the old entry,
 * leaves xa_index unchanged, and storing NULL removes the whole range.
 */
static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, index);
	xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);

	xas_lock(&xas);
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != index);
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	xas_unlock(&xas);
	XA_BUG_ON(xa, !xa_empty(xa));
}
5414f145cd6SMatthew Wilcox 
/*
 * Check that an iteration sees a multi-index entry exactly once, whether
 * it starts at index 0 or in the middle of the entry's range.
 */
static noinline void check_multi_store_3(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	int n = 0;

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);

	xas_lock(&xas);
	/* Walk from 0: the entry is found once */
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 1);
	/* Walk from inside the range: found once more */
	xas_set(&xas, index + 1);
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 2);
	xas_unlock(&xas);

	xa_destroy(xa);
}
5674f06d630SMatthew Wilcox #endif
5684f06d630SMatthew Wilcox 
/*
 * Exercise multi-index (multi-order) stores: a single entry occupying a
 * power-of-two aligned range of indices.  Compiled out when the kernel
 * does not support multi-index entries.
 */
static noinline void check_multi_store(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long i, j, k;
	unsigned int max_order = (sizeof(long) == 4) ? 30 : 60;

	/* Loading from any position returns the same value */
	xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	/* The order-1 entry occupies two slots, both counted as values */
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Storing adjacent to the value does not alter the value */
	xa_store(xa, 3, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	/* The pointer at index 3 adds a slot but is not a value entry */
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Overwriting multiple indexes works */
	xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 4) != NULL);
	rcu_read_lock();
	/* The order-2 store replaced both the old value and the pointer */
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4);
	rcu_read_unlock();

	/* We can erase multiple values with a single store */
	xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Even when the first slot is empty but the others aren't */
	xa_store_index(xa, 1, GFP_KERNEL);
	xa_store_index(xa, 2, GFP_KERNEL);
	xa_store_order(xa, 0, 2, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/*
	 * Store an order-i entry at 0, then overwrite it with an order-j
	 * entry at 0.  Afterwards every index covered by either range
	 * reads back the second (order-j) value; anything beyond both
	 * ranges is absent.
	 */
	for (i = 0; i < max_order; i++) {
		for (j = 0; j < max_order; j++) {
			xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL);
			xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL);

			for (k = 0; k < max_order; k++) {
				/* Probe the last index of an order-k range */
				void *entry = xa_load(xa, (1UL << k) - 1);
				if ((i < k) && (j < k))
					XA_BUG_ON(xa, entry != NULL);
				else
					XA_BUG_ON(xa, entry != xa_mk_index(j));
			}

			xa_erase(xa, 0);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}

	/* Sub-tests at various bases relative to the entry's order */
	for (i = 0; i < 20; i++) {
		check_multi_store_1(xa, 200, i);
		check_multi_store_1(xa, 0, i);
		check_multi_store_1(xa, (1UL << i) + 1, i);
	}
	check_multi_store_2(xa, 4095, 9);

	for (i = 1; i < 20; i++) {
		check_multi_store_3(xa, 0, i);
		check_multi_store_3(xa, 1UL << i, i);
	}
#endif
}
64858d6ea30SMatthew Wilcox 
/*
 * Check simple ID allocation.  @base is the lowest index this array
 * will hand out (0 for DEFINE_XARRAY_ALLOC, 1 for DEFINE_XARRAY_ALLOC1).
 */
static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base)
{
	int i;
	u32 id;

	XA_BUG_ON(xa, !xa_empty(xa));
	/* An empty array should assign %base to the first alloc */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Erasing it should make the array empty again */
	xa_erase_index(xa, base);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* And it should assign %base again */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Allocating and then erasing a lot should not lose base */
	for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++)
		xa_alloc_index(xa, i, GFP_KERNEL);
	for (i = base; i < 2 * XA_CHUNK_SIZE; i++)
		xa_erase_index(xa, i);
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Destroying the array should do the same as erasing */
	xa_destroy(xa);

	/* And it should assign %base again */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* The next assigned ID should be base+1 */
	xa_alloc_index(xa, base + 1, GFP_KERNEL);
	xa_erase_index(xa, base + 1);

	/* Storing a value should mark it used */
	xa_store_index(xa, base + 1, GFP_KERNEL);
	xa_alloc_index(xa, base + 2, GFP_KERNEL);

	/* If we then erase base, it should be free */
	xa_erase_index(xa, base);
	xa_alloc_index(xa, base, GFP_KERNEL);

	xa_erase_index(xa, base + 1);
	xa_erase_index(xa, base + 2);

	/* Allocation should keep working well past a single chunk */
	for (i = 1; i < 5000; i++) {
		xa_alloc_index(xa, base + i, GFP_KERNEL);
	}

	xa_destroy(xa);

	/* Check that we fail properly at the limit of allocation */
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX - 1),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0xfffffffeU);
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0xffffffffU);
	/* An exhausted limit fails with -EBUSY and leaves @id untouched */
	id = 3;
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(0),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, id != 3);
	xa_destroy(xa);

	/* An unsatisfiable limit fails whether or not entries exist */
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
	xa_erase_index(xa, 3);
	XA_BUG_ON(xa, !xa_empty(xa));
}
7233ccaf57aSMatthew Wilcox 
/*
 * Check xa_alloc() with NULL entries: allocating NULL reserves the ID
 * (the array is no longer empty) even though loads and erases at that
 * index observe NULL.
 */
static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base)
{
	unsigned int i, id;
	unsigned long index;
	void *entry;

	/* Allocate and free a NULL and check xa_empty() behaves */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, id) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Ditto, but check destroy instead of erase */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Ten NULL allocations get consecutive IDs from base upwards */
	for (i = base; i < base + 10; i++) {
		XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b,
					GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != i);
	}

	/* Storing over a reserved ID returns NULL as the old entry ... */
	XA_BUG_ON(xa, xa_store(xa, 3, xa_mk_index(3), GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store(xa, 4, xa_mk_index(4), GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store(xa, 4, NULL, GFP_KERNEL) != xa_mk_index(4));
	/* ... and erasing an ID frees it for the next allocation (5) */
	XA_BUG_ON(xa, xa_erase(xa, 5) != NULL);
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 5);

	/* Erase whatever iteration can see (the stored value entries) */
	xa_for_each(xa, index, entry) {
		xa_erase_index(xa, index);
	}

	/*
	 * The reserved (NULL) allocations remain: erasing them returns
	 * NULL, and the array stays non-empty until the last one at
	 * base + 9 goes.  (Index 8 lies inside [base, base + 9) for
	 * either base, so erasing it again must also return NULL.)
	 */
	for (i = base; i < base + 9; i++) {
		XA_BUG_ON(xa, xa_erase(xa, i) != NULL);
		XA_BUG_ON(xa, xa_empty(xa));
	}
	XA_BUG_ON(xa, xa_erase(xa, 8) != NULL);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, base + 9) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	xa_destroy(xa);
}
774a3e4d3f9SMatthew Wilcox 
/*
 * Check cyclic allocation: xa_alloc_cyclic() hands out IDs starting
 * from *next, wraps back to the bottom of the limit when it runs out,
 * and returns 1 (rather than 0) on the allocation that wraps.
 */
static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
{
	struct xa_limit limit = XA_LIMIT(1, 0x3fff);
	u32 next = 0;
	unsigned int i, id;
	unsigned long index;
	void *entry;

	/* With next == 0, the first ID is the bottom of the limit (1) */
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 1);

	/* Setting next directly steers the following allocation */
	next = 0x3ffd;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0x3ffd);
	xa_erase_index(xa, 0x3ffd);
	xa_erase_index(xa, 1);
	XA_BUG_ON(xa, !xa_empty(xa));

	/*
	 * Allocate over the top of the limit: the IDs come out as
	 * 0x3ffe, 0x3fff, then wrap to 1, 2, 3.  Only the wrapping
	 * allocation (the one yielding id == 1) returns 1.
	 */
	for (i = 0x3ffe; i < 0x4003; i++) {
		if (i < 0x4000)
			entry = xa_mk_index(i);
		else
			entry = xa_mk_index(i - 0x3fff);
		XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit,
					&next, GFP_KERNEL) != (id == 1));
		XA_BUG_ON(xa, xa_mk_index(id) != entry);
	}

	/* Check wrap-around is handled correctly */
	if (base != 0)
		xa_erase_index(xa, base);
	xa_erase_index(xa, base + 1);
	next = UINT_MAX;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != UINT_MAX);
	/* Wrapping past UINT_MAX restarts from base and returns 1 once */
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base),
				xa_limit_32b, &next, GFP_KERNEL) != 1);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base + 1);

	xa_for_each(xa, index, entry)
		xa_erase_index(xa, index);

	XA_BUG_ON(xa, !xa_empty(xa));
}
8252fa044e5SMatthew Wilcox 
8263ccaf57aSMatthew Wilcox static DEFINE_XARRAY_ALLOC(xa0);
8273ccaf57aSMatthew Wilcox static DEFINE_XARRAY_ALLOC1(xa1);
8283ccaf57aSMatthew Wilcox 
8293ccaf57aSMatthew Wilcox static noinline void check_xa_alloc(void)
8303ccaf57aSMatthew Wilcox {
8313ccaf57aSMatthew Wilcox 	check_xa_alloc_1(&xa0, 0);
8323ccaf57aSMatthew Wilcox 	check_xa_alloc_1(&xa1, 1);
833a3e4d3f9SMatthew Wilcox 	check_xa_alloc_2(&xa0, 0);
834a3e4d3f9SMatthew Wilcox 	check_xa_alloc_2(&xa1, 1);
8352fa044e5SMatthew Wilcox 	check_xa_alloc_3(&xa0, 0);
8362fa044e5SMatthew Wilcox 	check_xa_alloc_3(&xa1, 1);
837371c752dSMatthew Wilcox }
838371c752dSMatthew Wilcox 
/*
 * Store a multi-index entry over [start, start + (1 << order)) using
 * xas_store() after walking the conflicting entries, and check that
 * exactly @present old entries were reported by xas_for_each_conflict().
 */
static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
			unsigned int order, unsigned int present)
{
	XA_STATE_ORDER(xas, xa, start, order);
	void *entry;
	unsigned int count = 0;

retry:
	xas_lock(&xas);
	/* Every conflicting entry must be a value lying within the range */
	xas_for_each_conflict(&xas, entry) {
		XA_BUG_ON(xa, !xa_is_value(entry));
		XA_BUG_ON(xa, entry < xa_mk_index(start));
		XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1));
		count++;
	}
	xas_store(&xas, xa_mk_index(start));
	xas_unlock(&xas);
	/* Drop the lock to allocate, then redo the walk from scratch */
	if (xas_nomem(&xas, GFP_KERNEL)) {
		count = 0;
		goto retry;
	}
	XA_BUG_ON(xa, xas_error(&xas));
	XA_BUG_ON(xa, count != present);
	/* Both ends of the range now read back the new entry */
	XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start));
	XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
			xa_mk_index(start));
	xa_erase_index(xa, start);
}
8674e99d4e9SMatthew Wilcox 
/*
 * Drive __check_store_iter() over a range of orders with 0, 1 or a
 * full range of pre-existing entries being overwritten.
 */
static noinline void check_store_iter(struct xarray *xa)
{
	unsigned int i, j;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;

	for (i = 0; i < max_order; i++) {
		/* [min, max] is the order-i aligned range based at 1 << i */
		unsigned int min = 1 << i;
		unsigned int max = (2 << i) - 1;
		/* Storing over an empty range sees no conflicts */
		__check_store_iter(xa, 0, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
		__check_store_iter(xa, min, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));

		/* One entry at either end of the range is one conflict */
		xa_store_index(xa, min, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_index(xa, max, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));

		/* A fully-populated range reports every old entry */
		for (j = 0; j < min; j++)
			xa_store_index(xa, j, GFP_KERNEL);
		__check_store_iter(xa, 0, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
		for (j = 0; j < min; j++)
			xa_store_index(xa, min + j, GFP_KERNEL);
		__check_store_iter(xa, min, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#ifdef CONFIG_XARRAY_MULTI
	/* An entry outside the stored range (63) is not a conflict */
	xa_store_index(xa, 63, GFP_KERNEL);
	xa_store_index(xa, 65, GFP_KERNEL);
	__check_store_iter(xa, 64, 2, 1);
	xa_erase_index(xa, 63);
#endif
	XA_BUG_ON(xa, !xa_empty(xa));
}
9054e99d4e9SMatthew Wilcox 
/* Check xa_find()/xa_find_after() around an order-@order multi-index entry */
static noinline void check_multi_find_1(struct xarray *xa, unsigned order)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long multi = 3 << order;
	unsigned long next = 4 << order;
	unsigned long index;

	/* Multi-index entry at @multi; single entries at @next, @next + 1 */
	xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL);
	XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL);

	index = 0;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(multi));
	XA_BUG_ON(xa, index != multi);
	/* Searching from inside the multi-index entry reports it too */
	index = multi + 1;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(multi));
	XA_BUG_ON(xa, (index < multi) || (index >= next));
	XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(next));
	XA_BUG_ON(xa, index != next);
	/* With max == next there is nothing after @next; index is untouched */
	XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL);
	XA_BUG_ON(xa, index != next);

	xa_erase_index(xa, multi);
	xa_erase_index(xa, next);
	xa_erase_index(xa, next + 1);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}
937b803b428SMatthew Wilcox 
/*
 * Check that erasing entries - including the multi-index entry the
 * iterator is currently standing on - during an xas_for_each() walk
 * is safe.
 */
static noinline void check_multi_find_2(struct xarray *xa)
{
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 10 : 1;
	unsigned int i, j;
	void *entry;

	for (i = 0; i < max_order; i++) {
		unsigned long index = 1UL << i;
		for (j = 0; j < index; j++) {
			/* Start iterating from inside the order-i entry */
			XA_STATE(xas, xa, j + index);
			xa_store_index(xa, index - 1, GFP_KERNEL);
			xa_store_order(xa, index, i, xa_mk_index(index),
					GFP_KERNEL);
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX) {
				xa_erase_index(xa, index);
			}
			rcu_read_unlock();
			xa_erase_index(xa, index - 1);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}
}
961b803b428SMatthew Wilcox 
static noinline void check_find_1(struct xarray *xa)
{
	unsigned long i, j, k;

	XA_BUG_ON(xa, !xa_empty(xa));

	/*
	 * Check xa_find with all pairs between 0 and 99 inclusive,
	 * starting at every index between 0 and 99
	 */
	for (i = 0; i < 100; i++) {
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
		xa_set_mark(xa, i, XA_MARK_0);
		for (j = 0; j < i; j++) {
			XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) !=
					NULL);
			xa_set_mark(xa, j, XA_MARK_0);
			for (k = 0; k < 100; k++) {
				/*
				 * With marked entries at j and i (j < i), a
				 * search from k must land on j if k <= j, on
				 * i if k <= i, and find nothing otherwise -
				 * both for present and for marked lookups.
				 */
				unsigned long index = k;
				void *entry = xa_find(xa, &index, ULONG_MAX,
								XA_PRESENT);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);

				index = k;
				entry = xa_find(xa, &index, ULONG_MAX,
								XA_MARK_0);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);
			}
			/* Erasing j clears its mark but leaves i marked */
			xa_erase_index(xa, j);
			XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
		}
		xa_erase_index(xa, i);
		XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}
10098229706eSMatthew Wilcox 
10108229706eSMatthew Wilcox static noinline void check_find_2(struct xarray *xa)
10118229706eSMatthew Wilcox {
10128229706eSMatthew Wilcox 	void *entry;
10134a31896cSMatthew Wilcox 	unsigned long i, j, index;
10148229706eSMatthew Wilcox 
10154a31896cSMatthew Wilcox 	xa_for_each(xa, index, entry) {
10168229706eSMatthew Wilcox 		XA_BUG_ON(xa, true);
10178229706eSMatthew Wilcox 	}
10188229706eSMatthew Wilcox 
10198229706eSMatthew Wilcox 	for (i = 0; i < 1024; i++) {
10208229706eSMatthew Wilcox 		xa_store_index(xa, index, GFP_KERNEL);
10218229706eSMatthew Wilcox 		j = 0;
10224a31896cSMatthew Wilcox 		xa_for_each(xa, index, entry) {
1023b7677a13SMatthew Wilcox 			XA_BUG_ON(xa, xa_mk_index(index) != entry);
10248229706eSMatthew Wilcox 			XA_BUG_ON(xa, index != j++);
10258229706eSMatthew Wilcox 		}
10268229706eSMatthew Wilcox 	}
10278229706eSMatthew Wilcox 
10288229706eSMatthew Wilcox 	xa_destroy(xa);
10298229706eSMatthew Wilcox }
10308229706eSMatthew Wilcox 
/*
 * A marked walk over the range [j, k] with j > k must leave the
 * xa_state in the XAS_RESTART state rather than pointing at a node.
 */
static noinline void check_find_3(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long i, j, k;
	void *entry;

	for (i = 0; i < 100; i++) {
		for (j = 0; j < 100; j++) {
			rcu_read_lock();
			for (k = 0; k < 100; k++) {
				/* Walk marked entries from j up to k */
				xas_set(&xas, j);
				xas_for_each_marked(&xas, entry, k, XA_MARK_0)
					;
				if (j > k)
					XA_BUG_ON(xa,
						xas.xa_node != XAS_RESTART);
			}
			rcu_read_unlock();
		}
		/* Grow the array by one marked entry per outer iteration */
		xa_store_index(xa, i, GFP_KERNEL);
		xa_set_mark(xa, i, XA_MARK_0);
	}
	xa_destroy(xa);
}
105548483614SMatthew Wilcox 
1056430f24f9SMatthew Wilcox (Oracle) static noinline void check_find_4(struct xarray *xa)
1057430f24f9SMatthew Wilcox (Oracle) {
1058430f24f9SMatthew Wilcox (Oracle) 	unsigned long index = 0;
1059430f24f9SMatthew Wilcox (Oracle) 	void *entry;
1060430f24f9SMatthew Wilcox (Oracle) 
1061430f24f9SMatthew Wilcox (Oracle) 	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
1062430f24f9SMatthew Wilcox (Oracle) 
1063430f24f9SMatthew Wilcox (Oracle) 	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
1064430f24f9SMatthew Wilcox (Oracle) 	XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX));
1065430f24f9SMatthew Wilcox (Oracle) 
1066430f24f9SMatthew Wilcox (Oracle) 	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
1067430f24f9SMatthew Wilcox (Oracle) 	XA_BUG_ON(xa, entry);
1068430f24f9SMatthew Wilcox (Oracle) 
1069430f24f9SMatthew Wilcox (Oracle) 	xa_erase_index(xa, ULONG_MAX);
1070430f24f9SMatthew Wilcox (Oracle) }
1071430f24f9SMatthew Wilcox (Oracle) 
10728229706eSMatthew Wilcox static noinline void check_find(struct xarray *xa)
10738229706eSMatthew Wilcox {
107419c30f4dSMatthew Wilcox (Oracle) 	unsigned i;
107519c30f4dSMatthew Wilcox (Oracle) 
10768229706eSMatthew Wilcox 	check_find_1(xa);
10778229706eSMatthew Wilcox 	check_find_2(xa);
107848483614SMatthew Wilcox 	check_find_3(xa);
1079430f24f9SMatthew Wilcox (Oracle) 	check_find_4(xa);
108019c30f4dSMatthew Wilcox (Oracle) 
108119c30f4dSMatthew Wilcox (Oracle) 	for (i = 2; i < 10; i++)
108219c30f4dSMatthew Wilcox (Oracle) 		check_multi_find_1(xa, i);
1083b803b428SMatthew Wilcox 	check_multi_find_2(xa);
1084b803b428SMatthew Wilcox }
1085b803b428SMatthew Wilcox 
1086e21a2955SMatthew Wilcox /* See find_swap_entry() in mm/shmem.c */
1087e21a2955SMatthew Wilcox static noinline unsigned long xa_find_entry(struct xarray *xa, void *item)
1088e21a2955SMatthew Wilcox {
1089e21a2955SMatthew Wilcox 	XA_STATE(xas, xa, 0);
1090e21a2955SMatthew Wilcox 	unsigned int checked = 0;
1091e21a2955SMatthew Wilcox 	void *entry;
1092e21a2955SMatthew Wilcox 
1093e21a2955SMatthew Wilcox 	rcu_read_lock();
1094e21a2955SMatthew Wilcox 	xas_for_each(&xas, entry, ULONG_MAX) {
1095e21a2955SMatthew Wilcox 		if (xas_retry(&xas, entry))
1096e21a2955SMatthew Wilcox 			continue;
1097e21a2955SMatthew Wilcox 		if (entry == item)
1098e21a2955SMatthew Wilcox 			break;
1099e21a2955SMatthew Wilcox 		checked++;
1100e21a2955SMatthew Wilcox 		if ((checked % 4) != 0)
1101e21a2955SMatthew Wilcox 			continue;
1102e21a2955SMatthew Wilcox 		xas_pause(&xas);
1103e21a2955SMatthew Wilcox 	}
1104e21a2955SMatthew Wilcox 	rcu_read_unlock();
1105e21a2955SMatthew Wilcox 
1106e21a2955SMatthew Wilcox 	return entry ? xas.xa_index : -1;
1107e21a2955SMatthew Wilcox }
1108e21a2955SMatthew Wilcox 
static noinline void check_find_entry(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;
	unsigned long offset, index;

	/*
	 * Multi-index entries are found at every order and stride.
	 * NOTE(review): @offset only repeats the inner checks; it is
	 * never used to displace the indices - confirm whether that
	 * was the intent.
	 */
	for (order = 0; order < 20; order++) {
		for (offset = 0; offset < (1UL << (order + 3));
		     offset += (1UL << order)) {
			for (index = 0; index < (1UL << (order + 5));
			     index += (1UL << order)) {
				xa_store_order(xa, index, order,
						xa_mk_index(index), GFP_KERNEL);
				XA_BUG_ON(xa, xa_load(xa, index) !=
						xa_mk_index(index));
				XA_BUG_ON(xa, xa_find_entry(xa,
						xa_mk_index(index)) != index);
			}
			/* A pointer never stored in the array is not found */
			XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
			xa_destroy(xa);
		}
	}
#endif

	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	/*
	 * NB: "found at index ULONG_MAX" and the -1 not-found sentinel
	 * are the same value, so this assertion holds either way.
	 */
	XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1);
	xa_erase_index(xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}
1140e21a2955SMatthew Wilcox 
/* Check xas_next()/xas_prev() on an empty array and a lone entry at 0 */
static noinline void check_move_tiny(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);

	XA_BUG_ON(xa, !xa_empty(xa));
	rcu_read_lock();
	/* Stepping forward in an empty array finds nothing, repeatedly */
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	rcu_read_unlock();
	xa_store_index(xa, 0, GFP_KERNEL);
	rcu_read_lock();
	xas_set(&xas, 0);
	/* The first step returns the entry; the second runs off the end */
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_index(0));
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	/* And likewise when stepping backwards from index 0 */
	xas_set(&xas, 0);
	XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0));
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	rcu_read_unlock();
	xa_erase_index(xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));
}
116291abab83SMatthew Wilcox (Oracle) 
/* Check xas_find() with an entry at ULONG_MAX, with and without a pause */
static noinline void check_move_max(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);

	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	rcu_read_lock();
	/* Finding the last entry, then searching again, must terminate */
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
	rcu_read_unlock();

	/* The same must hold after pausing on the last entry */
	xas_set(&xas, 0);
	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
	xas_pause(&xas);
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
	rcu_read_unlock();

	xa_erase_index(xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}
118382a22311SMatthew Wilcox (Oracle) 
118464d3e9a9SMatthew Wilcox static noinline void check_move_small(struct xarray *xa, unsigned long idx)
118564d3e9a9SMatthew Wilcox {
118664d3e9a9SMatthew Wilcox 	XA_STATE(xas, xa, 0);
118764d3e9a9SMatthew Wilcox 	unsigned long i;
118864d3e9a9SMatthew Wilcox 
118964d3e9a9SMatthew Wilcox 	xa_store_index(xa, 0, GFP_KERNEL);
119064d3e9a9SMatthew Wilcox 	xa_store_index(xa, idx, GFP_KERNEL);
119164d3e9a9SMatthew Wilcox 
119264d3e9a9SMatthew Wilcox 	rcu_read_lock();
119364d3e9a9SMatthew Wilcox 	for (i = 0; i < idx * 4; i++) {
119464d3e9a9SMatthew Wilcox 		void *entry = xas_next(&xas);
119564d3e9a9SMatthew Wilcox 		if (i <= idx)
119664d3e9a9SMatthew Wilcox 			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
119764d3e9a9SMatthew Wilcox 		XA_BUG_ON(xa, xas.xa_index != i);
119864d3e9a9SMatthew Wilcox 		if (i == 0 || i == idx)
1199b7677a13SMatthew Wilcox 			XA_BUG_ON(xa, entry != xa_mk_index(i));
120064d3e9a9SMatthew Wilcox 		else
120164d3e9a9SMatthew Wilcox 			XA_BUG_ON(xa, entry != NULL);
120264d3e9a9SMatthew Wilcox 	}
120364d3e9a9SMatthew Wilcox 	xas_next(&xas);
120464d3e9a9SMatthew Wilcox 	XA_BUG_ON(xa, xas.xa_index != i);
120564d3e9a9SMatthew Wilcox 
120664d3e9a9SMatthew Wilcox 	do {
120764d3e9a9SMatthew Wilcox 		void *entry = xas_prev(&xas);
120864d3e9a9SMatthew Wilcox 		i--;
120964d3e9a9SMatthew Wilcox 		if (i <= idx)
121064d3e9a9SMatthew Wilcox 			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
121164d3e9a9SMatthew Wilcox 		XA_BUG_ON(xa, xas.xa_index != i);
121264d3e9a9SMatthew Wilcox 		if (i == 0 || i == idx)
1213b7677a13SMatthew Wilcox 			XA_BUG_ON(xa, entry != xa_mk_index(i));
121464d3e9a9SMatthew Wilcox 		else
121564d3e9a9SMatthew Wilcox 			XA_BUG_ON(xa, entry != NULL);
121664d3e9a9SMatthew Wilcox 	} while (i > 0);
121764d3e9a9SMatthew Wilcox 
121864d3e9a9SMatthew Wilcox 	xas_set(&xas, ULONG_MAX);
121964d3e9a9SMatthew Wilcox 	XA_BUG_ON(xa, xas_next(&xas) != NULL);
122064d3e9a9SMatthew Wilcox 	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
122164d3e9a9SMatthew Wilcox 	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_value(0));
122264d3e9a9SMatthew Wilcox 	XA_BUG_ON(xa, xas.xa_index != 0);
122364d3e9a9SMatthew Wilcox 	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
122464d3e9a9SMatthew Wilcox 	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
122564d3e9a9SMatthew Wilcox 	rcu_read_unlock();
122664d3e9a9SMatthew Wilcox 
122764d3e9a9SMatthew Wilcox 	xa_erase_index(xa, 0);
122864d3e9a9SMatthew Wilcox 	xa_erase_index(xa, idx);
122964d3e9a9SMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
123064d3e9a9SMatthew Wilcox }
123164d3e9a9SMatthew Wilcox 
/*
 * Build a 2^16-entry array and walk it backwards and forwards with
 * xas_prev()/xas_next(), first fully populated and then after punching
 * a hole in the middle, checking entry value and iterator position at
 * every step.  Finishes by running the smaller movement tests.
 */
static noinline void check_move(struct xarray *xa)
{
	XA_STATE(xas, xa, (1 << 16) - 1);
	unsigned long i;

	for (i = 0; i < (1 << 16); i++)
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);

	rcu_read_lock();
	/* Walk backwards from the last index down to 0. */
	do {
		void *entry = xas_prev(&xas);
		i--;
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	/* Stepping back from index 0 wraps to ULONG_MAX and finds nothing. */
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	/* And forwards again over the whole range. */
	do {
		void *entry = xas_next(&xas);
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	/* Punch a hole covering [1 << 8, 1 << 15) ... */
	for (i = (1 << 8); i < (1 << 15); i++)
		xa_erase_index(xa, i);

	i = xas.xa_index;

	/* ... then repeat both walks, expecting NULL inside the hole. */
	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	xa_destroy(xa);

	/* Smaller, targeted movement tests. */
	check_move_tiny(xa);
	check_move_max(xa);

	/* Power-of-two and just-below-power-of-two second indices. */
	for (i = 0; i < 16; i++)
		check_move_small(xa, 1UL << i);

	for (i = 2; i < 16; i++)
		check_move_small(xa, (1UL << i) - 1);
}
130064d3e9a9SMatthew Wilcox 
/*
 * Store 2^order consecutive entries starting at the order-aligned
 * position derived from @index, using xas_create_range() followed by a
 * slot-by-slot xas_store().  The whole locked transaction is retried
 * via xas_nomem() if memory allocation fails.  The range must be free
 * of conflicting entries on entry.
 */
static noinline void xa_store_many_order(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned int i = 0;

	do {
		xas_lock(&xas);
		/* Caller guarantees the range is currently unoccupied. */
		XA_BUG_ON(xa, xas_find_conflict(&xas));
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1U << order); i++) {
			/* Every slot was just created, so must be empty. */
			XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i)));
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));
}
13232264f513SMatthew Wilcox 
13242264f513SMatthew Wilcox static noinline void check_create_range_1(struct xarray *xa,
13252264f513SMatthew Wilcox 		unsigned long index, unsigned order)
13262264f513SMatthew Wilcox {
13272264f513SMatthew Wilcox 	unsigned long i;
13282264f513SMatthew Wilcox 
13292264f513SMatthew Wilcox 	xa_store_many_order(xa, index, order);
13302264f513SMatthew Wilcox 	for (i = index; i < index + (1UL << order); i++)
13312264f513SMatthew Wilcox 		xa_erase_index(xa, i);
13322264f513SMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
13332264f513SMatthew Wilcox }
13342264f513SMatthew Wilcox 
13352264f513SMatthew Wilcox static noinline void check_create_range_2(struct xarray *xa, unsigned order)
13362264f513SMatthew Wilcox {
13372264f513SMatthew Wilcox 	unsigned long i;
13382264f513SMatthew Wilcox 	unsigned long nr = 1UL << order;
13392264f513SMatthew Wilcox 
13402264f513SMatthew Wilcox 	for (i = 0; i < nr * nr; i += nr)
13412264f513SMatthew Wilcox 		xa_store_many_order(xa, i, order);
13422264f513SMatthew Wilcox 	for (i = 0; i < nr * nr; i++)
13432264f513SMatthew Wilcox 		xa_erase_index(xa, i);
13442264f513SMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
13452264f513SMatthew Wilcox }
13462264f513SMatthew Wilcox 
/*
 * xas_create_range() must be a no-op when the xa_state already carries
 * an error; check the pre-set error survives untouched.
 */
static noinline void check_create_range_3(void)
{
	XA_STATE(xas, NULL, 0);
	xas_set_err(&xas, -EEXIST);
	xas_create_range(&xas);
	XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST);
}
13542264f513SMatthew Wilcox 
/*
 * Like xa_store_many_order(), but with an entry pre-existing at @index
 * inside the range being created.  Checks that xas_create_range()
 * tolerates the existing entry and that storing over it returns the
 * old entry while every other slot was empty.
 */
static noinline void check_create_range_4(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	/* base is xas's starting position — may differ from the raw @index */
	unsigned long base = xas.xa_index;
	unsigned long i = 0;

	xa_store_index(xa, index, GFP_KERNEL);
	do {
		xas_lock(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1UL << order); i++) {
			void *old = xas_store(&xas, xa_mk_index(base + i));
			/* Only the pre-stored slot returns a previous entry. */
			if (xas.xa_index == index)
				XA_BUG_ON(xa, old != xa_mk_index(base + i));
			else
				XA_BUG_ON(xa, old != NULL);
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));

	for (i = base; i < base + (1UL << order); i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}
13862264f513SMatthew Wilcox 
13872264f513SMatthew Wilcox static noinline void check_create_range(struct xarray *xa)
13882264f513SMatthew Wilcox {
13892264f513SMatthew Wilcox 	unsigned int order;
13902264f513SMatthew Wilcox 	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 12 : 1;
13912264f513SMatthew Wilcox 
13922264f513SMatthew Wilcox 	for (order = 0; order < max_order; order++) {
13932264f513SMatthew Wilcox 		check_create_range_1(xa, 0, order);
13942264f513SMatthew Wilcox 		check_create_range_1(xa, 1U << order, order);
13952264f513SMatthew Wilcox 		check_create_range_1(xa, 2U << order, order);
13962264f513SMatthew Wilcox 		check_create_range_1(xa, 3U << order, order);
13972264f513SMatthew Wilcox 		check_create_range_1(xa, 1U << 24, order);
13982264f513SMatthew Wilcox 		if (order < 10)
13992264f513SMatthew Wilcox 			check_create_range_2(xa, order);
14002264f513SMatthew Wilcox 
14012264f513SMatthew Wilcox 		check_create_range_4(xa, 0, order);
14022264f513SMatthew Wilcox 		check_create_range_4(xa, 1U << order, order);
14032264f513SMatthew Wilcox 		check_create_range_4(xa, 2U << order, order);
14042264f513SMatthew Wilcox 		check_create_range_4(xa, 3U << order, order);
14052264f513SMatthew Wilcox 		check_create_range_4(xa, 1U << 24, order);
14062264f513SMatthew Wilcox 
14072264f513SMatthew Wilcox 		check_create_range_4(xa, 1, order);
14082264f513SMatthew Wilcox 		check_create_range_4(xa, (1U << order) + 1, order);
14092264f513SMatthew Wilcox 		check_create_range_4(xa, (2U << order) + 1, order);
14102264f513SMatthew Wilcox 		check_create_range_4(xa, (2U << order) - 1, order);
14112264f513SMatthew Wilcox 		check_create_range_4(xa, (3U << order) + 1, order);
14122264f513SMatthew Wilcox 		check_create_range_4(xa, (3U << order) - 1, order);
14132264f513SMatthew Wilcox 		check_create_range_4(xa, (1U << 24) + 1, order);
14142264f513SMatthew Wilcox 	}
14152264f513SMatthew Wilcox 
14162264f513SMatthew Wilcox 	check_create_range_3();
14172264f513SMatthew Wilcox }
14182264f513SMatthew Wilcox 
/*
 * Store one multi-index entry covering [first, last], check it is
 * visible at both ends of the range but not just outside it, then
 * store NULL over the range to remove it.  The body is compiled out
 * (bar the final emptiness check) without CONFIG_XARRAY_MULTI.
 */
static noinline void __check_store_range(struct xarray *xa, unsigned long first,
		unsigned long last)
{
#ifdef CONFIG_XARRAY_MULTI
	xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL);

	/* Visible at both boundaries of the range ... */
	XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first));
	XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first));
	/* ... but not immediately outside it. */
	XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL);
	XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL);

	xa_store_range(xa, first, last, NULL, GFP_KERNEL);
#endif

	XA_BUG_ON(xa, !xa_empty(xa));
}
14350e9446c3SMatthew Wilcox 
14360e9446c3SMatthew Wilcox static noinline void check_store_range(struct xarray *xa)
14370e9446c3SMatthew Wilcox {
14380e9446c3SMatthew Wilcox 	unsigned long i, j;
14390e9446c3SMatthew Wilcox 
14400e9446c3SMatthew Wilcox 	for (i = 0; i < 128; i++) {
14410e9446c3SMatthew Wilcox 		for (j = i; j < 128; j++) {
14420e9446c3SMatthew Wilcox 			__check_store_range(xa, i, j);
14430e9446c3SMatthew Wilcox 			__check_store_range(xa, 128 + i, 128 + j);
14440e9446c3SMatthew Wilcox 			__check_store_range(xa, 4095 + i, 4095 + j);
14450e9446c3SMatthew Wilcox 			__check_store_range(xa, 4096 + i, 4096 + j);
14460e9446c3SMatthew Wilcox 			__check_store_range(xa, 123456 + i, 123456 + j);
14475404a7f1SMatthew Wilcox 			__check_store_range(xa, (1 << 24) + i, (1 << 24) + j);
14480e9446c3SMatthew Wilcox 		}
14490e9446c3SMatthew Wilcox 	}
14500e9446c3SMatthew Wilcox }
14510e9446c3SMatthew Wilcox 
/*
 * Allocate eight IDs whose payloads are successively offset pointers
 * into @name, checking IDs come out sequentially from 0 and that
 * iteration never yields anything that looks like an error entry
 * (ie odd pointer alignment must not be misinterpreted internally).
 */
static void check_align_1(struct xarray *xa, char *name)
{
	int i;
	unsigned int id;
	unsigned long index;
	void *entry;

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_alloc(xa, &id, name + i, xa_limit_32b,
					GFP_KERNEL) != 0);
		/* IDs are expected to be handed out sequentially from 0. */
		XA_BUG_ON(xa, id != i);
	}
	xa_for_each(xa, index, entry)
		XA_BUG_ON(xa, xa_is_err(entry));
	xa_destroy(xa);
}
146876b4e529SMatthew Wilcox 
/*
 * We should always be able to store without allocating memory after
 * reserving a slot.
 */
static void check_align_2(struct xarray *xa, char *name)
{
	int i;

	XA_BUG_ON(xa, !xa_empty(xa));

	/* Oddly-aligned pointers store and erase cleanly at index 0. */
	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_store(xa, 0, name + i, GFP_KERNEL) != NULL);
		xa_erase(xa, 0);
	}

	/* After xa_reserve(), a store with no GFP flags must succeed. */
	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_reserve(xa, 0, GFP_KERNEL) != 0);
		XA_BUG_ON(xa, xa_store(xa, 0, name + i, 0) != NULL);
		xa_erase(xa, 0);
	}

	XA_BUG_ON(xa, !xa_empty(xa));
}
14922fbe967bSMatthew Wilcox 
149376b4e529SMatthew Wilcox static noinline void check_align(struct xarray *xa)
149476b4e529SMatthew Wilcox {
149576b4e529SMatthew Wilcox 	char name[] = "Motorola 68000";
149676b4e529SMatthew Wilcox 
149776b4e529SMatthew Wilcox 	check_align_1(xa, name);
149876b4e529SMatthew Wilcox 	check_align_1(xa, name + 1);
149976b4e529SMatthew Wilcox 	check_align_1(xa, name + 2);
150076b4e529SMatthew Wilcox 	check_align_1(xa, name + 3);
15012fbe967bSMatthew Wilcox 	check_align_2(xa, name);
150276b4e529SMatthew Wilcox }
150376b4e529SMatthew Wilcox 
1504a97e7904SMatthew Wilcox static LIST_HEAD(shadow_nodes);
1505a97e7904SMatthew Wilcox 
1506a97e7904SMatthew Wilcox static void test_update_node(struct xa_node *node)
1507a97e7904SMatthew Wilcox {
1508a97e7904SMatthew Wilcox 	if (node->count && node->count == node->nr_values) {
1509a97e7904SMatthew Wilcox 		if (list_empty(&node->private_list))
1510a97e7904SMatthew Wilcox 			list_add(&shadow_nodes, &node->private_list);
1511a97e7904SMatthew Wilcox 	} else {
1512a97e7904SMatthew Wilcox 		if (!list_empty(&node->private_list))
1513a97e7904SMatthew Wilcox 			list_del_init(&node->private_list);
1514a97e7904SMatthew Wilcox 	}
1515a97e7904SMatthew Wilcox }
1516a97e7904SMatthew Wilcox 
/*
 * Free every node on shadow_nodes by hand-crafting an xa_state that
 * points at the node's slot in its parent and storing NULL through it
 * — presumably mimicking how the workingset code reclaims nodes full
 * of shadow entries (TODO: confirm against mm/workingset.c).
 */
static noinline void shadow_remove(struct xarray *xa)
{
	struct xa_node *node;

	xa_lock(xa);
	while ((node = list_first_entry_or_null(&shadow_nodes,
					struct xa_node, private_list))) {
		XA_STATE(xas, node->array, 0);
		XA_BUG_ON(xa, node->array != xa);
		list_del_init(&node->private_list);
		/* Aim the cursor at this node's slot in its parent. */
		xas.xa_node = xa_parent_locked(node->array, node);
		xas.xa_offset = node->offset;
		xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
		xas_set_update(&xas, test_update_node);
		xas_store(&xas, NULL);
	}
	xa_unlock(xa);
}
1535a97e7904SMatthew Wilcox 
/*
 * Check the update_node callback machinery: a node holding only value
 * entries must land on shadow_nodes, storing a pointer entry must take
 * it back off, and shadow_remove() must free everything listed.
 */
static noinline void check_workingset(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	xas_set_update(&xas, test_update_node);

	do {
		xas_lock(&xas);
		/* Two adjacent value entries: node holds values only. */
		xas_store(&xas, xa_mk_value(0));
		xas_next(&xas);
		xas_store(&xas, xa_mk_value(1));
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	xas_lock(&xas);
	xas_next(&xas);
	/* A pointer entry makes the node mixed — off the list. */
	xas_store(&xas, &xas);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));

	/* Back to values only — on the list again. */
	xas_store(&xas, xa_mk_value(2));
	xas_unlock(&xas);
	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	shadow_remove(xa);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));
	XA_BUG_ON(xa, !xa_empty(xa));
}
1564a97e7904SMatthew Wilcox 
/*
 * Check that the pointer / value / sibling entries are accounted the
 * way we expect them to be.
 */
static noinline void check_account(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;

	for (order = 1; order < 12; order++) {
		XA_STATE(xas, xa, 1 << order);

		/* A multi-order pointer entry: counted, but not a value. */
		xa_store_order(xa, 0, order, xa, GFP_KERNEL);
		rcu_read_lock();
		xas_load(&xas);
		XA_BUG_ON(xa, xas.xa_node->count == 0);
		XA_BUG_ON(xa, xas.xa_node->count > (1 << order));
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
		rcu_read_unlock();

		/* Add a value entry of the same order alongside it. */
		xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order),
				GFP_KERNEL);
		XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2);

		/* Erasing the value entry drops nr_values back to zero. */
		xa_erase(xa, 1 << order);
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);

		xa_erase(xa, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#endif
}
1597d6427f81SMatthew Wilcox 
/*
 * Check that xa_destroy() leaves the array empty whatever it holds:
 * nothing at all, a single entry at various indices (including
 * ULONG_MAX), or a multi-index entry.
 */
static noinline void check_destroy(struct xarray *xa)
{
	unsigned long index;

	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an empty array is a no-op */
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an array with a single entry */
	for (index = 0; index < 1000; index++) {
		xa_store_index(xa, index, GFP_KERNEL);
		XA_BUG_ON(xa, xa_empty(xa));
		xa_destroy(xa);
		XA_BUG_ON(xa, !xa_empty(xa));
	}

	/* Destroying an array with a single entry at ULONG_MAX */
	xa_store(xa, ULONG_MAX, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

#ifdef CONFIG_XARRAY_MULTI
	/* Destroying an array with a multi-index entry */
	xa_store_order(xa, 1 << 11, 11, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}
1630687149fcSMatthew Wilcox 
163158d6ea30SMatthew Wilcox static DEFINE_XARRAY(array);
1632ad3d6c72SMatthew Wilcox 
/*
 * Module init: run every check in sequence and report the tally.
 * Returns 0 only if all assertions passed, so a failed module load
 * signals overall test failure.
 * (xa0 is a file-scope array declared earlier in this file.)
 */
static int xarray_checks(void)
{
	check_xa_err(&array);
	check_xas_retry(&array);
	check_xa_load(&array);
	check_xa_mark(&array);
	check_xa_shrink(&array);
	check_xas_erase(&array);
	check_insert(&array);
	check_cmpxchg(&array);
	check_reserve(&array);
	check_reserve(&xa0);
	check_multi_store(&array);
	check_xa_alloc();
	check_find(&array);
	check_find_entry(&array);
	check_account(&array);
	check_destroy(&array);
	check_move(&array);
	check_create_range(&array);
	check_store_range(&array);
	check_store_iter(&array);
	check_align(&xa0);

	check_workingset(&array, 0);
	check_workingset(&array, 64);
	check_workingset(&array, 4096);

	printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
	return (tests_run == tests_passed) ? 0 : -EINVAL;
}
1664ad3d6c72SMatthew Wilcox 
/* Module exit: nothing to tear down. */
static void xarray_exit(void)
{
}
1668ad3d6c72SMatthew Wilcox 
module_init(xarray_checks);	/* the checks run at module load time */
module_exit(xarray_exit);
MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
MODULE_LICENSE("GPL");
1673