// SPDX-License-Identifier: GPL-2.0+
/*
 * test_xarray.c: Test the XArray API
 * Copyright (c) 2017-2018 Microsoft Corporation
 * Author: Matthew Wilcox <willy@infradead.org>
 */

#include <linux/xarray.h>
#include <linux/module.h>

static unsigned int tests_run;
static unsigned int tests_passed;

#ifndef XA_DEBUG
# ifdef __KERNEL__
void xa_dump(const struct xarray *xa) { }
# endif
#undef XA_BUG_ON
#define XA_BUG_ON(xa, x) do {					\
	tests_run++;						\
	if (x) {						\
		printk("BUG at %s:%d\n", __func__, __LINE__);	\
		xa_dump(xa);					\
		dump_stack();					\
	} else {						\
		tests_passed++;					\
	}							\
} while (0)
#endif

/* Value entries can only encode values up to LONG_MAX, so mask the index */
static void *xa_mk_index(unsigned long index)
{
	return xa_mk_value(index & LONG_MAX);
}

static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_store(xa, index, xa_mk_index(index), gfp);
}

static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	u32 id;

	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(index), xa_limit_32b,
				gfp) != 0);
	XA_BUG_ON(xa, id != index);
}

static void xa_erase_index(struct xarray *xa, unsigned long index)
{
	XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, index) != NULL);
}

/*
 * If anyone needs this, please move it to xarray.c.  We have no current
 * users outside the test suite because all current multislot users want
 * to use the advanced API.
 */
static void *xa_store_order(struct xarray *xa, unsigned long index,
		unsigned order, void *entry, gfp_t gfp)
{
	XA_STATE_ORDER(xas, xa, index, order);
	void *curr;

	do {
		xas_lock(&xas);
		curr = xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return curr;
}

static noinline void check_xa_err(struct xarray *xa)
{
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0);
#ifndef __KERNEL__
	/* The kernel does not fail GFP_NOWAIT allocations */
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
#endif
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0);
	// kills the test-suite :-(
	// XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
}

static noinline void check_xas_retry(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, 1, GFP_KERNEL);

	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
	xa_erase_index(xa, 1);
	XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
	XA_BUG_ON(xa, xas_retry(&xas, NULL));
	XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
	xas_reset(&xas);
	XA_BUG_ON(xa, xas.xa_node != XAS_RESTART);
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_node != NULL);
	rcu_read_unlock();

	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

	rcu_read_lock();
	XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas)));
	xas.xa_node = XAS_RESTART;
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	rcu_read_unlock();

	/* Make sure we can iterate through retry entries */
	xas_lock(&xas);
	xas_set(&xas, 0);
	xas_store(&xas, XA_RETRY_ENTRY);
	xas_set(&xas, 1);
	xas_store(&xas, XA_RETRY_ENTRY);

	xas_set(&xas, 0);
	xas_for_each(&xas, entry, ULONG_MAX) {
		xas_store(&xas, xa_mk_index(xas.xa_index));
	}
	xas_unlock(&xas);

	xa_erase_index(xa, 0);
	xa_erase_index(xa, 1);
}

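/*
 * Editorial sketch, not part of the test suite: the reader-side idiom
 * that the retry checks above exercise.  A walker under the RCU read
 * lock may observe a retry entry while a node is being rebuilt and is
 * expected to restart the walk.  example_load_with_retry() is a
 * hypothetical helper, shown only for illustration; it mirrors what
 * xa_load() does internally.
 */
static inline void *example_load_with_retry(struct xarray *xa,
		unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_load(&xas);
		if (xa_is_zero(entry))	/* reserved entries read as NULL */
			entry = NULL;
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	return entry;
}
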
static noinline void check_xa_load(struct xarray *xa)
{
	unsigned long i, j;

	for (i = 0; i < 1024; i++) {
		for (j = 0; j < 1024; j++) {
			void *entry = xa_load(xa, j);
			if (j < i)
				XA_BUG_ON(xa, xa_to_value(entry) != j);
			else
				XA_BUG_ON(xa, entry);
		}
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
	}

	for (i = 0; i < 1024; i++) {
		for (j = 0; j < 1024; j++) {
			void *entry = xa_load(xa, j);
			if (j >= i)
				XA_BUG_ON(xa, xa_to_value(entry) != j);
			else
				XA_BUG_ON(xa, entry);
		}
		xa_erase_index(xa, i);
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1;

	/* NULL elements have no marks set */
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/* Storing a pointer will not make a mark appear */
	XA_BUG_ON(xa, xa_store_index(xa, index, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));

	/* Setting one mark will not set another mark */
	XA_BUG_ON(xa, xa_get_mark(xa, index + 1, XA_MARK_0));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1));

	/* Storing NULL clears marks, and they can't be set again */
	xa_erase_index(xa, index);
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/*
	 * Storing a multi-index entry over entries with marks gives the
	 * entire entry the union of the marks
	 */
	BUG_ON((index % 4) != 0);
	for (order = 2; order < max_order; order++) {
		unsigned long base = round_down(index, 1UL << order);
		unsigned long next = base + (1UL << order);
		unsigned long i;

		XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
		xa_set_mark(xa, index + 1, XA_MARK_0);
		XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
		xa_set_mark(xa, index + 2, XA_MARK_2);
		XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
		xa_store_order(xa, index, order, xa_mk_index(index),
				GFP_KERNEL);
		for (i = base; i < next; i++) {
			XA_STATE(xas, xa, i);
			unsigned int seen = 0;
			void *entry;

			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
			XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2));

			/* We should see two elements in the array */
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX)
				seen++;
			rcu_read_unlock();
			XA_BUG_ON(xa, seen != 2);

			/* One of which is marked */
			xas_set(&xas, 0);
			seen = 0;
			rcu_read_lock();
			xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
				seen++;
			rcu_read_unlock();
			XA_BUG_ON(xa, seen != 1);
		}
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2));
		xa_erase_index(xa, index);
		xa_erase_index(xa, next);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_xa_mark_2(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long index;
	unsigned int count = 0;
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_set_mark(xa, 0, XA_MARK_0);
	xas_lock(&xas);
	xas_load(&xas);
	xas_init_marks(&xas);
	xas_unlock(&xas);
	/* xas_init_marks() resets the marks to the array default: all clear */
	XA_BUG_ON(xa, xa_get_mark(xa, 0, XA_MARK_0));

	for (index = 3500; index < 4500; index++) {
		xa_store_index(xa, index, GFP_KERNEL);
		xa_set_mark(xa, index, XA_MARK_0);
	}

	xas_reset(&xas);
	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
		count++;
	rcu_read_unlock();
	XA_BUG_ON(xa, count != 1000);

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		xas_init_marks(&xas);
		XA_BUG_ON(xa, xa_get_mark(xa, xas.xa_index, XA_MARK_0));
		XA_BUG_ON(xa, xas_get_mark(&xas, XA_MARK_0));
	}
	xas_unlock(&xas);

	xa_destroy(xa);
}

static noinline void check_xa_mark(struct xarray *xa)
{
	unsigned long index;

	for (index = 0; index < 16384; index += 4)
		check_xa_mark_1(xa, index);

	check_xa_mark_2(xa);
}

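/*
 * Editorial sketch, not part of the test suite: the normal-API mark
 * pattern the checks above exercise.  example_count_marked() is a
 * hypothetical helper that visits only the entries with XA_MARK_0 set.
 */
static inline unsigned int example_count_marked(struct xarray *xa)
{
	unsigned long index;
	void *entry;
	unsigned int count = 0;

	xa_for_each_marked(xa, index, entry, XA_MARK_0)
		count++;

	return count;
}
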
static noinline void check_xa_shrink(struct xarray *xa)
{
	XA_STATE(xas, xa, 1);
	struct xa_node *node;
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 15 : 1;

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

	/*
	 * Check that erasing the entry at 1 shrinks the tree and properly
	 * marks the node as being deleted.
	 */
	xas_lock(&xas);
	XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1));
	node = xas.xa_node;
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != NULL);
	XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS);
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY);
	XA_BUG_ON(xa, xas_load(&xas) != NULL);
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	xa_erase_index(xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (order = 0; order < max_order; order++) {
		unsigned long max = (1UL << order) - 1;
		xa_store_order(xa, 0, order, xa_mk_value(0), GFP_KERNEL);
		XA_BUG_ON(xa, xa_load(xa, max) != xa_mk_value(0));
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
		rcu_read_lock();
		node = xa_head(xa);
		rcu_read_unlock();
		XA_BUG_ON(xa, xa_store_index(xa, ULONG_MAX, GFP_KERNEL) !=
				NULL);
		rcu_read_lock();
		XA_BUG_ON(xa, xa_head(xa) == node);
		rcu_read_unlock();
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
		xa_erase_index(xa, ULONG_MAX);
		XA_BUG_ON(xa, xa->xa_head != node);
		xa_erase_index(xa, 0);
	}
}

static noinline void check_cmpxchg(struct xarray *xa)
{
	void *FIVE = xa_mk_value(5);
	void *SIX = xa_mk_value(6);
	void *LOTS = xa_mk_value(12345678);

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
	xa_erase_index(xa, 12345678);
	xa_erase_index(xa, 5);
	XA_BUG_ON(xa, !xa_empty(xa));
}

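/*
 * Editorial sketch, not part of the test suite: the insert-if-absent
 * idiom that check_cmpxchg() relies on.  A NULL return means our entry
 * was installed; anything else is the entry that was already present.
 * xa_insert() packages the same operation and returns -EBUSY instead.
 * example_insert_if_absent() is a hypothetical helper.
 */
static inline void *example_insert_if_absent(struct xarray *xa,
		unsigned long index, void *entry)
{
	return xa_cmpxchg(xa, index, NULL, entry, GFP_KERNEL);
}
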
static noinline void check_reserve(struct xarray *xa)
{
	void *entry;
	unsigned long index;
	int count;

	/* An array with a reserved entry is not empty */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_load(xa, 12345678));
	xa_release(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Releasing a used entry does nothing */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* cmpxchg sees a reserved entry as ZERO */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, XA_ZERO_ENTRY,
				xa_mk_value(12345678), GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* xa_insert treats it as busy */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) !=
			-EBUSY);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Can iterate through a reserved entry */
	xa_store_index(xa, 5, GFP_KERNEL);
	XA_BUG_ON(xa, xa_reserve(xa, 6, GFP_KERNEL) != 0);
	xa_store_index(xa, 7, GFP_KERNEL);

	count = 0;
	xa_for_each(xa, index, entry) {
		XA_BUG_ON(xa, index != 5 && index != 7);
		count++;
	}
	XA_BUG_ON(xa, count != 2);

	/* If we free a reserved entry, we should be able to allocate it */
	if (xa->xa_flags & XA_FLAGS_ALLOC) {
		u32 id;

		XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(8),
					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != 8);

		xa_release(xa, 6);
		XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(6),
					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != 6);
	}

	xa_destroy(xa);
}

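/*
 * Editorial sketch, not part of the test suite: the reserve-then-store
 * pattern that check_reserve() (and check_align_2() below) covers.
 * The slot is allocated up front with GFP_KERNEL, so the later store
 * needs no memory and cannot fail.  example_reserve_then_store() is a
 * hypothetical helper.
 */
static inline int example_reserve_then_store(struct xarray *xa,
		unsigned long index, void *entry)
{
	int err = xa_reserve(xa, index, GFP_KERNEL);

	if (err)
		return err;
	/* No allocation can be needed here, so a gfp of 0 is safe */
	xa_store(xa, index, entry, 0);
	return 0;
}
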
static noinline void check_xas_erase(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned long i, j;

	for (i = 0; i < 200; i++) {
		for (j = i; j < 2 * i + 17; j++) {
			xas_set(&xas, j);
			do {
				xas_lock(&xas);
				xas_store(&xas, xa_mk_index(j));
				xas_unlock(&xas);
			} while (xas_nomem(&xas, GFP_KERNEL));
		}

		xas_set(&xas, ULONG_MAX);
		do {
			xas_lock(&xas);
			xas_store(&xas, xa_mk_value(0));
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));

		xas_lock(&xas);
		xas_store(&xas, NULL);

		xas_set(&xas, 0);
		j = i;
		xas_for_each(&xas, entry, ULONG_MAX) {
			XA_BUG_ON(xa, entry != xa_mk_index(j));
			xas_store(&xas, NULL);
			j++;
		}
		xas_unlock(&xas);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
}

#ifdef CONFIG_XARRAY_MULTI
static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, index);
	unsigned long min = index & ~((1UL << order) - 1);
	unsigned long max = min + (1UL << order);

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);

	xas_lock(&xas);
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index));
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min));
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min));
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);

	xa_erase_index(xa, min);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, index);
	xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);

	xas_lock(&xas);
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != index);
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	xas_unlock(&xas);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_multi_store_3(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	int n = 0;

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 1);
	xas_set(&xas, index + 1);
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 2);
	xas_unlock(&xas);

	xa_destroy(xa);
}
#endif

static noinline void check_multi_store(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long i, j, k;
	unsigned int max_order = (sizeof(long) == 4) ? 30 : 60;

	/* Loading from any position returns the same value */
	xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Storing adjacent to the value does not alter the value */
	xa_store(xa, 3, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Overwriting multiple indexes works */
	xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 4) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4);
	rcu_read_unlock();

	/* We can erase multiple values with a single store */
	xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Even when the first slot is empty but the others aren't */
	xa_store_index(xa, 1, GFP_KERNEL);
	xa_store_index(xa, 2, GFP_KERNEL);
	xa_store_order(xa, 0, 2, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0; i < max_order; i++) {
		for (j = 0; j < max_order; j++) {
			xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL);
			xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL);

			for (k = 0; k < max_order; k++) {
				void *entry = xa_load(xa, (1UL << k) - 1);
				if ((i < k) && (j < k))
					XA_BUG_ON(xa, entry != NULL);
				else
					XA_BUG_ON(xa, entry != xa_mk_index(j));
			}

			xa_erase(xa, 0);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}

	for (i = 0; i < 20; i++) {
		check_multi_store_1(xa, 200, i);
		check_multi_store_1(xa, 0, i);
		check_multi_store_1(xa, (1UL << i) + 1, i);
	}
	check_multi_store_2(xa, 4095, 9);

	for (i = 1; i < 20; i++) {
		check_multi_store_3(xa, 0, i);
		check_multi_store_3(xa, 1UL << i, i);
	}
#endif
}

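#ifdef CONFIG_XARRAY_MULTI
/*
 * Editorial sketch, not part of the test suite: the property the
 * multi-store checks above rely on.  One order-4 entry covers the
 * sixteen aligned indices 0-15, and a load of any of them returns it.
 * example_multi_index_load() is a hypothetical helper.
 */
static inline void *example_multi_index_load(struct xarray *xa)
{
	xa_store_order(xa, 0, 4, xa_mk_value(42), GFP_KERNEL);
	return xa_load(xa, 9);		/* returns xa_mk_value(42) */
}
#endif
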
static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base)
{
	int i;
	u32 id;

	XA_BUG_ON(xa, !xa_empty(xa));
	/* An empty array should assign %base to the first alloc */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Erasing it should make the array empty again */
	xa_erase_index(xa, base);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* And it should assign %base again */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Allocating and then erasing a lot should not lose base */
	for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++)
		xa_alloc_index(xa, i, GFP_KERNEL);
	for (i = base; i < 2 * XA_CHUNK_SIZE; i++)
		xa_erase_index(xa, i);
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Destroying the array should do the same as erasing */
	xa_destroy(xa);

	/* And it should assign %base again */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* The next assigned ID should be base+1 */
	xa_alloc_index(xa, base + 1, GFP_KERNEL);
	xa_erase_index(xa, base + 1);

	/* Storing a value should mark it used */
	xa_store_index(xa, base + 1, GFP_KERNEL);
	xa_alloc_index(xa, base + 2, GFP_KERNEL);

	/* If we then erase base, it should be free */
	xa_erase_index(xa, base);
	xa_alloc_index(xa, base, GFP_KERNEL);

	xa_erase_index(xa, base + 1);
	xa_erase_index(xa, base + 2);

	for (i = 1; i < 5000; i++) {
		xa_alloc_index(xa, base + i, GFP_KERNEL);
	}

	xa_destroy(xa);

	/* Check that we fail properly at the limit of allocation */
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX - 1),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0xfffffffeU);
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0xffffffffU);
	id = 3;
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(0),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, id != 3);
	xa_destroy(xa);

	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
	xa_erase_index(xa, 3);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base)
{
	unsigned int i, id;
	unsigned long index;
	void *entry;

	/* Allocate and free a NULL and check xa_empty() behaves */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, id) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Ditto, but check destroy instead of erase */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = base; i < base + 10; i++) {
		XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b,
					GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != i);
	}

	XA_BUG_ON(xa, xa_store(xa, 3, xa_mk_index(3), GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store(xa, 4, xa_mk_index(4), GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store(xa, 4, NULL, GFP_KERNEL) != xa_mk_index(4));
	XA_BUG_ON(xa, xa_erase(xa, 5) != NULL);
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 5);

	xa_for_each(xa, index, entry) {
		xa_erase_index(xa, index);
	}

	for (i = base; i < base + 9; i++) {
		XA_BUG_ON(xa, xa_erase(xa, i) != NULL);
		XA_BUG_ON(xa, xa_empty(xa));
	}
	XA_BUG_ON(xa, xa_erase(xa, 8) != NULL);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, base + 9) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	xa_destroy(xa);
}

static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
{
	struct xa_limit limit = XA_LIMIT(1, 0x3fff);
	u32 next = 0;
	unsigned int i, id;
	unsigned long index;
	void *entry;

	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 1);

	next = 0x3ffd;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0x3ffd);
	xa_erase_index(xa, 0x3ffd);
	xa_erase_index(xa, 1);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0x3ffe; i < 0x4003; i++) {
		if (i < 0x4000)
			entry = xa_mk_index(i);
		else
			entry = xa_mk_index(i - 0x3fff);
		XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit,
					&next, GFP_KERNEL) != (id == 1));
		XA_BUG_ON(xa, xa_mk_index(id) != entry);
	}

	/* Check wrap-around is handled correctly */
	if (base != 0)
		xa_erase_index(xa, base);
	xa_erase_index(xa, base + 1);
	next = UINT_MAX;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != UINT_MAX);
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base),
				xa_limit_32b, &next, GFP_KERNEL) != 1);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base + 1);

	xa_for_each(xa, index, entry)
		xa_erase_index(xa, index);

	XA_BUG_ON(xa, !xa_empty(xa));
}

static DEFINE_XARRAY_ALLOC(xa0);
static DEFINE_XARRAY_ALLOC1(xa1);

static noinline void check_xa_alloc(void)
{
	check_xa_alloc_1(&xa0, 0);
	check_xa_alloc_1(&xa1, 1);
	check_xa_alloc_2(&xa0, 0);
	check_xa_alloc_2(&xa1, 1);
	check_xa_alloc_3(&xa0, 0);
	check_xa_alloc_3(&xa1, 1);
}

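/*
 * Editorial sketch, not part of the test suite, of the cyclic
 * allocation pattern check_xa_alloc_3() exercises: @next remembers
 * where the search resumes, a return of 1 reports a wrap-around and
 * -EBUSY reports a full range.  example_alloc_cyclic() is a
 * hypothetical helper.
 */
static inline int example_alloc_cyclic(struct xarray *xa, u32 *id,
		void *entry, u32 *next)
{
	int err = xa_alloc_cyclic(xa, id, entry, xa_limit_32b, next,
			GFP_KERNEL);

	if (err == 1)
		pr_debug("allocation wrapped around\n");
	return err < 0 ? err : 0;
}
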
static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
			unsigned int order, unsigned int present)
{
	XA_STATE_ORDER(xas, xa, start, order);
	void *entry;
	unsigned int count = 0;

retry:
	xas_lock(&xas);
	xas_for_each_conflict(&xas, entry) {
		XA_BUG_ON(xa, !xa_is_value(entry));
		XA_BUG_ON(xa, entry < xa_mk_index(start));
		XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1));
		count++;
	}
	xas_store(&xas, xa_mk_index(start));
	xas_unlock(&xas);
	if (xas_nomem(&xas, GFP_KERNEL)) {
		count = 0;
		goto retry;
	}
	XA_BUG_ON(xa, xas_error(&xas));
	XA_BUG_ON(xa, count != present);
	XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start));
	XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
			xa_mk_index(start));
	xa_erase_index(xa, start);
}

static noinline void check_store_iter(struct xarray *xa)
{
	unsigned int i, j;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;

	for (i = 0; i < max_order; i++) {
		unsigned int min = 1 << i;
		unsigned int max = (2 << i) - 1;
		__check_store_iter(xa, 0, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
		__check_store_iter(xa, min, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));

		xa_store_index(xa, min, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_index(xa, max, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));

		for (j = 0; j < min; j++)
			xa_store_index(xa, j, GFP_KERNEL);
		__check_store_iter(xa, 0, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
		for (j = 0; j < min; j++)
			xa_store_index(xa, min + j, GFP_KERNEL);
		__check_store_iter(xa, min, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#ifdef CONFIG_XARRAY_MULTI
	xa_store_index(xa, 63, GFP_KERNEL);
	xa_store_index(xa, 65, GFP_KERNEL);
	__check_store_iter(xa, 64, 2, 1);
	xa_erase_index(xa, 63);
#endif
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_multi_find(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long index;

	xa_store_order(xa, 12, 2, xa_mk_value(12), GFP_KERNEL);
	XA_BUG_ON(xa, xa_store_index(xa, 16, GFP_KERNEL) != NULL);

	index = 0;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(12));
	XA_BUG_ON(xa, index != 12);
	index = 13;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(12));
	XA_BUG_ON(xa, (index < 12) || (index >= 16));
	XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(16));
	XA_BUG_ON(xa, index != 16);

	xa_erase_index(xa, 12);
	xa_erase_index(xa, 16);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

static noinline void check_multi_find_2(struct xarray *xa)
{
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 10 : 1;
	unsigned int i, j;
	void *entry;

	for (i = 0; i < max_order; i++) {
		unsigned long index = 1UL << i;
		for (j = 0; j < index; j++) {
			XA_STATE(xas, xa, j + index);
			xa_store_index(xa, index - 1, GFP_KERNEL);
			xa_store_order(xa, index, i, xa_mk_index(index),
					GFP_KERNEL);
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX) {
				xa_erase_index(xa, index);
			}
			rcu_read_unlock();
			xa_erase_index(xa, index - 1);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}
}

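/*
 * A note on the distinction the multi_find checks above depend on:
 * xa_find() may return a multi-index entry for any index within it
 * (leaving @index somewhere inside the entry), while xa_find_after()
 * skips past the entry containing @index entirely.
 */
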
static noinline void check_find_1(struct xarray *xa)
{
	unsigned long i, j, k;

	XA_BUG_ON(xa, !xa_empty(xa));

	/*
	 * Check xa_find with all pairs between 0 and 99 inclusive,
	 * starting at every index between 0 and 99
	 */
	for (i = 0; i < 100; i++) {
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
		xa_set_mark(xa, i, XA_MARK_0);
		for (j = 0; j < i; j++) {
			XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) !=
					NULL);
			xa_set_mark(xa, j, XA_MARK_0);
			for (k = 0; k < 100; k++) {
				unsigned long index = k;
				void *entry = xa_find(xa, &index, ULONG_MAX,
							XA_PRESENT);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);

				index = k;
				entry = xa_find(xa, &index, ULONG_MAX,
							XA_MARK_0);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);
			}
			xa_erase_index(xa, j);
			XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
		}
		xa_erase_index(xa, i);
		XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_find_2(struct xarray *xa)
{
	void *entry;
	unsigned long i, j, index;

	xa_for_each(xa, index, entry) {
		XA_BUG_ON(xa, true);
	}

	for (i = 0; i < 1024; i++) {
		xa_store_index(xa, i, GFP_KERNEL);
		j = 0;
		xa_for_each(xa, index, entry) {
			XA_BUG_ON(xa, xa_mk_index(index) != entry);
			XA_BUG_ON(xa, index != j++);
		}
	}

	xa_destroy(xa);
}

static noinline void check_find_3(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long i, j, k;
	void *entry;

	for (i = 0; i < 100; i++) {
		for (j = 0; j < 100; j++) {
			rcu_read_lock();
			for (k = 0; k < 100; k++) {
				xas_set(&xas, j);
				xas_for_each_marked(&xas, entry, k, XA_MARK_0)
					;
				if (j > k)
					XA_BUG_ON(xa,
						xas.xa_node != XAS_RESTART);
			}
			rcu_read_unlock();
		}
		xa_store_index(xa, i, GFP_KERNEL);
		xa_set_mark(xa, i, XA_MARK_0);
	}
	xa_destroy(xa);
}

static noinline void check_find(struct xarray *xa)
{
	check_find_1(xa);
	check_find_2(xa);
	check_find_3(xa);
	check_multi_find(xa);
	check_multi_find_2(xa);
}

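/*
 * xa_find_entry() below also demonstrates xas_pause(): pausing every
 * few entries is the idiom that lets a long walk drop the lock
 * periodically and resume at the next index without skipping or
 * revisiting entries.
 */
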
/* See find_swap_entry() in mm/shmem.c */
static noinline unsigned long xa_find_entry(struct xarray *xa, void *item)
{
	XA_STATE(xas, xa, 0);
	unsigned int checked = 0;
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (xas_retry(&xas, entry))
			continue;
		if (entry == item)
			break;
		checked++;
		if ((checked % 4) != 0)
			continue;
		xas_pause(&xas);
	}
	rcu_read_unlock();

	return entry ? xas.xa_index : -1;
}

static noinline void check_find_entry(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;
	unsigned long offset, index;

	for (order = 0; order < 20; order++) {
		for (offset = 0; offset < (1UL << (order + 3));
		     offset += (1UL << order)) {
			for (index = 0; index < (1UL << (order + 5));
			     index += (1UL << order)) {
				xa_store_order(xa, index, order,
						xa_mk_index(index), GFP_KERNEL);
				XA_BUG_ON(xa, xa_load(xa, index) !=
						xa_mk_index(index));
				XA_BUG_ON(xa, xa_find_entry(xa,
						xa_mk_index(index)) != index);
			}
			XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
			xa_destroy(xa);
		}
	}
#endif

	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1);
	xa_erase_index(xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}

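/*
 * A note the movement checks below rely on: with a freshly initialised
 * or reset xa_state, the first xas_next()/xas_prev() call walks to the
 * starting index itself; only subsequent calls step the cursor by one
 * index at a time.
 */
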
static noinline void check_move_small(struct xarray *xa, unsigned long idx)
{
	XA_STATE(xas, xa, 0);
	unsigned long i;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, idx, GFP_KERNEL);

	rcu_read_lock();
	for (i = 0; i < idx * 4; i++) {
		void *entry = xas_next(&xas);
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	}
	xas_next(&xas);
	XA_BUG_ON(xa, xas.xa_index != i);

	do {
		void *entry = xas_prev(&xas);
		i--;
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	} while (i > 0);

	xas_set(&xas, ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != 0);
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	rcu_read_unlock();

	xa_erase_index(xa, 0);
	xa_erase_index(xa, idx);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_move(struct xarray *xa)
{
	XA_STATE(xas, xa, (1 << 16) - 1);
	unsigned long i;

	for (i = 0; i < (1 << 16); i++)
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);

	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	for (i = (1 << 8); i < (1 << 15); i++)
		xa_erase_index(xa, i);

	i = xas.xa_index;

	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	xa_destroy(xa);

	for (i = 0; i < 16; i++)
		check_move_small(xa, 1UL << i);

	for (i = 2; i < 16; i++)
		check_move_small(xa, (1UL << i) - 1);
}

static noinline void xa_store_many_order(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned int i = 0;

	do {
		xas_lock(&xas);
		XA_BUG_ON(xa, xas_find_conflict(&xas));
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1U << order); i++) {
			XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i)));
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));
}

static noinline void check_create_range_1(struct xarray *xa,
		unsigned long index, unsigned order)
{
	unsigned long i;

	xa_store_many_order(xa, index, order);
	for (i = index; i < index + (1UL << order); i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range_2(struct xarray *xa, unsigned order)
{
	unsigned long i;
	unsigned long nr = 1UL << order;

	for (i = 0; i < nr * nr; i += nr)
		xa_store_many_order(xa, i, order);
	for (i = 0; i < nr * nr; i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range_3(void)
{
	XA_STATE(xas, NULL, 0);
	xas_set_err(&xas, -EEXIST);
	xas_create_range(&xas);
	XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST);
}

static noinline void check_create_range_4(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned long base = xas.xa_index;
	unsigned long i = 0;

	xa_store_index(xa, index, GFP_KERNEL);
	do {
		xas_lock(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1UL << order); i++) {
			void *old = xas_store(&xas, xa_mk_index(base + i));
			if (xas.xa_index == index)
				XA_BUG_ON(xa, old != xa_mk_index(base + i));
			else
				XA_BUG_ON(xa, old != NULL);
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));

	for (i = base; i < base + (1UL << order); i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range(struct xarray *xa)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 12 : 1;

	for (order = 0; order < max_order; order++) {
		check_create_range_1(xa, 0, order);
		check_create_range_1(xa, 1U << order, order);
		check_create_range_1(xa, 2U << order, order);
		check_create_range_1(xa, 3U << order, order);
		check_create_range_1(xa, 1U << 24, order);
		if (order < 10)
			check_create_range_2(xa, order);

		check_create_range_4(xa, 0, order);
		check_create_range_4(xa, 1U << order, order);
		check_create_range_4(xa, 2U << order, order);
		check_create_range_4(xa, 3U << order, order);
		check_create_range_4(xa, 1U << 24, order);

		check_create_range_4(xa, 1, order);
		check_create_range_4(xa, (1U << order) + 1, order);
		check_create_range_4(xa, (2U << order) + 1, order);
		check_create_range_4(xa, (2U << order) - 1, order);
		check_create_range_4(xa, (3U << order) + 1, order);
		check_create_range_4(xa, (3U << order) - 1, order);
		check_create_range_4(xa, (1U << 24) + 1, order);
	}

	check_create_range_3();
}

static noinline void __check_store_range(struct xarray *xa, unsigned long first,
		unsigned long last)
{
#ifdef CONFIG_XARRAY_MULTI
	xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL);

	XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first));
	XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first));
	XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL);
	XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL);

	xa_store_range(xa, first, last, NULL, GFP_KERNEL);
#endif

	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_store_range(struct xarray *xa)
{
	unsigned long i, j;

	for (i = 0; i < 128; i++) {
		for (j = i; j < 128; j++) {
			__check_store_range(xa, i, j);
			__check_store_range(xa, 128 + i, 128 + j);
			__check_store_range(xa, 4095 + i, 4095 + j);
			__check_store_range(xa, 4096 + i, 4096 + j);
			__check_store_range(xa, 123456 + i, 123456 + j);
			__check_store_range(xa, (1 << 24) + i, (1 << 24) + j);
		}
	}
}

static void check_align_1(struct xarray *xa, char *name)
{
	int i;
	unsigned int id;
	unsigned long index;
	void *entry;

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_alloc(xa, &id, name + i, xa_limit_32b,
					GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != i);
	}
	xa_for_each(xa, index, entry)
		XA_BUG_ON(xa, xa_is_err(entry));
	xa_destroy(xa);
}

/*
 * We should always be able to store without allocating memory after
 * reserving a slot.
 */
static void check_align_2(struct xarray *xa, char *name)
{
	int i;

	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_store(xa, 0, name + i, GFP_KERNEL) != NULL);
		xa_erase(xa, 0);
	}

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_reserve(xa, 0, GFP_KERNEL) != 0);
		XA_BUG_ON(xa, xa_store(xa, 0, name + i, 0) != NULL);
		xa_erase(xa, 0);
	}

	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_align(struct xarray *xa)
{
	char name[] = "Motorola 68000";

	check_align_1(xa, name);
	check_align_1(xa, name + 1);
	check_align_1(xa, name + 2);
	check_align_1(xa, name + 3);
	check_align_2(xa, name);
}

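/*
 * The workingset checks below mimic, in miniature, how mm/workingset.c
 * tracks nodes that contain nothing but shadow entries: the update_node
 * callback links a node onto shadow_nodes whenever every present slot
 * holds a value entry, and unlinks it as soon as that stops being true.
 */
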
static LIST_HEAD(shadow_nodes);

static void test_update_node(struct xa_node *node)
{
	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list))
			list_add(&node->private_list, &shadow_nodes);
	} else {
		if (!list_empty(&node->private_list))
			list_del_init(&node->private_list);
	}
}

static noinline void shadow_remove(struct xarray *xa)
{
	struct xa_node *node;

	xa_lock(xa);
	while ((node = list_first_entry_or_null(&shadow_nodes,
					struct xa_node, private_list))) {
		XA_STATE(xas, node->array, 0);
		XA_BUG_ON(xa, node->array != xa);
		list_del_init(&node->private_list);
		xas.xa_node = xa_parent_locked(node->array, node);
		xas.xa_offset = node->offset;
		xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
		xas_set_update(&xas, test_update_node);
		xas_store(&xas, NULL);
	}
	xa_unlock(xa);
}

static noinline void check_workingset(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	xas_set_update(&xas, test_update_node);

	do {
		xas_lock(&xas);
		xas_store(&xas, xa_mk_value(0));
		xas_next(&xas);
		xas_store(&xas, xa_mk_value(1));
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	xas_lock(&xas);
	xas_next(&xas);
	xas_store(&xas, &xas);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));

	xas_store(&xas, xa_mk_value(2));
	xas_unlock(&xas);
	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	shadow_remove(xa);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));
	XA_BUG_ON(xa, !xa_empty(xa));
}

/*
 * Check that the pointer / value / sibling entries are accounted the
 * way we expect them to be.
 */
static noinline void check_account(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;

	for (order = 1; order < 12; order++) {
		XA_STATE(xas, xa, 1 << order);

		xa_store_order(xa, 0, order, xa, GFP_KERNEL);
		rcu_read_lock();
		xas_load(&xas);
		XA_BUG_ON(xa, xas.xa_node->count == 0);
		XA_BUG_ON(xa, xas.xa_node->count > (1 << order));
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
		rcu_read_unlock();

		xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order),
				GFP_KERNEL);
		XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2);

		xa_erase(xa, 1 << order);
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);

		xa_erase(xa, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#endif
}

static noinline void check_destroy(struct xarray *xa)
{
	unsigned long index;

	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an empty array is a no-op */
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an array with a single entry */
	for (index = 0; index < 1000; index++) {
		xa_store_index(xa, index, GFP_KERNEL);
		XA_BUG_ON(xa, xa_empty(xa));
		xa_destroy(xa);
		XA_BUG_ON(xa, !xa_empty(xa));
	}

	/* Destroying an array with a single entry at ULONG_MAX */
	xa_store(xa, ULONG_MAX, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

#ifdef CONFIG_XARRAY_MULTI
	/* Destroying an array with a multi-index entry */
	xa_store_order(xa, 1 << 11, 11, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

static DEFINE_XARRAY(array);

static int xarray_checks(void)
{
	check_xa_err(&array);
	check_xas_retry(&array);
	check_xa_load(&array);
	check_xa_mark(&array);
	check_xa_shrink(&array);
	check_xas_erase(&array);
	check_cmpxchg(&array);
	check_reserve(&array);
	check_reserve(&xa0);
	check_multi_store(&array);
	check_xa_alloc();
	check_find(&array);
	check_find_entry(&array);
	check_account(&array);
	check_destroy(&array);
	check_move(&array);
	check_create_range(&array);
	check_store_range(&array);
	check_store_iter(&array);
	check_align(&xa0);

	check_workingset(&array, 0);
	check_workingset(&array, 64);
	check_workingset(&array, 4096);

	printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
	return (tests_run == tests_passed) ? 0 : -EINVAL;
}

static void xarray_exit(void)
{
}

module_init(xarray_checks);
module_exit(xarray_exit);
MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
MODULE_LICENSE("GPL");