// SPDX-License-Identifier: GPL-2.0+
/*
 * test_xarray.c: Test the XArray API
 * Copyright (c) 2017-2018 Microsoft Corporation
 * Author: Matthew Wilcox <willy@infradead.org>
 */

#include <linux/xarray.h>
#include <linux/module.h>

/* Running totals reported by the test harness. */
static unsigned int tests_run;
static unsigned int tests_passed;

#ifndef XA_DEBUG
# ifdef __KERNEL__
void xa_dump(const struct xarray *xa) { }
# endif
#undef XA_BUG_ON
/*
 * Soft assertion: unlike BUG_ON(), a failure dumps the array and a stack
 * trace but lets the test suite keep running, so one failure does not
 * hide the rest.
 */
#define XA_BUG_ON(xa, x) do {					\
	tests_run++;						\
	if (x) {						\
		printk("BUG at %s:%d\n", __func__, __LINE__);	\
		xa_dump(xa);					\
		dump_stack();					\
	} else {						\
		tests_passed++;					\
	}							\
} while (0)
#endif

/*
 * Wrap an index as an XArray value entry.  Masking with LONG_MAX keeps
 * the value within the range a value entry can encode.
 */
static void *xa_mk_index(unsigned long index)
{
	return xa_mk_value(index & LONG_MAX);
}

/* Store the value entry for @index at @index; returns the old entry. */
static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_store(xa, index, xa_mk_index(index), gfp);
}

/*
 * Allocate a new ID and assert that the array handed back exactly
 * @index, i.e. the lowest free slot the test expects.
 */
static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	u32 id = 0;

	XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(index),
				gfp) != 0);
	XA_BUG_ON(xa, id != index);
}

/* Erase @index, asserting it held its own value entry and is now empty. */
static void xa_erase_index(struct xarray *xa, unsigned long index)
{
	XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, index) != NULL);
}

/*
 * If anyone needs this, please move it to xarray.c. We have no current
 * users outside the test suite because all current multislot users want
 * to use the advanced API.
 */
static void *xa_store_order(struct xarray *xa, unsigned long index,
		unsigned order, void *entry, gfp_t gfp)
{
	XA_STATE_ORDER(xas, xa, index, order);
	void *curr;

	/* Standard advanced-API retry loop: drop the lock to allocate. */
	do {
		xas_lock(&xas);
		curr = xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return curr;
}

/* Check the error returns of the normal (non-advanced) API. */
static noinline void check_xa_err(struct xarray *xa)
{
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0);
#ifndef __KERNEL__
	/* The kernel does not fail GFP_NOWAIT allocations */
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
#endif
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0);
// kills the test-suite :-(
//	XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
}

/*
 * Check that readers see retry entries when the tree is reshaped under
 * them, and that xas_retry()/xas_reset() restart the walk correctly.
 */
static noinline void check_xas_retry(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, 1, GFP_KERNEL);

	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
	/* Erasing index 1 shrinks the tree; our cached node is now stale. */
	xa_erase_index(xa, 1);
	XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
	XA_BUG_ON(xa, xas_retry(&xas, NULL));
	XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
	xas_reset(&xas);
	XA_BUG_ON(xa, xas.xa_node != XAS_RESTART);
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_node != NULL);

	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas)));
	xas.xa_node = XAS_RESTART;
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	rcu_read_unlock();

	/* Make sure we can iterate through retry entries */
	xas_lock(&xas);
	xas_set(&xas, 0);
	xas_store(&xas, XA_RETRY_ENTRY);
	xas_set(&xas, 1);
	xas_store(&xas, XA_RETRY_ENTRY);

	xas_set(&xas, 0);
	xas_for_each(&xas, entry, ULONG_MAX) {
		/* Overwrite each retry entry with a real value entry. */
		xas_store(&xas, xa_mk_index(xas.xa_index));
	}
	xas_unlock(&xas);

	xa_erase_index(xa, 0);
	xa_erase_index(xa, 1);
}

/*
 * Store 0..1023 one at a time and verify after each store that exactly
 * the indices stored so far are present; then erase them one at a time
 * verifying the converse.
 */
static noinline void check_xa_load(struct xarray *xa)
{
	unsigned long i, j;

	for (i = 0; i < 1024; i++) {
		for (j = 0; j < 1024; j++) {
			void *entry = xa_load(xa, j);
			if (j < i)
				XA_BUG_ON(xa, xa_to_value(entry) != j);
			else
				XA_BUG_ON(xa, entry);
		}
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
	}

	for (i = 0; i < 1024; i++) {
		for (j = 0; j < 1024; j++) {
			void *entry = xa_load(xa, j);
			if (j >= i)
				XA_BUG_ON(xa, xa_to_value(entry) != j);
			else
				XA_BUG_ON(xa, entry);
		}
		xa_erase_index(xa, i);
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

/*
 * Exercise mark semantics around a single index: marks on absent
 * entries, mark/entry lifetime, and mark union when a multi-index
 * entry replaces marked single entries.  @index must be a multiple
 * of 4 (asserted below) so the order-2+ overwrites line up.
 */
static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1;

	/* NULL elements have no marks set */
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/* Storing a pointer will not make a mark appear */
	XA_BUG_ON(xa, xa_store_index(xa, index, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));

	/* Setting one mark will not set another mark */
	XA_BUG_ON(xa, xa_get_mark(xa, index + 1, XA_MARK_0));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1));

	/* Storing NULL clears marks, and they can't be set again */
	xa_erase_index(xa, index);
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/*
	 * Storing a multi-index entry over entries with marks gives the
	 * entire entry the union of the marks
	 */
	BUG_ON((index % 4) != 0);
	for (order = 2; order < max_order; order++) {
		unsigned long base = round_down(index, 1UL << order);
		unsigned long next = base + (1UL << order);
		unsigned long i;

		XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
		xa_set_mark(xa, index + 1, XA_MARK_0);
		XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
		xa_set_mark(xa, index + 2, XA_MARK_1);
		XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
		xa_store_order(xa, index, order, xa_mk_index(index),
				GFP_KERNEL);
		for (i = base; i < next; i++) {
			XA_STATE(xas, xa, i);
			unsigned int seen = 0;
			void *entry;

			/* Every covered index reports the union of marks. */
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_1));
			XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_2));

			/* We should see two elements in the array */
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX)
				seen++;
			rcu_read_unlock();
			XA_BUG_ON(xa, seen != 2);

			/* One of which is marked */
			xas_set(&xas, 0);
			seen = 0;
			rcu_read_lock();
			xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
				seen++;
			rcu_read_unlock();
			XA_BUG_ON(xa, seen != 1);
		}
		/* The entry at @next keeps its own (empty) mark state. */
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2));
		xa_erase_index(xa, index);
		xa_erase_index(xa, next);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

/*
 * Check mark behaviour across a larger population (indices 3500-4499)
 * and the interaction of xas_init_marks() with existing marks.
 */
static noinline void check_xa_mark_2(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long index;
	unsigned int count = 0;
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_set_mark(xa, 0, XA_MARK_0);
	xas_lock(&xas);
	xas_load(&xas);
	xas_init_marks(&xas);
	xas_unlock(&xas);
	/* NOTE(review): '!x == 0' reads awkwardly; it asserts the mark is clear. */
	XA_BUG_ON(xa, !xa_get_mark(xa, 0, XA_MARK_0) == 0);

	for (index = 3500; index < 4500; index++) {
		xa_store_index(xa, index, GFP_KERNEL);
		xa_set_mark(xa, index, XA_MARK_0);
	}

	xas_reset(&xas);
	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
		count++;
	rcu_read_unlock();
	XA_BUG_ON(xa, count != 1000);

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		/* Re-initialise marks and check the state seen afterwards. */
		xas_init_marks(&xas);
		XA_BUG_ON(xa, !xa_get_mark(xa, xas.xa_index, XA_MARK_0));
		XA_BUG_ON(xa, !xas_get_mark(&xas, XA_MARK_0));
	}
	xas_unlock(&xas);

	xa_destroy(xa);
}

/* Run the single-index mark checks over a spread of indices. */
static noinline void check_xa_mark(struct xarray *xa)
{
	unsigned long index;

	for (index = 0; index < 16384; index += 4)
		check_xa_mark_1(xa, index);

	check_xa_mark_2(xa);
}

/*
 * Check that the tree shrinks when entries are removed, both for the
 * two-entry case and for multi-order entries of increasing size.
 */
static noinline void check_xa_shrink(struct xarray *xa)
{
	XA_STATE(xas, xa, 1);
	struct xa_node *node;
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 15 : 1;

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

	/*
	 * Check that erasing the entry at 1 shrinks the tree and properly
	 * marks the node as being deleted.
	 */
	xas_lock(&xas);
	XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1));
	node = xas.xa_node;
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != NULL);
	XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS);
	/* The dead node's slot 0 is replaced with a retry entry. */
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY);
	XA_BUG_ON(xa, xas_load(&xas) != NULL);
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	xa_erase_index(xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (order = 0; order < max_order; order++) {
		unsigned long max = (1UL << order) - 1;
		xa_store_order(xa, 0, order, xa_mk_value(0), GFP_KERNEL);
		XA_BUG_ON(xa, xa_load(xa, max) != xa_mk_value(0));
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
		rcu_read_lock();
		node = xa_head(xa);
		rcu_read_unlock();
		/* Growing then erasing ULONG_MAX should restore the head. */
		XA_BUG_ON(xa, xa_store_index(xa, ULONG_MAX, GFP_KERNEL) !=
				NULL);
		rcu_read_lock();
		XA_BUG_ON(xa, xa_head(xa) == node);
		rcu_read_unlock();
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
		xa_erase_index(xa, ULONG_MAX);
		XA_BUG_ON(xa, xa->xa_head != node);
		xa_erase_index(xa, 0);
	}
}

/* Check xa_cmpxchg() and xa_insert() success/failure semantics. */
static noinline void check_cmpxchg(struct xarray *xa)
{
	void *FIVE = xa_mk_value(5);
	void *SIX = xa_mk_value(6);
	void *LOTS = xa_mk_value(12345678);

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EEXIST);
	/* Mismatched 'old' leaves the entry alone and returns the current. */
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE,
				GFP_KERNEL) != LOTS);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
	xa_erase_index(xa, 12345678);
	xa_erase_index(xa, 5);
	XA_BUG_ON(xa, !xa_empty(xa));
}

/*
 * Check reserved-entry semantics: reserved slots count as occupied for
 * emptiness but load as NULL, and store/cmpxchg/insert treat them as
 * free; iteration skips them.
 */
static noinline void check_reserve(struct xarray *xa)
{
	void *entry;
	unsigned long index = 0;

	/* An array with a reserved entry is not empty */
	XA_BUG_ON(xa, !xa_empty(xa));
	xa_reserve(xa, 12345678, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_load(xa, 12345678));
	xa_release(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Releasing a used entry does nothing */
	xa_reserve(xa, 12345678, GFP_KERNEL);
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* cmpxchg sees a reserved entry as NULL */
	xa_reserve(xa, 12345678, GFP_KERNEL);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, NULL, xa_mk_value(12345678),
				GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* And so does xa_insert */
	xa_reserve(xa, 12345678, GFP_KERNEL);
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != 0);
	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Can iterate through a reserved entry */
	xa_store_index(xa, 5, GFP_KERNEL);
	xa_reserve(xa, 6, GFP_KERNEL);
	xa_store_index(xa, 7, GFP_KERNEL);

	xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
		XA_BUG_ON(xa, index != 5 && index != 7);
	}
	xa_destroy(xa);
}

/*
 * Populate a window of entries plus a sentinel at ULONG_MAX, then use
 * xas_store(NULL) inside an iteration to erase while walking.
 */
static noinline void check_xas_erase(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned long i, j;

	for (i = 0; i < 200; i++) {
		for (j = i; j < 2 * i + 17; j++) {
			xas_set(&xas, j);
			do {
				xas_lock(&xas);
				xas_store(&xas, xa_mk_index(j));
				xas_unlock(&xas);
			} while (xas_nomem(&xas, GFP_KERNEL));
		}

		xas_set(&xas, ULONG_MAX);
		do {
			xas_lock(&xas);
			xas_store(&xas, xa_mk_value(0));
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));

		/* Remove the sentinel, then erase everything in order. */
		xas_lock(&xas);
		xas_store(&xas, NULL);

		xas_set(&xas, 0);
		j = i;
		xas_for_each(&xas, entry, ULONG_MAX) {
			XA_BUG_ON(xa, entry != xa_mk_index(j));
			xas_store(&xas, NULL);
			j++;
		}
		xas_unlock(&xas);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
}

#ifdef CONFIG_XARRAY_MULTI
/*
 * Store a multi-order entry covering @index, verify its bounds, then
 * overwrite it in place via the advanced API.
 */
static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, index);
	unsigned long min = index & ~((1UL << order) - 1);
	unsigned long max = min + (1UL << order);

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);

	xas_lock(&xas);
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index));
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min));
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min));
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);

	xa_erase_index(xa, min);
	XA_BUG_ON(xa, !xa_empty(xa));
}

/*
 * Overwrite a multi-order entry through an xa_state pointing at an
 * unaligned index; the state's index must be left unchanged.
 */
static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, index);
	xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);

	xas_lock(&xas);
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != index);
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	xas_unlock(&xas);
	XA_BUG_ON(xa, !xa_empty(xa));
}

/*
 * A multi-order entry must be returned exactly once per iteration,
 * whether the walk starts before or inside the entry.
 */
static noinline void check_multi_store_3(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	int n = 0;

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 1);
	xas_set(&xas, index + 1);
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 2);
	xas_unlock(&xas);

	xa_destroy(xa);
}
#endif

/* Exercise multi-order stores: node accounting, overwrite, and erase. */
static noinline void check_multi_store(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long i, j, k;
	unsigned int max_order = (sizeof(long) == 4) ? 30 : 60;

	/* Loading from any position returns the same value */
	xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Storing adjacent to the value does not alter the value */
	xa_store(xa, 3, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Overwriting multiple indexes works */
	xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 4) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4);
	rcu_read_unlock();

	/* We can erase multiple values with a single store */
	xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Even when the first slot is empty but the others aren't */
	xa_store_index(xa, 1, GFP_KERNEL);
	xa_store_index(xa, 2, GFP_KERNEL);
	xa_store_order(xa, 0, 2, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Every pair of orders: the later store must win everywhere. */
	for (i = 0; i < max_order; i++) {
		for (j = 0; j < max_order; j++) {
			xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL);
			xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL);

			for (k = 0; k < max_order; k++) {
				void *entry = xa_load(xa, (1UL << k) - 1);
				if ((i < k) && (j < k))
					XA_BUG_ON(xa, entry != NULL);
				else
					XA_BUG_ON(xa, entry != xa_mk_index(j));
			}

			xa_erase(xa, 0);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}

	for (i = 0; i < 20; i++) {
		check_multi_store_1(xa, 200, i);
		check_multi_store_1(xa, 0, i);
		check_multi_store_1(xa, (1UL << i) + 1, i);
	}
	check_multi_store_2(xa, 4095, 9);

	for (i = 1; i < 20; i++) {
		check_multi_store_3(xa, 0, i);
		check_multi_store_3(xa, 1UL << i, i);
	}
#endif
}

/* Allocating XArray used by the ID-allocation tests below. */
static DEFINE_XARRAY_ALLOC(xa0);

/* Check ID allocation: lowest-free assignment, wrap limits, ENOSPC. */
static noinline void check_xa_alloc(void)
{
	int i;
	u32 id;

	/* An empty array should assign 0 to the first alloc */
	xa_alloc_index(&xa0, 0, GFP_KERNEL);

	/* Erasing it should make the array empty again */
	xa_erase_index(&xa0, 0);
	XA_BUG_ON(&xa0, !xa_empty(&xa0));

	/* And it should assign 0 again */
	xa_alloc_index(&xa0, 0, GFP_KERNEL);

	/* The next assigned ID should be 1 */
	xa_alloc_index(&xa0, 1, GFP_KERNEL);
	xa_erase_index(&xa0, 1);

	/* Storing a value should mark it used */
	xa_store_index(&xa0, 1, GFP_KERNEL);
	xa_alloc_index(&xa0, 2, GFP_KERNEL);

	/* If we then erase 0, it should be free */
	xa_erase_index(&xa0, 0);
	xa_alloc_index(&xa0, 0, GFP_KERNEL);

	xa_erase_index(&xa0, 1);
	xa_erase_index(&xa0, 2);

	for (i = 1; i < 5000; i++) {
		xa_alloc_index(&xa0, i, GFP_KERNEL);
	}

	xa_destroy(&xa0);

	/* Allocation near the top of the ID space, then exhaustion. */
	id = 0xfffffffeU;
	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
				GFP_KERNEL) != 0);
	XA_BUG_ON(&xa0, id != 0xfffffffeU);
	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
				GFP_KERNEL) != 0);
	XA_BUG_ON(&xa0, id != 0xffffffffU);
	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
				GFP_KERNEL) != -ENOSPC);
	XA_BUG_ON(&xa0, id != 0xffffffffU);
	xa_destroy(&xa0);

	/* Requesting a start above the limit must fail with -ENOSPC. */
	id = 10;
	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id),
				GFP_KERNEL) != -ENOSPC);
	XA_BUG_ON(&xa0, xa_store_index(&xa0, 3, GFP_KERNEL) != 0);
	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id),
				GFP_KERNEL) != -ENOSPC);
	xa_erase_index(&xa0, 3);
	XA_BUG_ON(&xa0, !xa_empty(&xa0));
}

/*
 * Store an order-@order entry at @start using xas_for_each_conflict()
 * to count the @present pre-existing entries it displaces.
 */
static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
			unsigned int order, unsigned int present)
{
	XA_STATE_ORDER(xas, xa, start, order);
	void *entry;
	unsigned int count = 0;

retry:
	xas_lock(&xas);
	xas_for_each_conflict(&xas, entry) {
		XA_BUG_ON(xa, !xa_is_value(entry));
		XA_BUG_ON(xa, entry < xa_mk_index(start));
		XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1));
		count++;
	}
	xas_store(&xas, xa_mk_index(start));
	xas_unlock(&xas);
	if (xas_nomem(&xas, GFP_KERNEL)) {
		/* Allocation retry restarts the walk; recount conflicts. */
		count = 0;
		goto retry;
	}
	XA_BUG_ON(xa, xas_error(&xas));
	XA_BUG_ON(xa, count != present);
	XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start));
	XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
			xa_mk_index(start));
	xa_erase_index(xa, start);
}

/* Drive __check_store_iter over each order with 0, 1 and min conflicts. */
static noinline void check_store_iter(struct xarray *xa)
{
	unsigned int i, j;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;

	for (i = 0; i < max_order; i++) {
		unsigned int min = 1 << i;
		unsigned int max = (2 << i) - 1;
		__check_store_iter(xa, 0, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
		__check_store_iter(xa, min, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));

		xa_store_index(xa, min, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_index(xa, max, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));

		for (j = 0; j < min; j++)
			xa_store_index(xa, j, GFP_KERNEL);
		__check_store_iter(xa, 0, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
		for (j = 0; j < min; j++)
			xa_store_index(xa, min + j, GFP_KERNEL);
		__check_store_iter(xa, min, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#ifdef CONFIG_XARRAY_MULTI
	/* A conflict outside the stored range must not be counted. */
	xa_store_index(xa, 63, GFP_KERNEL);
	xa_store_index(xa, 65, GFP_KERNEL);
	__check_store_iter(xa, 64, 2, 1);
	xa_erase_index(xa, 63);
#endif
	XA_BUG_ON(xa, !xa_empty(xa));
}

/*
 * Check xa_find()/xa_find_after() against a multi-order entry: a find
 * starting inside the entry returns it, and find_after skips past it.
 */
static noinline void check_multi_find(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long index;

	xa_store_order(xa, 12, 2, xa_mk_value(12), GFP_KERNEL);
	XA_BUG_ON(xa, xa_store_index(xa, 16, GFP_KERNEL) != NULL);

	index = 0;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(12));
	XA_BUG_ON(xa, index != 12);
	index = 13;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(12));
	XA_BUG_ON(xa, (index < 12) || (index >= 16));
	XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(16));
	XA_BUG_ON(xa, index != 16);

	xa_erase_index(xa, 12);
	xa_erase_index(xa, 16);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

/*
 * Iterate over a multi-order entry while erasing it from under the
 * iterator (under RCU only, no lock) — must not crash or loop.
 */
static noinline void check_multi_find_2(struct xarray *xa)
{
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 10 : 1;
	unsigned int i, j;
	void *entry;

	for (i = 0; i < max_order; i++) {
		unsigned long index = 1UL << i;
		for (j = 0; j < index; j++) {
			XA_STATE(xas, xa, j + index);
			xa_store_index(xa, index - 1, GFP_KERNEL);
			xa_store_order(xa, index, i, xa_mk_index(index),
					GFP_KERNEL);
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX) {
				xa_erase_index(xa, index);
			}
			rcu_read_unlock();
			xa_erase_index(xa, index - 1);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}
}

/* Exhaustive xa_find() check over pairs of present/marked indices. */
static noinline void check_find_1(struct xarray *xa)
{
	unsigned long i, j, k;

	XA_BUG_ON(xa, !xa_empty(xa));

	/*
	 * Check xa_find with all pairs between 0 and 99 inclusive,
	 * starting at every index between 0 and 99
	 */
	for (i = 0; i < 100; i++) {
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
		xa_set_mark(xa, i, XA_MARK_0);
		for (j = 0; j < i; j++) {
			XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) !=
					NULL);
			xa_set_mark(xa, j, XA_MARK_0);
			for (k = 0; k < 100; k++) {
				unsigned long index = k;
				void *entry = xa_find(xa, &index, ULONG_MAX,
						XA_PRESENT);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);

				/* Same expectations when searching by mark. */
				index = k;
				entry = xa_find(xa, &index, ULONG_MAX,
						XA_MARK_0);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);
			}
			xa_erase_index(xa, j);
			XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
		}
		xa_erase_index(xa, i);
		XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

/* Check xa_for_each() sees entries in index order as they are added. */
static noinline void check_find_2(struct xarray *xa)
{
	void *entry;
	unsigned long i, j, index = 0;

	/* Iterating an empty array must visit nothing. */
	xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
		XA_BUG_ON(xa, true);
	}

	for (i = 0; i < 1024; i++) {
		xa_store_index(xa, index, GFP_KERNEL);
		j = 0;
		index = 0;
		xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
			XA_BUG_ON(xa, xa_mk_index(index) != entry);
			XA_BUG_ON(xa, index != j++);
		}
	}

	xa_destroy(xa);
}

/*
 * Check that an unsuccessful bounded marked search leaves the xa_state
 * in the XAS_RESTART state.
 */
static noinline void check_find_3(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long i, j, k;
	void *entry;

	for (i = 0; i < 100; i++) {
		for (j = 0; j < 100; j++) {
			for (k = 0; k < 100; k++) {
				xas_set(&xas, j);
				xas_for_each_marked(&xas, entry, k, XA_MARK_0)
					;
				if (j > k)
					XA_BUG_ON(xa,
						xas.xa_node != XAS_RESTART);
			}
		}
		xa_store_index(xa, i, GFP_KERNEL);
		xa_set_mark(xa, i, XA_MARK_0);
	}
	xa_destroy(xa);
}

/* Run all of the find/iteration checks. */
static noinline void check_find(struct xarray *xa)
{
	check_find_1(xa);
	check_find_2(xa);
	check_find_3(xa);
	check_multi_find(xa);
	check_multi_find_2(xa);
}

/* See find_swap_entry() in mm/shmem.c */
static noinline unsigned long xa_find_entry(struct xarray *xa, void *item)
{
	XA_STATE(xas, xa, 0);
	unsigned int checked = 0;
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (xas_retry(&xas, entry))
			continue;
		if (entry == item)
			break;
		checked++;
		/* Periodically pause, mimicking shmem's latency relief. */
		if ((checked % 4) != 0)
			continue;
		xas_pause(&xas);
	}
	rcu_read_unlock();

	return entry ? xas.xa_index : -1;
}

/* Check xa_find_entry() against multi-order entries and misses. */
static noinline void check_find_entry(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;
	unsigned long offset, index;

	for (order = 0; order < 20; order++) {
		for (offset = 0; offset < (1UL << (order + 3));
		     offset += (1UL << order)) {
			for (index = 0; index < (1UL << (order + 5));
			     index += (1UL << order)) {
				xa_store_order(xa, index, order,
						xa_mk_index(index), GFP_KERNEL);
				XA_BUG_ON(xa, xa_load(xa, index) !=
						xa_mk_index(index));
				XA_BUG_ON(xa, xa_find_entry(xa,
						xa_mk_index(index)) != index);
			}
			XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
			xa_destroy(xa);
		}
	}
#endif

	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	/* Value entries are masked to LONG_MAX, so this cannot match. */
	XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1);
	xa_erase_index(xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}

/*
 * Step forwards and backwards with xas_next()/xas_prev() across a
 * two-entry array (0 and @idx), including wrap-around at both ends.
 */
static noinline void check_move_small(struct xarray *xa, unsigned long idx)
{
	XA_STATE(xas, xa, 0);
	unsigned long i;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, idx, GFP_KERNEL);

	rcu_read_lock();
	for (i = 0; i < idx * 4; i++) {
		void *entry = xas_next(&xas);
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	}
	xas_next(&xas);
	XA_BUG_ON(xa, xas.xa_index != i);

	do {
		void *entry = xas_prev(&xas);
		i--;
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	} while (i > 0);

	/* Wrap from ULONG_MAX to 0 and back again. */
	xas_set(&xas, ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != 0);
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	rcu_read_unlock();

	xa_erase_index(xa, 0);
	xa_erase_index(xa, idx);
	XA_BUG_ON(xa, !xa_empty(xa));
}

/*
 * Walk a 64K-entry array backwards then forwards, then repeat with a
 * hole punched in the middle, and finally run the small-move checks.
 */
static noinline void check_move(struct xarray *xa)
{
	XA_STATE(xas, xa, (1 << 16) - 1);
	unsigned long i;

	for (i = 0; i < (1 << 16); i++)
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);

	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	/* Punch a hole in the middle and walk across it both ways. */
	for (i = (1 << 8); i < (1 << 15); i++)
		xa_erase_index(xa, i);

	i = xas.xa_index;

	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	xa_destroy(xa);

	for (i = 0; i < 16; i++)
		check_move_small(xa, 1UL << i);

	for (i = 2; i < 16; i++)
		check_move_small(xa, (1UL << i) - 1);
}

/*
 * Fill an order-@order range starting at @index using
 * xas_create_range() plus per-slot xas_store()/xas_next().
 */
static noinline void xa_store_many_order(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned int i = 0;

	do {
		xas_lock(&xas);
		XA_BUG_ON(xa, xas_find_conflict(&xas));
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1U << order); i++) {
			XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i)));
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));
}

/* Create one range, then erase each index individually. */
static noinline void check_create_range_1(struct xarray *xa,
		unsigned long index, unsigned order)
{
	unsigned long i;

	xa_store_many_order(xa, index, order);
	for (i = index; i < index + (1UL << order); i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

/* Create many adjacent ranges covering nr * nr indices. */
static noinline void check_create_range_2(struct xarray *xa, unsigned order)
{
	unsigned long i;
	unsigned long nr = 1UL << order;

	for (i = 0; i < nr * nr; i += nr)
		xa_store_many_order(xa, i, order);
	for (i = 0; i < nr * nr; i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

/* xas_create_range() must be a no-op on an errored xa_state. */
static noinline void check_create_range_3(void)
{
	XA_STATE(xas, NULL, 0);
	xas_set_err(&xas, -EEXIST);
	xas_create_range(&xas);
	XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST);
}

/*
 * As check_create_range_1(), but with one pre-existing entry inside
 * the range; the store over it must return the old entry.
 */
static noinline void check_create_range_4(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned long base = xas.xa_index;
	unsigned long i = 0;

	xa_store_index(xa, index, GFP_KERNEL);
	do {
		xas_lock(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1UL << order); i++) {
			void *old = xas_store(&xas, xa_mk_index(base + i));
			if (xas.xa_index == index)
				XA_BUG_ON(xa, old != xa_mk_index(base + i));
			else
				XA_BUG_ON(xa, old != NULL);
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));

	for (i = base; i < base + (1UL << order); i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

/* Drive the range-creation checks over aligned and unaligned starts. */
static noinline void check_create_range(struct xarray *xa)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 12 : 1;

	for (order = 0; order < max_order; order++) {
		check_create_range_1(xa, 0, order);
		check_create_range_1(xa, 1U << order, order);
		check_create_range_1(xa, 2U << order, order);
		check_create_range_1(xa, 3U << order, order);
		check_create_range_1(xa, 1U << 24, order);
		if (order < 10)
			check_create_range_2(xa, order);

		check_create_range_4(xa, 0, order);
		check_create_range_4(xa, 1U << order, order);
		check_create_range_4(xa, 2U << order, order);
		check_create_range_4(xa, 3U << order, order);
		check_create_range_4(xa, 1U << 24, order);

		check_create_range_4(xa, 1, order);
		check_create_range_4(xa, (1U << order) + 1, order);
		check_create_range_4(xa, (2U << order) + 1, order);
		check_create_range_4(xa, (2U << order) - 1, order);
		check_create_range_4(xa, (3U << order) + 1, order);
		check_create_range_4(xa, (3U << order) - 1, order);
		check_create_range_4(xa, (1U << 24) + 1, order);
	}

	check_create_range_3();
}

/* Store one entry over [first, last] and check its boundaries. */
static noinline void __check_store_range(struct xarray *xa, unsigned long first,
		unsigned long last)
{
#ifdef CONFIG_XARRAY_MULTI
	xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL);

	XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first));
	XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first));
	XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL);
	XA_BUG_ON(xa, xa_load(xa, last + 1)
!= NULL); 1163 1164 xa_store_range(xa, first, last, NULL, GFP_KERNEL); 1165 #endif 1166 1167 XA_BUG_ON(xa, !xa_empty(xa)); 1168 } 1169 1170 static noinline void check_store_range(struct xarray *xa) 1171 { 1172 unsigned long i, j; 1173 1174 for (i = 0; i < 128; i++) { 1175 for (j = i; j < 128; j++) { 1176 __check_store_range(xa, i, j); 1177 __check_store_range(xa, 128 + i, 128 + j); 1178 __check_store_range(xa, 4095 + i, 4095 + j); 1179 __check_store_range(xa, 4096 + i, 4096 + j); 1180 __check_store_range(xa, 123456 + i, 123456 + j); 1181 __check_store_range(xa, (1 << 24) + i, (1 << 24) + j); 1182 } 1183 } 1184 } 1185 1186 static LIST_HEAD(shadow_nodes); 1187 1188 static void test_update_node(struct xa_node *node) 1189 { 1190 if (node->count && node->count == node->nr_values) { 1191 if (list_empty(&node->private_list)) 1192 list_add(&shadow_nodes, &node->private_list); 1193 } else { 1194 if (!list_empty(&node->private_list)) 1195 list_del_init(&node->private_list); 1196 } 1197 } 1198 1199 static noinline void shadow_remove(struct xarray *xa) 1200 { 1201 struct xa_node *node; 1202 1203 xa_lock(xa); 1204 while ((node = list_first_entry_or_null(&shadow_nodes, 1205 struct xa_node, private_list))) { 1206 XA_STATE(xas, node->array, 0); 1207 XA_BUG_ON(xa, node->array != xa); 1208 list_del_init(&node->private_list); 1209 xas.xa_node = xa_parent_locked(node->array, node); 1210 xas.xa_offset = node->offset; 1211 xas.xa_shift = node->shift + XA_CHUNK_SHIFT; 1212 xas_set_update(&xas, test_update_node); 1213 xas_store(&xas, NULL); 1214 } 1215 xa_unlock(xa); 1216 } 1217 1218 static noinline void check_workingset(struct xarray *xa, unsigned long index) 1219 { 1220 XA_STATE(xas, xa, index); 1221 xas_set_update(&xas, test_update_node); 1222 1223 do { 1224 xas_lock(&xas); 1225 xas_store(&xas, xa_mk_value(0)); 1226 xas_next(&xas); 1227 xas_store(&xas, xa_mk_value(1)); 1228 xas_unlock(&xas); 1229 } while (xas_nomem(&xas, GFP_KERNEL)); 1230 1231 XA_BUG_ON(xa, list_empty(&shadow_nodes)); 
1232 1233 xas_lock(&xas); 1234 xas_next(&xas); 1235 xas_store(&xas, &xas); 1236 XA_BUG_ON(xa, !list_empty(&shadow_nodes)); 1237 1238 xas_store(&xas, xa_mk_value(2)); 1239 xas_unlock(&xas); 1240 XA_BUG_ON(xa, list_empty(&shadow_nodes)); 1241 1242 shadow_remove(xa); 1243 XA_BUG_ON(xa, !list_empty(&shadow_nodes)); 1244 XA_BUG_ON(xa, !xa_empty(xa)); 1245 } 1246 1247 /* 1248 * Check that the pointer / value / sibling entries are accounted the 1249 * way we expect them to be. 1250 */ 1251 static noinline void check_account(struct xarray *xa) 1252 { 1253 #ifdef CONFIG_XARRAY_MULTI 1254 unsigned int order; 1255 1256 for (order = 1; order < 12; order++) { 1257 XA_STATE(xas, xa, 1 << order); 1258 1259 xa_store_order(xa, 0, order, xa, GFP_KERNEL); 1260 rcu_read_lock(); 1261 xas_load(&xas); 1262 XA_BUG_ON(xa, xas.xa_node->count == 0); 1263 XA_BUG_ON(xa, xas.xa_node->count > (1 << order)); 1264 XA_BUG_ON(xa, xas.xa_node->nr_values != 0); 1265 rcu_read_unlock(); 1266 1267 xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order), 1268 GFP_KERNEL); 1269 XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2); 1270 1271 xa_erase(xa, 1 << order); 1272 XA_BUG_ON(xa, xas.xa_node->nr_values != 0); 1273 1274 xa_erase(xa, 0); 1275 XA_BUG_ON(xa, !xa_empty(xa)); 1276 } 1277 #endif 1278 } 1279 1280 static noinline void check_destroy(struct xarray *xa) 1281 { 1282 unsigned long index; 1283 1284 XA_BUG_ON(xa, !xa_empty(xa)); 1285 1286 /* Destroying an empty array is a no-op */ 1287 xa_destroy(xa); 1288 XA_BUG_ON(xa, !xa_empty(xa)); 1289 1290 /* Destroying an array with a single entry */ 1291 for (index = 0; index < 1000; index++) { 1292 xa_store_index(xa, index, GFP_KERNEL); 1293 XA_BUG_ON(xa, xa_empty(xa)); 1294 xa_destroy(xa); 1295 XA_BUG_ON(xa, !xa_empty(xa)); 1296 } 1297 1298 /* Destroying an array with a single entry at ULONG_MAX */ 1299 xa_store(xa, ULONG_MAX, xa, GFP_KERNEL); 1300 XA_BUG_ON(xa, xa_empty(xa)); 1301 xa_destroy(xa); 1302 XA_BUG_ON(xa, !xa_empty(xa)); 1303 
1304 #ifdef CONFIG_XARRAY_MULTI 1305 /* Destroying an array with a multi-index entry */ 1306 xa_store_order(xa, 1 << 11, 11, xa, GFP_KERNEL); 1307 XA_BUG_ON(xa, xa_empty(xa)); 1308 xa_destroy(xa); 1309 XA_BUG_ON(xa, !xa_empty(xa)); 1310 #endif 1311 } 1312 1313 static DEFINE_XARRAY(array); 1314 1315 static int xarray_checks(void) 1316 { 1317 check_xa_err(&array); 1318 check_xas_retry(&array); 1319 check_xa_load(&array); 1320 check_xa_mark(&array); 1321 check_xa_shrink(&array); 1322 check_xas_erase(&array); 1323 check_cmpxchg(&array); 1324 check_reserve(&array); 1325 check_multi_store(&array); 1326 check_xa_alloc(); 1327 check_find(&array); 1328 check_find_entry(&array); 1329 check_account(&array); 1330 check_destroy(&array); 1331 check_move(&array); 1332 check_create_range(&array); 1333 check_store_range(&array); 1334 check_store_iter(&array); 1335 1336 check_workingset(&array, 0); 1337 check_workingset(&array, 64); 1338 check_workingset(&array, 4096); 1339 1340 printk("XArray: %u of %u tests passed\n", tests_passed, tests_run); 1341 return (tests_run == tests_passed) ? 0 : -EINVAL; 1342 } 1343 1344 static void xarray_exit(void) 1345 { 1346 } 1347 1348 module_init(xarray_checks); 1349 module_exit(xarray_exit); 1350 MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>"); 1351 MODULE_LICENSE("GPL"); 1352