// SPDX-License-Identifier: GPL-2.0+
/*
 * test_xarray.c: Test the XArray API
 * Copyright (c) 2017-2018 Microsoft Corporation
 * Author: Matthew Wilcox <willy@infradead.org>
 */

#include <linux/xarray.h>
#include <linux/module.h>

/* Running totals of assertions executed / assertions that held. */
static unsigned int tests_run;
static unsigned int tests_passed;

#ifndef XA_DEBUG
# ifdef __KERNEL__
void xa_dump(const struct xarray *xa) { }
# endif
#undef XA_BUG_ON
/*
 * Non-fatal assertion: log the failure, dump the array and a stack
 * trace, but keep running so later tests still execute.
 */
#define XA_BUG_ON(xa, x) do {					\
	tests_run++;						\
	if (x) {						\
		printk("BUG at %s:%d\n", __func__, __LINE__);	\
		xa_dump(xa);					\
		dump_stack();					\
	} else {						\
		tests_passed++;					\
	}							\
} while (0)
#endif

/* Turn an index into a distinctive value entry (top bit masked off). */
static void *xa_mk_index(unsigned long index)
{
	return xa_mk_value(index & LONG_MAX);
}

/* Store the canonical value entry for @index at @index. */
static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_store(xa, index, xa_mk_index(index), gfp);
}

/* Allocate an ID and assert it comes out as the expected @index. */
static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	u32 id = 0;

	XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(index),
				gfp) != 0);
	XA_BUG_ON(xa, id != index);
}

/* Erase @index and assert the canonical entry was there and is now gone. */
static void xa_erase_index(struct xarray *xa, unsigned long index)
{
	XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, index) != NULL);
}

/*
 * If anyone needs this, please move it to xarray.c. We have no current
 * users outside the test suite because all current multislot users want
 * to use the advanced API.
 */
static void *xa_store_order(struct xarray *xa, unsigned long index,
		unsigned order, void *entry, gfp_t gfp)
{
	XA_STATE_ORDER(xas, xa, index, order);
	void *curr;

	/* Standard advanced-API retry loop: drop the lock to allocate. */
	do {
		xas_lock(&xas);
		curr = xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return curr;
}

/* Check that xa_err() extracts errnos correctly from store/erase results. */
static noinline void check_xa_err(struct xarray *xa)
{
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0);
#ifndef __KERNEL__
	/* The kernel does not fail GFP_NOWAIT allocations */
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
#endif
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0);
// kills the test-suite :-(
//	XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
}

/* Check that retry entries appear to lockless readers and can be skipped. */
static noinline void check_xas_retry(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, 1, GFP_KERNEL);

	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
	/* Erasing index 1 shrinks the tree; reloading index 0 sees a retry. */
	xa_erase_index(xa, 1);
	XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
	XA_BUG_ON(xa, xas_retry(&xas, NULL));
	XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
	xas_reset(&xas);
	XA_BUG_ON(xa, xas.xa_node != XAS_RESTART);
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_node != NULL);

	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas)));
	xas.xa_node = XAS_RESTART;
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	rcu_read_unlock();

	/* Make sure we can iterate through retry entries */
	xas_lock(&xas);
	xas_set(&xas, 0);
	xas_store(&xas, XA_RETRY_ENTRY);
	xas_set(&xas, 1);
	xas_store(&xas, XA_RETRY_ENTRY);

	xas_set(&xas, 0);
	xas_for_each(&xas, entry, ULONG_MAX) {
		/* Replace each retry entry with a real value in-place. */
		xas_store(&xas, xa_mk_index(xas.xa_index));
	}
	xas_unlock(&xas);

	xa_erase_index(xa, 0);
	xa_erase_index(xa, 1);
}

/* Check loads against a growing, then shrinking, prefix of 1024 entries. */
static noinline void check_xa_load(struct xarray *xa)
{
	unsigned long i, j;

	for (i = 0; i < 1024; i++) {
		for (j = 0; j < 1024; j++) {
			void *entry = xa_load(xa, j);
			if (j < i)
				XA_BUG_ON(xa, xa_to_value(entry) != j);
			else
				XA_BUG_ON(xa, entry);
		}
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
	}

	for (i = 0; i < 1024; i++) {
		for (j = 0; j < 1024; j++) {
			void *entry = xa_load(xa, j);
			if (j >= i)
				XA_BUG_ON(xa, xa_to_value(entry) != j);
			else
				XA_BUG_ON(xa, entry);
		}
		xa_erase_index(xa, i);
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

/* Check mark set/get/clear semantics around a single index. */
static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ?
			8 : 1;

	/* NULL elements have no marks set */
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/* Storing a pointer will not make a mark appear */
	XA_BUG_ON(xa, xa_store_index(xa, index, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));

	/* Setting one mark will not set another mark */
	XA_BUG_ON(xa, xa_get_mark(xa, index + 1, XA_MARK_0));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1));

	/* Storing NULL clears marks, and they can't be set again */
	xa_erase_index(xa, index);
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/*
	 * Storing a multi-index entry over entries with marks gives the
	 * entire entry the union of the marks
	 */
	BUG_ON((index % 4) != 0);
	for (order = 2; order < max_order; order++) {
		unsigned long base = round_down(index, 1UL << order);
		unsigned long next = base + (1UL << order);
		unsigned long i;

		/* Seed two marked neighbours, then cover them. */
		XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
		xa_set_mark(xa, index + 1, XA_MARK_0);
		XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
		xa_set_mark(xa, index + 2, XA_MARK_2);
		XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
		xa_store_order(xa, index, order, xa_mk_index(index),
				GFP_KERNEL);
		for (i = base; i < next; i++) {
			XA_STATE(xas, xa, i);
			unsigned int seen = 0;
			void *entry;

			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
			XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2));

			/* We should see two elements in the array */
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX)
				seen++;
			rcu_read_unlock();
			XA_BUG_ON(xa, seen != 2);

			/* One of which is marked */
			xas_set(&xas, 0);
			seen = 0;
			rcu_read_lock();
			xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
				seen++;
			rcu_read_unlock();
			XA_BUG_ON(xa, seen != 1);
		}
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2));
		xa_erase_index(xa, index);
		xa_erase_index(xa, next);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

/* Check xas_init_marks() and marked iteration over a large range. */
static noinline void check_xa_mark_2(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long index;
	unsigned int count = 0;
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_set_mark(xa, 0, XA_MARK_0);
	xas_lock(&xas);
	xas_load(&xas);
	xas_init_marks(&xas);
	xas_unlock(&xas);
	/*
	 * NOTE(review): '!xa_get_mark(...) == 0' parses as
	 * '(!xa_get_mark(...)) == 0', i.e. it fires only if the mark is
	 * still set after xas_init_marks() reset it.  Confusing but
	 * apparently deliberate; worth rewriting as a plain comparison.
	 */
	XA_BUG_ON(xa, !xa_get_mark(xa, 0, XA_MARK_0) == 0);

	for (index = 3500; index < 4500; index++) {
		xa_store_index(xa, index, GFP_KERNEL);
		xa_set_mark(xa, index, XA_MARK_0);
	}

	xas_reset(&xas);
	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
		count++;
	rcu_read_unlock();
	XA_BUG_ON(xa, count != 1000);

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		xas_init_marks(&xas);
		XA_BUG_ON(xa, !xa_get_mark(xa, xas.xa_index, XA_MARK_0));
		XA_BUG_ON(xa, !xas_get_mark(&xas, XA_MARK_0));
	}
	xas_unlock(&xas);

	xa_destroy(xa);
}

/* Driver: run the single-index mark test over many indices, then part 2. */
static noinline void check_xa_mark(struct xarray *xa)
{
	unsigned long index;

	for (index = 0; index < 16384; index += 4)
		check_xa_mark_1(xa, index);

	check_xa_mark_2(xa);
}

/* Check that the tree shrinks when the high entries are removed. */
static noinline void check_xa_shrink(struct xarray *xa)
{
	XA_STATE(xas, xa, 1);
	struct xa_node *node;
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ?
			15 : 1;

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

	/*
	 * Check that erasing the entry at 1 shrinks the tree and properly
	 * marks the node as being deleted.
	 */
	xas_lock(&xas);
	XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1));
	node = xas.xa_node;
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != NULL);
	XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS);
	/* The dead node's slot 0 becomes a retry entry for readers. */
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY);
	XA_BUG_ON(xa, xas_load(&xas) != NULL);
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	xa_erase_index(xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (order = 0; order < max_order; order++) {
		unsigned long max = (1UL << order) - 1;
		xa_store_order(xa, 0, order, xa_mk_value(0), GFP_KERNEL);
		XA_BUG_ON(xa, xa_load(xa, max) != xa_mk_value(0));
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
		rcu_read_lock();
		node = xa_head(xa);
		rcu_read_unlock();
		/* Growing then erasing ULONG_MAX must shrink back to @node. */
		XA_BUG_ON(xa, xa_store_index(xa, ULONG_MAX, GFP_KERNEL) !=
				NULL);
		rcu_read_lock();
		XA_BUG_ON(xa, xa_head(xa) == node);
		rcu_read_unlock();
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
		xa_erase_index(xa, ULONG_MAX);
		XA_BUG_ON(xa, xa->xa_head != node);
		xa_erase_index(xa, 0);
	}
}

/* Check the xa_cmpxchg() and xa_insert() conditional-store semantics. */
static noinline void check_cmpxchg(struct xarray *xa)
{
	void *FIVE = xa_mk_value(5);
	void *SIX = xa_mk_value(6);
	void *LOTS = xa_mk_value(12345678);

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EEXIST);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE,
				GFP_KERNEL) != LOTS);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
	xa_erase_index(xa, 12345678);
	xa_erase_index(xa, 5);
	XA_BUG_ON(xa, !xa_empty(xa));
}

/* Check how reserved entries interact with load/store/cmpxchg/insert. */
static noinline void check_reserve(struct xarray *xa)
{
	void *entry;
	unsigned long index;

	/* An array with a reserved entry is not empty */
	XA_BUG_ON(xa, !xa_empty(xa));
	xa_reserve(xa, 12345678, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_load(xa, 12345678));
	xa_release(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Releasing a used entry does nothing */
	xa_reserve(xa, 12345678, GFP_KERNEL);
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* cmpxchg sees a reserved entry as NULL */
	xa_reserve(xa, 12345678, GFP_KERNEL);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, NULL, xa_mk_value(12345678),
				GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* But xa_insert does not */
	xa_reserve(xa, 12345678, GFP_KERNEL);
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) !=
			-EEXIST);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Can iterate through a reserved entry */
	xa_store_index(xa, 5, GFP_KERNEL);
	xa_reserve(xa, 6, GFP_KERNEL);
	xa_store_index(xa, 7, GFP_KERNEL);

	xa_for_each(xa, index, entry) {
		/* The reserved index 6 must be skipped by iteration. */
		XA_BUG_ON(xa, index != 5 && index != 7);
	}
	xa_destroy(xa);
}

/* Check storing and erasing through the advanced xas_* interface. */
static noinline void check_xas_erase(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned long i, j;

	for (i = 0; i <
			200; i++) {
		for (j = i; j < 2 * i + 17; j++) {
			xas_set(&xas, j);
			do {
				xas_lock(&xas);
				xas_store(&xas, xa_mk_index(j));
				xas_unlock(&xas);
			} while (xas_nomem(&xas, GFP_KERNEL));
		}

		/* Grow the tree to full height, then prune the top again. */
		xas_set(&xas, ULONG_MAX);
		do {
			xas_lock(&xas);
			xas_store(&xas, xa_mk_value(0));
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));

		xas_lock(&xas);
		xas_store(&xas, NULL);

		xas_set(&xas, 0);
		j = i;
		xas_for_each(&xas, entry, ULONG_MAX) {
			XA_BUG_ON(xa, entry != xa_mk_index(j));
			xas_store(&xas, NULL);
			j++;
		}
		xas_unlock(&xas);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
}

#ifdef CONFIG_XARRAY_MULTI
/* Check that a multi-index entry covers exactly [min, max). */
static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, index);
	unsigned long min = index & ~((1UL << order) - 1);
	unsigned long max = min + (1UL << order);

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);

	/* Replacing through xas keeps the same multi-index span. */
	xas_lock(&xas);
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index));
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min));
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min));
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);

	xa_erase_index(xa, min);
	XA_BUG_ON(xa, !xa_empty(xa));
}

/* Check replacing then deleting a multi-index entry through one xa_state. */
static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, index);
	xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);

	xas_lock(&xas);
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != index);
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	xas_unlock(&xas);
	XA_BUG_ON(xa, !xa_empty(xa));
}

/* Check that iteration sees a multi-index entry exactly once per pass. */
static noinline void check_multi_store_3(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	int n = 0;

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 1);
	/* Starting mid-entry still finds the (same) entry once more. */
	xas_set(&xas, index + 1);
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 2);
	xas_unlock(&xas);

	xa_destroy(xa);
}
#endif

/* Top-level multi-index store tests (no-op without CONFIG_XARRAY_MULTI). */
static noinline void check_multi_store(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long i, j, k;
	unsigned int max_order = (sizeof(long) == 4) ? 30 : 60;

	/* Loading from any position returns the same value */
	xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Storing adjacent to the value does not alter the value */
	xa_store(xa, 3, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Overwriting multiple indexes works */
	xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 4) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4);
	rcu_read_unlock();

	/* We can erase multiple values with a single store */
	xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Even when the first slot is empty but the others aren't */
	xa_store_index(xa, 1, GFP_KERNEL);
	xa_store_index(xa, 2, GFP_KERNEL);
	xa_store_order(xa, 0, 2, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Overwrite order-i entry with order-j entry for all pairs. */
	for (i = 0; i < max_order; i++) {
		for (j = 0; j < max_order; j++) {
			xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL);
			xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL);

			for (k = 0; k < max_order; k++) {
				void *entry = xa_load(xa, (1UL << k) - 1);
				if ((i < k) && (j < k))
					XA_BUG_ON(xa, entry != NULL);
				else
					XA_BUG_ON(xa, entry != xa_mk_index(j));
			}

			xa_erase(xa, 0);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}

	for (i = 0; i < 20; i++) {
		check_multi_store_1(xa, 200, i);
		check_multi_store_1(xa, 0, i);
		check_multi_store_1(xa, (1UL << i) + 1, i);
	}
	check_multi_store_2(xa, 4095, 9);

	for (i = 1; i < 20; i++) {
		check_multi_store_3(xa, 0, i);
		check_multi_store_3(xa, 1UL << i, i);
	}
#endif
}

/* Allocating XArray used by the ID-allocation tests below. */
static DEFINE_XARRAY_ALLOC(xa0);

/* Check xa_alloc() ID assignment, reuse after erase, and exhaustion. */
static noinline void check_xa_alloc(void)
{
	int i;
	u32 id;

	/* An empty array should assign 0 to the first alloc */
	xa_alloc_index(&xa0, 0, GFP_KERNEL);

	/* Erasing it should make the array empty again */
	xa_erase_index(&xa0, 0);
	XA_BUG_ON(&xa0, !xa_empty(&xa0));

	/* And it should assign 0 again */
	xa_alloc_index(&xa0, 0, GFP_KERNEL);

	/* The next assigned ID should be 1 */
	xa_alloc_index(&xa0, 1, GFP_KERNEL);
	xa_erase_index(&xa0, 1);

	/* Storing a value should mark it used */
	xa_store_index(&xa0, 1, GFP_KERNEL);
	xa_alloc_index(&xa0, 2, GFP_KERNEL);

	/* If we then erase 0, it should be free */
	xa_erase_index(&xa0, 0);
	xa_alloc_index(&xa0, 0, GFP_KERNEL);

	xa_erase_index(&xa0, 1);
	xa_erase_index(&xa0, 2);

	for (i = 1; i < 5000; i++) {
		xa_alloc_index(&xa0, i, GFP_KERNEL);
	}

	xa_destroy(&xa0);

	/* Check wrapping near UINT_MAX and the -ENOSPC exhaustion path. */
	id = 0xfffffffeU;
	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
				GFP_KERNEL) != 0);
	XA_BUG_ON(&xa0, id != 0xfffffffeU);
	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
				GFP_KERNEL) != 0);
	XA_BUG_ON(&xa0, id != 0xffffffffU);
	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
				GFP_KERNEL) != -ENOSPC);
	XA_BUG_ON(&xa0, id != 0xffffffffU);
	xa_destroy(&xa0);

	id = 10;
	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id),
				GFP_KERNEL) != -ENOSPC);
	XA_BUG_ON(&xa0, xa_store_index(&xa0, 3, GFP_KERNEL) != 0);
	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id),
				GFP_KERNEL) != -ENOSPC);
	xa_erase_index(&xa0, 3);
	XA_BUG_ON(&xa0, !xa_empty(&xa0));
}

/*
 * Store an order-@order entry at @start and assert that
 * xas_for_each_conflict() visited exactly @present existing entries.
 */
static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
			unsigned int order, unsigned int present)
{
	XA_STATE_ORDER(xas, xa, start, order);
	void *entry;
	unsigned int count = 0;

retry:
	xas_lock(&xas);
	xas_for_each_conflict(&xas, entry) {
		XA_BUG_ON(xa, !xa_is_value(entry));
		XA_BUG_ON(xa, entry < xa_mk_index(start));
		XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1));
		count++;
	}
	xas_store(&xas, xa_mk_index(start));
	xas_unlock(&xas);
	if (xas_nomem(&xas, GFP_KERNEL)) {
		/* Allocation retry re-walks the conflicts; restart the tally. */
		count = 0;
		goto retry;
	}
	XA_BUG_ON(xa, xas_error(&xas));
	XA_BUG_ON(xa, count != present);
	XA_BUG_ON(xa, xa_load(xa, start) !=
			xa_mk_index(start));
	XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
			xa_mk_index(start));
	xa_erase_index(xa, start);
}

/* Drive __check_store_iter() over a range of orders and pre-fills. */
static noinline void check_store_iter(struct xarray *xa)
{
	unsigned int i, j;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;

	for (i = 0; i < max_order; i++) {
		unsigned int min = 1 << i;
		unsigned int max = (2 << i) - 1;
		__check_store_iter(xa, 0, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
		__check_store_iter(xa, min, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));

		xa_store_index(xa, min, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_index(xa, max, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));

		for (j = 0; j < min; j++)
			xa_store_index(xa, j, GFP_KERNEL);
		__check_store_iter(xa, 0, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
		for (j = 0; j < min; j++)
			xa_store_index(xa, min + j, GFP_KERNEL);
		__check_store_iter(xa, min, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#ifdef CONFIG_XARRAY_MULTI
	xa_store_index(xa, 63, GFP_KERNEL);
	xa_store_index(xa, 65, GFP_KERNEL);
	__check_store_iter(xa, 64, 2, 1);
	xa_erase_index(xa, 63);
#endif
	XA_BUG_ON(xa, !xa_empty(xa));
}

/* Check xa_find()/xa_find_after() over a multi-index entry. */
static noinline void check_multi_find(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long index;

	xa_store_order(xa, 12, 2, xa_mk_value(12), GFP_KERNEL);
	XA_BUG_ON(xa, xa_store_index(xa, 16, GFP_KERNEL) != NULL);

	index = 0;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(12));
	XA_BUG_ON(xa, index != 12);
	index = 13;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(12));
	/* A find inside the entry may report any index it spans. */
	XA_BUG_ON(xa, (index < 12) || (index >= 16));
	XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(16));
	XA_BUG_ON(xa, index != 16);

	xa_erase_index(xa, 12);
	xa_erase_index(xa, 16);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

/* Check that iterating a multi-index entry survives its concurrent erase. */
static noinline void check_multi_find_2(struct xarray *xa)
{
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 10 : 1;
	unsigned int i, j;
	void *entry;

	for (i = 0; i < max_order; i++) {
		unsigned long index = 1UL << i;
		for (j = 0; j < index; j++) {
			XA_STATE(xas, xa, j + index);
			xa_store_index(xa, index - 1, GFP_KERNEL);
			xa_store_order(xa, index, i, xa_mk_index(index),
					GFP_KERNEL);
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX) {
				/* Erase the entry out from under the walk. */
				xa_erase_index(xa, index);
			}
			rcu_read_unlock();
			xa_erase_index(xa, index - 1);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}
}

/* Exhaustive xa_find() check for every (i, j, start) triple below 100. */
static noinline void check_find_1(struct xarray *xa)
{
	unsigned long i, j, k;

	XA_BUG_ON(xa, !xa_empty(xa));

	/*
	 * Check xa_find with all pairs between 0 and 99 inclusive,
	 * starting at every index between 0 and 99
	 */
	for (i = 0; i < 100; i++) {
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
		xa_set_mark(xa, i, XA_MARK_0);
		for (j = 0; j < i; j++) {
			XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) !=
					NULL);
			xa_set_mark(xa, j, XA_MARK_0);
			for (k = 0; k < 100; k++) {
				unsigned long index = k;
				void *entry = xa_find(xa, &index, ULONG_MAX,
						XA_PRESENT);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);

				index = k;
				entry = xa_find(xa, &index, ULONG_MAX,
						XA_MARK_0);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);
			}
			xa_erase_index(xa, j);
			XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
		}
		xa_erase_index(xa, i);
		XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

/* Check xa_for_each() over an empty then steadily-growing array. */
static noinline void check_find_2(struct xarray *xa)
{
	void *entry;
	unsigned long i, j, index;

	xa_for_each(xa, index, entry) {
		XA_BUG_ON(xa, true);
	}

	for (i = 0; i < 1024; i++) {
		/*
		 * NOTE(review): stores at 'index' (left by the previous
		 * xa_for_each), not 'i' — looks intentional given the macro's
		 * post-iteration value, but verify it really appends entry i
		 * on every pass rather than overwriting.
		 */
		xa_store_index(xa, index, GFP_KERNEL);
		j = 0;
		xa_for_each(xa, index, entry) {
			XA_BUG_ON(xa, xa_mk_index(index) != entry);
			XA_BUG_ON(xa, index != j++);
		}
	}

	xa_destroy(xa);
}

/* Check that a bounded marked search past @k restarts the xa_state. */
static noinline void check_find_3(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long i, j, k;
	void *entry;

	for (i = 0; i < 100; i++) {
		for (j = 0; j < 100; j++) {
			rcu_read_lock();
			for (k = 0; k < 100; k++) {
				xas_set(&xas, j);
				xas_for_each_marked(&xas, entry, k, XA_MARK_0)
					;
				if (j > k)
					XA_BUG_ON(xa,
						xas.xa_node != XAS_RESTART);
			}
			rcu_read_unlock();
		}
		xa_store_index(xa, i, GFP_KERNEL);
		xa_set_mark(xa, i, XA_MARK_0);
	}
	xa_destroy(xa);
}

/* Driver for all the find-related tests. */
static noinline void check_find(struct xarray *xa)
{
	check_find_1(xa);
	check_find_2(xa);
	check_find_3(xa);
	check_multi_find(xa);
	check_multi_find_2(xa);
}

/* See find_swap_entry() in mm/shmem.c */
static noinline unsigned long xa_find_entry(struct xarray *xa, void *item)
{
	XA_STATE(xas, xa, 0);
	unsigned int checked = 0;
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (xas_retry(&xas, entry))
			continue;
		if (entry == item)
			break;
		checked++;
		/* Pause every fourth entry to exercise xas_pause(). */
		if ((checked % 4) != 0)
			continue;
		xas_pause(&xas);
	}
	rcu_read_unlock();

	return entry ?
			xas.xa_index : -1;
}

/* Check searching by entry pointer via xa_find_entry() above. */
static noinline void check_find_entry(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;
	unsigned long offset, index;

	for (order = 0; order < 20; order++) {
		for (offset = 0; offset < (1UL << (order + 3));
		     offset += (1UL << order)) {
			for (index = 0; index < (1UL << (order + 5));
			     index += (1UL << order)) {
				xa_store_order(xa, index, order,
						xa_mk_index(index), GFP_KERNEL);
				XA_BUG_ON(xa, xa_load(xa, index) !=
						xa_mk_index(index));
				XA_BUG_ON(xa, xa_find_entry(xa,
						xa_mk_index(index)) != index);
			}
			XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
			xa_destroy(xa);
		}
	}
#endif

	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	/* ULONG_MAX has its top bit masked by xa_mk_index(), so no match. */
	XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1);
	xa_erase_index(xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}

/* Check xas_next()/xas_prev() stepping over a sparse two-entry array. */
static noinline void check_move_small(struct xarray *xa, unsigned long idx)
{
	XA_STATE(xas, xa, 0);
	unsigned long i;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, idx, GFP_KERNEL);

	rcu_read_lock();
	for (i = 0; i < idx * 4; i++) {
		void *entry = xas_next(&xas);
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	}
	xas_next(&xas);
	XA_BUG_ON(xa, xas.xa_index != i);

	do {
		void *entry = xas_prev(&xas);
		i--;
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	} while (i > 0);

	/* Stepping wraps between index 0 and ULONG_MAX. */
	xas_set(&xas, ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index !=
			ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != 0);
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	rcu_read_unlock();

	xa_erase_index(xa, 0);
	xa_erase_index(xa, idx);
	XA_BUG_ON(xa, !xa_empty(xa));
}

/* Check forwards/backwards stepping over a dense 2^16-entry array. */
static noinline void check_move(struct xarray *xa)
{
	XA_STATE(xas, xa, (1 << 16) - 1);
	unsigned long i;

	for (i = 0; i < (1 << 16); i++)
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);

	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	/* Punch a hole in the middle and repeat both walks. */
	for (i = (1 << 8); i < (1 << 15); i++)
		xa_erase_index(xa, i);

	i = xas.xa_index;

	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	xa_destroy(xa);

	for (i = 0; i < 16; i++)
		check_move_small(xa, 1UL << i);

	for (i = 2; i < 16; i++)
		check_move_small(xa, (1UL << i) - 1);
}

static noinline void
xa_store_many_order(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned int i = 0;

	do {
		xas_lock(&xas);
		XA_BUG_ON(xa, xas_find_conflict(&xas));
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		/* Fill every slot of the aligned 2^order range. */
		for (i = 0; i < (1U << order); i++) {
			XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i)));
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));
}

/* Check a single xas_create_range() fill-then-erase round trip. */
static noinline void check_create_range_1(struct xarray *xa,
		unsigned long index, unsigned order)
{
	unsigned long i;

	xa_store_many_order(xa, index, order);
	for (i = index; i < index + (1UL << order); i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

/* Check back-to-back created ranges covering nr * nr indices. */
static noinline void check_create_range_2(struct xarray *xa, unsigned order)
{
	unsigned long i;
	unsigned long nr = 1UL << order;

	for (i = 0; i < nr * nr; i += nr)
		xa_store_many_order(xa, i, order);
	for (i = 0; i < nr * nr; i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

/* Check that xas_create_range() preserves a pre-existing error. */
static noinline void check_create_range_3(void)
{
	XA_STATE(xas, NULL, 0);
	xas_set_err(&xas, -EEXIST);
	xas_create_range(&xas);
	XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST);
}

/* Check creating a range over an already-present entry at @index. */
static noinline void check_create_range_4(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned long base = xas.xa_index;
	unsigned long i = 0;

	xa_store_index(xa, index, GFP_KERNEL);
	do {
		xas_lock(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1UL << order); i++) {
			void *old = xas_store(&xas, xa_mk_index(base + i));
			if (xas.xa_index == index)
				XA_BUG_ON(xa, old !=
xa_mk_index(base + i)); 1109 else 1110 XA_BUG_ON(xa, old != NULL); 1111 xas_next(&xas); 1112 } 1113 unlock: 1114 xas_unlock(&xas); 1115 } while (xas_nomem(&xas, GFP_KERNEL)); 1116 1117 XA_BUG_ON(xa, xas_error(&xas)); 1118 1119 for (i = base; i < base + (1UL << order); i++) 1120 xa_erase_index(xa, i); 1121 XA_BUG_ON(xa, !xa_empty(xa)); 1122 } 1123 1124 static noinline void check_create_range(struct xarray *xa) 1125 { 1126 unsigned int order; 1127 unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 12 : 1; 1128 1129 for (order = 0; order < max_order; order++) { 1130 check_create_range_1(xa, 0, order); 1131 check_create_range_1(xa, 1U << order, order); 1132 check_create_range_1(xa, 2U << order, order); 1133 check_create_range_1(xa, 3U << order, order); 1134 check_create_range_1(xa, 1U << 24, order); 1135 if (order < 10) 1136 check_create_range_2(xa, order); 1137 1138 check_create_range_4(xa, 0, order); 1139 check_create_range_4(xa, 1U << order, order); 1140 check_create_range_4(xa, 2U << order, order); 1141 check_create_range_4(xa, 3U << order, order); 1142 check_create_range_4(xa, 1U << 24, order); 1143 1144 check_create_range_4(xa, 1, order); 1145 check_create_range_4(xa, (1U << order) + 1, order); 1146 check_create_range_4(xa, (2U << order) + 1, order); 1147 check_create_range_4(xa, (2U << order) - 1, order); 1148 check_create_range_4(xa, (3U << order) + 1, order); 1149 check_create_range_4(xa, (3U << order) - 1, order); 1150 check_create_range_4(xa, (1U << 24) + 1, order); 1151 } 1152 1153 check_create_range_3(); 1154 } 1155 1156 static noinline void __check_store_range(struct xarray *xa, unsigned long first, 1157 unsigned long last) 1158 { 1159 #ifdef CONFIG_XARRAY_MULTI 1160 xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL); 1161 1162 XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first)); 1163 XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first)); 1164 XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL); 1165 XA_BUG_ON(xa, xa_load(xa, last + 1) 
!= NULL); 1166 1167 xa_store_range(xa, first, last, NULL, GFP_KERNEL); 1168 #endif 1169 1170 XA_BUG_ON(xa, !xa_empty(xa)); 1171 } 1172 1173 static noinline void check_store_range(struct xarray *xa) 1174 { 1175 unsigned long i, j; 1176 1177 for (i = 0; i < 128; i++) { 1178 for (j = i; j < 128; j++) { 1179 __check_store_range(xa, i, j); 1180 __check_store_range(xa, 128 + i, 128 + j); 1181 __check_store_range(xa, 4095 + i, 4095 + j); 1182 __check_store_range(xa, 4096 + i, 4096 + j); 1183 __check_store_range(xa, 123456 + i, 123456 + j); 1184 __check_store_range(xa, (1 << 24) + i, (1 << 24) + j); 1185 } 1186 } 1187 } 1188 1189 static void check_align_1(struct xarray *xa, char *name) 1190 { 1191 int i; 1192 unsigned int id; 1193 unsigned long index; 1194 void *entry; 1195 1196 for (i = 0; i < 8; i++) { 1197 id = 0; 1198 XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, name + i, GFP_KERNEL) 1199 != 0); 1200 XA_BUG_ON(xa, id != i); 1201 } 1202 xa_for_each(xa, index, entry) 1203 XA_BUG_ON(xa, xa_is_err(entry)); 1204 xa_destroy(xa); 1205 } 1206 1207 static noinline void check_align(struct xarray *xa) 1208 { 1209 char name[] = "Motorola 68000"; 1210 1211 check_align_1(xa, name); 1212 check_align_1(xa, name + 1); 1213 check_align_1(xa, name + 2); 1214 check_align_1(xa, name + 3); 1215 // check_align_2(xa, name); 1216 } 1217 1218 static LIST_HEAD(shadow_nodes); 1219 1220 static void test_update_node(struct xa_node *node) 1221 { 1222 if (node->count && node->count == node->nr_values) { 1223 if (list_empty(&node->private_list)) 1224 list_add(&shadow_nodes, &node->private_list); 1225 } else { 1226 if (!list_empty(&node->private_list)) 1227 list_del_init(&node->private_list); 1228 } 1229 } 1230 1231 static noinline void shadow_remove(struct xarray *xa) 1232 { 1233 struct xa_node *node; 1234 1235 xa_lock(xa); 1236 while ((node = list_first_entry_or_null(&shadow_nodes, 1237 struct xa_node, private_list))) { 1238 XA_STATE(xas, node->array, 0); 1239 XA_BUG_ON(xa, node->array != xa); 1240 
list_del_init(&node->private_list); 1241 xas.xa_node = xa_parent_locked(node->array, node); 1242 xas.xa_offset = node->offset; 1243 xas.xa_shift = node->shift + XA_CHUNK_SHIFT; 1244 xas_set_update(&xas, test_update_node); 1245 xas_store(&xas, NULL); 1246 } 1247 xa_unlock(xa); 1248 } 1249 1250 static noinline void check_workingset(struct xarray *xa, unsigned long index) 1251 { 1252 XA_STATE(xas, xa, index); 1253 xas_set_update(&xas, test_update_node); 1254 1255 do { 1256 xas_lock(&xas); 1257 xas_store(&xas, xa_mk_value(0)); 1258 xas_next(&xas); 1259 xas_store(&xas, xa_mk_value(1)); 1260 xas_unlock(&xas); 1261 } while (xas_nomem(&xas, GFP_KERNEL)); 1262 1263 XA_BUG_ON(xa, list_empty(&shadow_nodes)); 1264 1265 xas_lock(&xas); 1266 xas_next(&xas); 1267 xas_store(&xas, &xas); 1268 XA_BUG_ON(xa, !list_empty(&shadow_nodes)); 1269 1270 xas_store(&xas, xa_mk_value(2)); 1271 xas_unlock(&xas); 1272 XA_BUG_ON(xa, list_empty(&shadow_nodes)); 1273 1274 shadow_remove(xa); 1275 XA_BUG_ON(xa, !list_empty(&shadow_nodes)); 1276 XA_BUG_ON(xa, !xa_empty(xa)); 1277 } 1278 1279 /* 1280 * Check that the pointer / value / sibling entries are accounted the 1281 * way we expect them to be. 
1282 */ 1283 static noinline void check_account(struct xarray *xa) 1284 { 1285 #ifdef CONFIG_XARRAY_MULTI 1286 unsigned int order; 1287 1288 for (order = 1; order < 12; order++) { 1289 XA_STATE(xas, xa, 1 << order); 1290 1291 xa_store_order(xa, 0, order, xa, GFP_KERNEL); 1292 rcu_read_lock(); 1293 xas_load(&xas); 1294 XA_BUG_ON(xa, xas.xa_node->count == 0); 1295 XA_BUG_ON(xa, xas.xa_node->count > (1 << order)); 1296 XA_BUG_ON(xa, xas.xa_node->nr_values != 0); 1297 rcu_read_unlock(); 1298 1299 xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order), 1300 GFP_KERNEL); 1301 XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2); 1302 1303 xa_erase(xa, 1 << order); 1304 XA_BUG_ON(xa, xas.xa_node->nr_values != 0); 1305 1306 xa_erase(xa, 0); 1307 XA_BUG_ON(xa, !xa_empty(xa)); 1308 } 1309 #endif 1310 } 1311 1312 static noinline void check_destroy(struct xarray *xa) 1313 { 1314 unsigned long index; 1315 1316 XA_BUG_ON(xa, !xa_empty(xa)); 1317 1318 /* Destroying an empty array is a no-op */ 1319 xa_destroy(xa); 1320 XA_BUG_ON(xa, !xa_empty(xa)); 1321 1322 /* Destroying an array with a single entry */ 1323 for (index = 0; index < 1000; index++) { 1324 xa_store_index(xa, index, GFP_KERNEL); 1325 XA_BUG_ON(xa, xa_empty(xa)); 1326 xa_destroy(xa); 1327 XA_BUG_ON(xa, !xa_empty(xa)); 1328 } 1329 1330 /* Destroying an array with a single entry at ULONG_MAX */ 1331 xa_store(xa, ULONG_MAX, xa, GFP_KERNEL); 1332 XA_BUG_ON(xa, xa_empty(xa)); 1333 xa_destroy(xa); 1334 XA_BUG_ON(xa, !xa_empty(xa)); 1335 1336 #ifdef CONFIG_XARRAY_MULTI 1337 /* Destroying an array with a multi-index entry */ 1338 xa_store_order(xa, 1 << 11, 11, xa, GFP_KERNEL); 1339 XA_BUG_ON(xa, xa_empty(xa)); 1340 xa_destroy(xa); 1341 XA_BUG_ON(xa, !xa_empty(xa)); 1342 #endif 1343 } 1344 1345 static DEFINE_XARRAY(array); 1346 1347 static int xarray_checks(void) 1348 { 1349 check_xa_err(&array); 1350 check_xas_retry(&array); 1351 check_xa_load(&array); 1352 check_xa_mark(&array); 1353 
check_xa_shrink(&array); 1354 check_xas_erase(&array); 1355 check_cmpxchg(&array); 1356 check_reserve(&array); 1357 check_multi_store(&array); 1358 check_xa_alloc(); 1359 check_find(&array); 1360 check_find_entry(&array); 1361 check_account(&array); 1362 check_destroy(&array); 1363 check_move(&array); 1364 check_create_range(&array); 1365 check_store_range(&array); 1366 check_store_iter(&array); 1367 check_align(&xa0); 1368 1369 check_workingset(&array, 0); 1370 check_workingset(&array, 64); 1371 check_workingset(&array, 4096); 1372 1373 printk("XArray: %u of %u tests passed\n", tests_passed, tests_run); 1374 return (tests_run == tests_passed) ? 0 : -EINVAL; 1375 } 1376 1377 static void xarray_exit(void) 1378 { 1379 } 1380 1381 module_init(xarray_checks); 1382 module_exit(xarray_exit); 1383 MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>"); 1384 MODULE_LICENSE("GPL"); 1385