// SPDX-License-Identifier: GPL-2.0+
/*
 * test_xarray.c: Test the XArray API
 * Copyright (c) 2017-2018 Microsoft Corporation
 * Copyright (c) 2019-2020 Oracle
 * Author: Matthew Wilcox <willy@infradead.org>
 */

#include <linux/xarray.h>
#include <linux/module.h>

static unsigned int tests_run;
static unsigned int tests_passed;

#ifndef XA_DEBUG
# ifdef __KERNEL__
void xa_dump(const struct xarray *xa) { }
# endif
#undef XA_BUG_ON
#define XA_BUG_ON(xa, x) do {					\
	tests_run++;						\
	if (x) {						\
		printk("BUG at %s:%d\n", __func__, __LINE__);	\
		xa_dump(xa);					\
		dump_stack();					\
	} else {						\
		tests_passed++;					\
	}							\
} while (0)
#endif

static void *xa_mk_index(unsigned long index)
{
	return xa_mk_value(index & LONG_MAX);
}

static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_store(xa, index, xa_mk_index(index), gfp);
}

static void xa_insert_index(struct xarray *xa, unsigned long index)
{
	XA_BUG_ON(xa, xa_insert(xa, index, xa_mk_index(index),
				GFP_KERNEL) != 0);
}

static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	u32 id;

	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(index), xa_limit_32b,
				gfp) != 0);
	XA_BUG_ON(xa, id != index);
}

static void xa_erase_index(struct xarray *xa, unsigned long index)
{
	XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, index) != NULL);
}

/*
 * If anyone needs this, please move it to xarray.c. We have no current
 * users outside the test suite because all current multislot users want
 * to use the advanced API.
 */
static void *xa_store_order(struct xarray *xa, unsigned long index,
		unsigned order, void *entry, gfp_t gfp)
{
	XA_STATE_ORDER(xas, xa, index, order);
	void *curr;

	do {
		xas_lock(&xas);
		curr = xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return curr;
}

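/*
 * The do/while + xas_nomem() construct above is the canonical
 * advanced-API store loop:
 *
 *	do {
 *		xas_lock(&xas);
 *		xas_store(&xas, entry);
 *		xas_unlock(&xas);
 *	} while (xas_nomem(&xas, gfp));
 *
 * Under the lock, xas_store() may only attempt GFP_NOWAIT allocations;
 * if one fails it records -ENOMEM in the xa_state.  xas_nomem() then
 * allocates with the caller's gfp flags (sleeping is fine once the lock
 * has been dropped) and returns true exactly when a retry is needed.
 */
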
static noinline void check_xa_err(struct xarray *xa)
{
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0);
#ifndef __KERNEL__
	/* The kernel does not fail GFP_NOWAIT allocations */
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
#endif
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0);
	// kills the test-suite :-(
	// XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
}

static noinline void check_xas_retry(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, 1, GFP_KERNEL);

	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
	xa_erase_index(xa, 1);
	XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
	XA_BUG_ON(xa, xas_retry(&xas, NULL));
	XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
	xas_reset(&xas);
	XA_BUG_ON(xa, xas.xa_node != XAS_RESTART);
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_node != NULL);
	rcu_read_unlock();

	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

	rcu_read_lock();
	XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas)));
	xas.xa_node = XAS_RESTART;
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	rcu_read_unlock();

	/* Make sure we can iterate through retry entries */
	xas_lock(&xas);
	xas_set(&xas, 0);
	xas_store(&xas, XA_RETRY_ENTRY);
	xas_set(&xas, 1);
	xas_store(&xas, XA_RETRY_ENTRY);

	xas_set(&xas, 0);
	xas_for_each(&xas, entry, ULONG_MAX) {
		xas_store(&xas, xa_mk_index(xas.xa_index));
	}
	xas_unlock(&xas);

	xa_erase_index(xa, 0);
	xa_erase_index(xa, 1);
}

static noinline void check_xa_load(struct xarray *xa)
{
	unsigned long i, j;

	for (i = 0; i < 1024; i++) {
		for (j = 0; j < 1024; j++) {
			void *entry = xa_load(xa, j);
			if (j < i)
				XA_BUG_ON(xa, xa_to_value(entry) != j);
			else
				XA_BUG_ON(xa, entry);
		}
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
	}

	for (i = 0; i < 1024; i++) {
		for (j = 0; j < 1024; j++) {
			void *entry = xa_load(xa, j);
			if (j >= i)
				XA_BUG_ON(xa, xa_to_value(entry) != j);
			else
				XA_BUG_ON(xa, entry);
		}
		xa_erase_index(xa, i);
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

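/*
 * Marks are user-defined bits (XA_MARK_0 to XA_MARK_2) attached to
 * individual entries.  The tests below rely on two properties: a mark
 * cannot be set on a NULL entry, and erasing an entry clears its marks.
 */
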
static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1;

	/* NULL elements have no marks set */
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/* Storing a pointer will not make a mark appear */
	XA_BUG_ON(xa, xa_store_index(xa, index, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));

	/* Setting one mark will not set another mark */
	XA_BUG_ON(xa, xa_get_mark(xa, index + 1, XA_MARK_0));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1));

	/* Storing NULL clears marks, and they can't be set again */
	xa_erase_index(xa, index);
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/*
	 * Storing a multi-index entry over entries with marks gives the
	 * entire entry the union of the marks
	 */
	BUG_ON((index % 4) != 0);
	for (order = 2; order < max_order; order++) {
		unsigned long base = round_down(index, 1UL << order);
		unsigned long next = base + (1UL << order);
		unsigned long i;

		XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
		xa_set_mark(xa, index + 1, XA_MARK_0);
		XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
		xa_set_mark(xa, index + 2, XA_MARK_2);
		XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
		xa_store_order(xa, index, order, xa_mk_index(index),
				GFP_KERNEL);
		for (i = base; i < next; i++) {
			XA_STATE(xas, xa, i);
			unsigned int seen = 0;
			void *entry;

			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
			XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2));

			/* We should see two elements in the array */
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX)
				seen++;
			rcu_read_unlock();
			XA_BUG_ON(xa, seen != 2);

			/* One of which is marked */
			xas_set(&xas, 0);
			seen = 0;
			rcu_read_lock();
			xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
				seen++;
			rcu_read_unlock();
			XA_BUG_ON(xa, seen != 1);
		}
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2));
		xa_erase_index(xa, index);
		xa_erase_index(xa, next);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_xa_mark_2(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long index;
	unsigned int count = 0;
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_set_mark(xa, 0, XA_MARK_0);
	xas_lock(&xas);
	xas_load(&xas);
	xas_init_marks(&xas);
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_get_mark(xa, 0, XA_MARK_0));

	for (index = 3500; index < 4500; index++) {
		xa_store_index(xa, index, GFP_KERNEL);
		xa_set_mark(xa, index, XA_MARK_0);
	}

	xas_reset(&xas);
	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
		count++;
	rcu_read_unlock();
	XA_BUG_ON(xa, count != 1000);

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		xas_init_marks(&xas);
		XA_BUG_ON(xa, !xa_get_mark(xa, xas.xa_index, XA_MARK_0));
		XA_BUG_ON(xa, !xas_get_mark(&xas, XA_MARK_0));
	}
	xas_unlock(&xas);

	xa_destroy(xa);
}

static noinline void check_xa_mark(struct xarray *xa)
{
	unsigned long index;

	for (index = 0; index < 16384; index += 4)
		check_xa_mark_1(xa, index);

	check_xa_mark_2(xa);
}

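/*
 * Shrinking replaces a discarded root node's slot 0 with XA_RETRY_ENTRY
 * so that concurrent RCU readers still walking the old node know to
 * restart their lookup; the xa_entry_locked() check below observes
 * exactly that.
 */
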
static noinline void check_xa_shrink(struct xarray *xa)
{
	XA_STATE(xas, xa, 1);
	struct xa_node *node;
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 15 : 1;

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

	/*
	 * Check that erasing the entry at 1 shrinks the tree and properly
	 * marks the node as being deleted.
	 */
	xas_lock(&xas);
	XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1));
	node = xas.xa_node;
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != NULL);
	XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS);
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY);
	XA_BUG_ON(xa, xas_load(&xas) != NULL);
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	xa_erase_index(xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (order = 0; order < max_order; order++) {
		unsigned long max = (1UL << order) - 1;
		xa_store_order(xa, 0, order, xa_mk_value(0), GFP_KERNEL);
		XA_BUG_ON(xa, xa_load(xa, max) != xa_mk_value(0));
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
		rcu_read_lock();
		node = xa_head(xa);
		rcu_read_unlock();
		XA_BUG_ON(xa, xa_store_index(xa, ULONG_MAX, GFP_KERNEL) !=
				NULL);
		rcu_read_lock();
		XA_BUG_ON(xa, xa_head(xa) == node);
		rcu_read_unlock();
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
		xa_erase_index(xa, ULONG_MAX);
		XA_BUG_ON(xa, xa->xa_head != node);
		xa_erase_index(xa, 0);
	}
}

static noinline void check_insert(struct xarray *xa)
{
	unsigned long i;

	for (i = 0; i < 1024; i++) {
		xa_insert_index(xa, i);
		XA_BUG_ON(xa, xa_load(xa, i - 1) != NULL);
		XA_BUG_ON(xa, xa_load(xa, i + 1) != NULL);
		xa_erase_index(xa, i);
	}

	for (i = 10; i < BITS_PER_LONG; i++) {
		xa_insert_index(xa, 1UL << i);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 1) != NULL);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) + 1) != NULL);
		xa_erase_index(xa, 1UL << i);

		xa_insert_index(xa, (1UL << i) - 1);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 2) != NULL);
		XA_BUG_ON(xa, xa_load(xa, 1UL << i) != NULL);
		xa_erase_index(xa, (1UL << i) - 1);
	}

	xa_insert_index(xa, ~0UL);
	XA_BUG_ON(xa, xa_load(xa, 0UL) != NULL);
	XA_BUG_ON(xa, xa_load(xa, ~1UL) != NULL);
	xa_erase_index(xa, ~0UL);

	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_cmpxchg(struct xarray *xa)
{
	void *FIVE = xa_mk_value(5);
	void *SIX = xa_mk_value(6);
	void *LOTS = xa_mk_value(12345678);

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
	xa_erase_index(xa, 12345678);
	xa_erase_index(xa, 5);
	XA_BUG_ON(xa, !xa_empty(xa));
}

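/*
 * xa_reserve() stores the internal XA_ZERO_ENTRY, which occupies the
 * slot (so the array is not empty) but is returned to readers as NULL.
 * xa_release() frees the slot again, but only if it is still unused.
 */
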
static noinline void check_reserve(struct xarray *xa)
{
	void *entry;
	unsigned long index;
	int count;

	/* An array with a reserved entry is not empty */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_load(xa, 12345678));
	xa_release(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Releasing a used entry does nothing */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* cmpxchg sees a reserved entry as ZERO */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, XA_ZERO_ENTRY,
				xa_mk_value(12345678), GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* xa_insert treats it as busy */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) !=
			-EBUSY);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Can iterate through a reserved entry */
	xa_store_index(xa, 5, GFP_KERNEL);
	XA_BUG_ON(xa, xa_reserve(xa, 6, GFP_KERNEL) != 0);
	xa_store_index(xa, 7, GFP_KERNEL);

	count = 0;
	xa_for_each(xa, index, entry) {
		XA_BUG_ON(xa, index != 5 && index != 7);
		count++;
	}
	XA_BUG_ON(xa, count != 2);

	/* If we free a reserved entry, we should be able to allocate it */
	if (xa->xa_flags & XA_FLAGS_ALLOC) {
		u32 id;

		XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(8),
					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != 8);

		xa_release(xa, 6);
		XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(6),
					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != 6);
	}

	xa_destroy(xa);
}

static noinline void check_xas_erase(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned long i, j;

	for (i = 0; i < 200; i++) {
		for (j = i; j < 2 * i + 17; j++) {
			xas_set(&xas, j);
			do {
				xas_lock(&xas);
				xas_store(&xas, xa_mk_index(j));
				xas_unlock(&xas);
			} while (xas_nomem(&xas, GFP_KERNEL));
		}

		xas_set(&xas, ULONG_MAX);
		do {
			xas_lock(&xas);
			xas_store(&xas, xa_mk_value(0));
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));

		xas_lock(&xas);
		xas_store(&xas, NULL);

		xas_set(&xas, 0);
		j = i;
		xas_for_each(&xas, entry, ULONG_MAX) {
			XA_BUG_ON(xa, entry != xa_mk_index(j));
			xas_store(&xas, NULL);
			j++;
		}
		xas_unlock(&xas);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
}

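/*
 * Multi-index entries (CONFIG_XARRAY_MULTI) cover a power-of-two
 * aligned range of 1 << order slots: loading any index in the range
 * returns the same entry, and storing at any index in the range
 * replaces the entry for the whole range, as the tests below
 * demonstrate.
 */
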
#ifdef CONFIG_XARRAY_MULTI
static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, index);
	unsigned long min = index & ~((1UL << order) - 1);
	unsigned long max = min + (1UL << order);

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);

	xas_lock(&xas);
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index));
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min));
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min));
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);

	xa_erase_index(xa, min);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, index);
	xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);

	xas_lock(&xas);
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != index);
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	xas_unlock(&xas);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_multi_store_3(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	int n = 0;

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 1);
	xas_set(&xas, index + 1);
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 2);
	xas_unlock(&xas);

	xa_destroy(xa);
}
#endif

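/*
 * The accounting checks below peek at struct xa_node internals: count
 * is the number of occupied slots in a node (sibling slots included)
 * and nr_values is how many of those slots hold value entries.
 */
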
static noinline void check_multi_store(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long i, j, k;
	unsigned int max_order = (sizeof(long) == 4) ? 30 : 60;

	/* Loading from any position returns the same value */
	xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Storing adjacent to the value does not alter the value */
	xa_store(xa, 3, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Overwriting multiple indexes works */
	xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 4) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4);
	rcu_read_unlock();

	/* We can erase multiple values with a single store */
	xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Even when the first slot is empty but the others aren't */
	xa_store_index(xa, 1, GFP_KERNEL);
	xa_store_index(xa, 2, GFP_KERNEL);
	xa_store_order(xa, 0, 2, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0; i < max_order; i++) {
		for (j = 0; j < max_order; j++) {
			xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL);
			xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL);

			for (k = 0; k < max_order; k++) {
				void *entry = xa_load(xa, (1UL << k) - 1);
				if ((i < k) && (j < k))
					XA_BUG_ON(xa, entry != NULL);
				else
					XA_BUG_ON(xa, entry != xa_mk_index(j));
			}

			xa_erase(xa, 0);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}

	for (i = 0; i < 20; i++) {
		check_multi_store_1(xa, 200, i);
		check_multi_store_1(xa, 0, i);
		check_multi_store_1(xa, (1UL << i) + 1, i);
	}
	check_multi_store_2(xa, 4095, 9);

	for (i = 1; i < 20; i++) {
		check_multi_store_3(xa, 0, i);
		check_multi_store_3(xa, 1UL << i, i);
	}
#endif
}

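/*
 * Allocating XArrays hand out free IDs themselves.  An array defined
 * with DEFINE_XARRAY_ALLOC() allocates from 0, while one defined with
 * DEFINE_XARRAY_ALLOC1() reserves ID 0 and allocates from 1, which is
 * why these tests take the lowest allocatable ID as @base.
 */
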
static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base)
{
	int i;
	u32 id;

	XA_BUG_ON(xa, !xa_empty(xa));
	/* An empty array should assign %base to the first alloc */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Erasing it should make the array empty again */
	xa_erase_index(xa, base);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* And it should assign %base again */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Allocating and then erasing a lot should not lose base */
	for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++)
		xa_alloc_index(xa, i, GFP_KERNEL);
	for (i = base; i < 2 * XA_CHUNK_SIZE; i++)
		xa_erase_index(xa, i);
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Destroying the array should do the same as erasing */
	xa_destroy(xa);

	/* And it should assign %base again */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* The next assigned ID should be base+1 */
	xa_alloc_index(xa, base + 1, GFP_KERNEL);
	xa_erase_index(xa, base + 1);

	/* Storing a value should mark it used */
	xa_store_index(xa, base + 1, GFP_KERNEL);
	xa_alloc_index(xa, base + 2, GFP_KERNEL);

	/* If we then erase base, it should be free */
	xa_erase_index(xa, base);
	xa_alloc_index(xa, base, GFP_KERNEL);

	xa_erase_index(xa, base + 1);
	xa_erase_index(xa, base + 2);

	for (i = 1; i < 5000; i++) {
		xa_alloc_index(xa, base + i, GFP_KERNEL);
	}

	xa_destroy(xa);

	/* Check that we fail properly at the limit of allocation */
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX - 1),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0xfffffffeU);
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0xffffffffU);
	id = 3;
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(0),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, id != 3);
	xa_destroy(xa);

	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
	xa_erase_index(xa, 3);
	XA_BUG_ON(xa, !xa_empty(xa));
}

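/*
 * xa_alloc() may legitimately be given a NULL entry: the ID is still
 * allocated and the slot holds the internal ZERO entry, so the array is
 * non-empty even though xa_load() returns NULL for that index.
 */
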
static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base)
{
	unsigned int i, id;
	unsigned long index;
	void *entry;

	/* Allocate and free a NULL and check xa_empty() behaves */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, id) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Ditto, but check destroy instead of erase */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = base; i < base + 10; i++) {
		XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b,
					GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != i);
	}

	XA_BUG_ON(xa, xa_store(xa, 3, xa_mk_index(3), GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store(xa, 4, xa_mk_index(4), GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store(xa, 4, NULL, GFP_KERNEL) != xa_mk_index(4));
	XA_BUG_ON(xa, xa_erase(xa, 5) != NULL);
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 5);

	xa_for_each(xa, index, entry) {
		xa_erase_index(xa, index);
	}

	for (i = base; i < base + 9; i++) {
		XA_BUG_ON(xa, xa_erase(xa, i) != NULL);
		XA_BUG_ON(xa, xa_empty(xa));
	}
	XA_BUG_ON(xa, xa_erase(xa, 8) != NULL);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, base + 9) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	xa_destroy(xa);
}

static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
{
	struct xa_limit limit = XA_LIMIT(1, 0x3fff);
	u32 next = 0;
	unsigned int i, id;
	unsigned long index;
	void *entry;

	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 1);

	next = 0x3ffd;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0x3ffd);
	xa_erase_index(xa, 0x3ffd);
	xa_erase_index(xa, 1);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0x3ffe; i < 0x4003; i++) {
		if (i < 0x4000)
			entry = xa_mk_index(i);
		else
			entry = xa_mk_index(i - 0x3fff);
		XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit,
					&next, GFP_KERNEL) != (id == 1));
		XA_BUG_ON(xa, xa_mk_index(id) != entry);
	}

	/* Check wrap-around is handled correctly */
	if (base != 0)
		xa_erase_index(xa, base);
	xa_erase_index(xa, base + 1);
	next = UINT_MAX;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != UINT_MAX);
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base),
				xa_limit_32b, &next, GFP_KERNEL) != 1);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base + 1);

	xa_for_each(xa, index, entry)
		xa_erase_index(xa, index);

	XA_BUG_ON(xa, !xa_empty(xa));
}

static DEFINE_XARRAY_ALLOC(xa0);
static DEFINE_XARRAY_ALLOC1(xa1);

static noinline void check_xa_alloc(void)
{
	check_xa_alloc_1(&xa0, 0);
	check_xa_alloc_1(&xa1, 1);
	check_xa_alloc_2(&xa0, 0);
	check_xa_alloc_2(&xa1, 1);
	check_xa_alloc_3(&xa0, 0);
	check_xa_alloc_3(&xa1, 1);
}

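/*
 * xas_for_each_conflict() iterates over every entry which would be
 * overwritten by a store to the range described by the xa_state; the
 * helper below uses it to count the pre-existing entries before
 * replacing them all with a single multi-index entry.
 */
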
static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
		unsigned int order, unsigned int present)
{
	XA_STATE_ORDER(xas, xa, start, order);
	void *entry;
	unsigned int count = 0;

retry:
	xas_lock(&xas);
	xas_for_each_conflict(&xas, entry) {
		XA_BUG_ON(xa, !xa_is_value(entry));
		XA_BUG_ON(xa, entry < xa_mk_index(start));
		XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1));
		count++;
	}
	xas_store(&xas, xa_mk_index(start));
	xas_unlock(&xas);
	if (xas_nomem(&xas, GFP_KERNEL)) {
		count = 0;
		goto retry;
	}
	XA_BUG_ON(xa, xas_error(&xas));
	XA_BUG_ON(xa, count != present);
	XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start));
	XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
			xa_mk_index(start));
	xa_erase_index(xa, start);
}

static noinline void check_store_iter(struct xarray *xa)
{
	unsigned int i, j;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;

	for (i = 0; i < max_order; i++) {
		unsigned int min = 1 << i;
		unsigned int max = (2 << i) - 1;
		__check_store_iter(xa, 0, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
		__check_store_iter(xa, min, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));

		xa_store_index(xa, min, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_index(xa, max, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));

		for (j = 0; j < min; j++)
			xa_store_index(xa, j, GFP_KERNEL);
		__check_store_iter(xa, 0, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
		for (j = 0; j < min; j++)
			xa_store_index(xa, min + j, GFP_KERNEL);
		__check_store_iter(xa, min, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#ifdef CONFIG_XARRAY_MULTI
	xa_store_index(xa, 63, GFP_KERNEL);
	xa_store_index(xa, 65, GFP_KERNEL);
	__check_store_iter(xa, 64, 2, 1);
	xa_erase_index(xa, 63);
#endif
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_multi_find_1(struct xarray *xa, unsigned order)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long multi = 3 << order;
	unsigned long next = 4 << order;
	unsigned long index;

	xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL);
	XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL);

	index = 0;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(multi));
	XA_BUG_ON(xa, index != multi);
	index = multi + 1;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(multi));
	XA_BUG_ON(xa, (index < multi) || (index >= next));
	XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(next));
	XA_BUG_ON(xa, index != next);
	XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL);
	XA_BUG_ON(xa, index != next);

	xa_erase_index(xa, multi);
	xa_erase_index(xa, next);
	xa_erase_index(xa, next + 1);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

static noinline void check_multi_find_2(struct xarray *xa)
{
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 10 : 1;
	unsigned int i, j;
	void *entry;

	for (i = 0; i < max_order; i++) {
		unsigned long index = 1UL << i;
		for (j = 0; j < index; j++) {
			XA_STATE(xas, xa, j + index);
			xa_store_index(xa, index - 1, GFP_KERNEL);
			xa_store_order(xa, index, i, xa_mk_index(index),
					GFP_KERNEL);
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX) {
				xa_erase_index(xa, index);
			}
			rcu_read_unlock();
			xa_erase_index(xa, index - 1);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}
}

static noinline void check_find_1(struct xarray *xa)
{
	unsigned long i, j, k;

	XA_BUG_ON(xa, !xa_empty(xa));

	/*
	 * Check xa_find with all pairs between 0 and 99 inclusive,
	 * starting at every index between 0 and 99
	 */
	for (i = 0; i < 100; i++) {
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
		xa_set_mark(xa, i, XA_MARK_0);
		for (j = 0; j < i; j++) {
			XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) !=
					NULL);
			xa_set_mark(xa, j, XA_MARK_0);
			for (k = 0; k < 100; k++) {
				unsigned long index = k;
				void *entry = xa_find(xa, &index, ULONG_MAX,
						XA_PRESENT);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);

				index = k;
				entry = xa_find(xa, &index, ULONG_MAX,
						XA_MARK_0);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);
			}
			xa_erase_index(xa, j);
			XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
		}
		xa_erase_index(xa, i);
		XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_find_2(struct xarray *xa)
{
	void *entry;
	unsigned long i, j, index;

	xa_for_each(xa, index, entry) {
		XA_BUG_ON(xa, true);
	}

	for (i = 0; i < 1024; i++) {
		xa_store_index(xa, i, GFP_KERNEL);
		j = 0;
		xa_for_each(xa, index, entry) {
			XA_BUG_ON(xa, xa_mk_index(index) != entry);
			XA_BUG_ON(xa, index != j++);
		}
	}

	xa_destroy(xa);
}

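/*
 * check_find_3() relies on an implementation detail: a marked search
 * which finds nothing leaves the xa_state at XAS_RESTART, so reusing
 * the state for another search does not continue from a stale position.
 */
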
static noinline void check_find_3(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long i, j, k;
	void *entry;

	for (i = 0; i < 100; i++) {
		for (j = 0; j < 100; j++) {
			rcu_read_lock();
			for (k = 0; k < 100; k++) {
				xas_set(&xas, j);
				xas_for_each_marked(&xas, entry, k, XA_MARK_0)
					;
				if (j > k)
					XA_BUG_ON(xa,
						xas.xa_node != XAS_RESTART);
			}
			rcu_read_unlock();
		}
		xa_store_index(xa, i, GFP_KERNEL);
		xa_set_mark(xa, i, XA_MARK_0);
	}
	xa_destroy(xa);
}

static noinline void check_find_4(struct xarray *xa)
{
	unsigned long index = 0;
	void *entry;

	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);

	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
	XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX));

	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
	XA_BUG_ON(xa, entry);

	xa_erase_index(xa, ULONG_MAX);
}

static noinline void check_find(struct xarray *xa)
{
	unsigned i;

	check_find_1(xa);
	check_find_2(xa);
	check_find_3(xa);
	check_find_4(xa);

	for (i = 2; i < 10; i++)
		check_multi_find_1(xa, i);
	check_multi_find_2(xa);
}

/* See find_swap_entry() in mm/shmem.c */
static noinline unsigned long xa_find_entry(struct xarray *xa, void *item)
{
	XA_STATE(xas, xa, 0);
	unsigned int checked = 0;
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (xas_retry(&xas, entry))
			continue;
		if (entry == item)
			break;
		checked++;
		if ((checked % 4) != 0)
			continue;
		xas_pause(&xas);
	}
	rcu_read_unlock();

	return entry ? xas.xa_index : -1;
}

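/*
 * xa_find_entry() above calls xas_pause() every fourth entry: pausing
 * records the iteration position in a form which remains valid after
 * the RCU lock is dropped, and is the standard way to split a long
 * iteration into smaller chunks.
 */
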
static noinline void check_find_entry(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;
	unsigned long offset, index;

	for (order = 0; order < 20; order++) {
		for (offset = 0; offset < (1UL << (order + 3));
		     offset += (1UL << order)) {
			for (index = 0; index < (1UL << (order + 5));
			     index += (1UL << order)) {
				xa_store_order(xa, index, order,
						xa_mk_index(index), GFP_KERNEL);
				XA_BUG_ON(xa, xa_load(xa, index) !=
						xa_mk_index(index));
				XA_BUG_ON(xa, xa_find_entry(xa,
						xa_mk_index(index)) != index);
			}
			XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
			xa_destroy(xa);
		}
	}
#endif

	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1);
	xa_erase_index(xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_move_tiny(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);

	XA_BUG_ON(xa, !xa_empty(xa));
	rcu_read_lock();
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	rcu_read_unlock();
	xa_store_index(xa, 0, GFP_KERNEL);
	rcu_read_lock();
	xas_set(&xas, 0);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_index(0));
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	xas_set(&xas, 0);
	XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0));
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	rcu_read_unlock();
	xa_erase_index(xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_move_max(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);

	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
	rcu_read_unlock();

	xas_set(&xas, 0);
	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
	xas_pause(&xas);
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
	rcu_read_unlock();

	xa_erase_index(xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}

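/*
 * Unlike the find functions, xas_next() and xas_prev() move exactly
 * one index at a time and return whatever is there, including NULL;
 * the tests below step across both present and empty slots.
 */
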
static noinline void check_move_small(struct xarray *xa, unsigned long idx)
{
	XA_STATE(xas, xa, 0);
	unsigned long i;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, idx, GFP_KERNEL);

	rcu_read_lock();
	for (i = 0; i < idx * 4; i++) {
		void *entry = xas_next(&xas);
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	}
	xas_next(&xas);
	XA_BUG_ON(xa, xas.xa_index != i);

	do {
		void *entry = xas_prev(&xas);
		i--;
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	} while (i > 0);

	xas_set(&xas, ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != 0);
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	rcu_read_unlock();

	xa_erase_index(xa, 0);
	xa_erase_index(xa, idx);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_move(struct xarray *xa)
{
	XA_STATE(xas, xa, (1 << 16) - 1);
	unsigned long i;

	for (i = 0; i < (1 << 16); i++)
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);

	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	for (i = (1 << 8); i < (1 << 15); i++)
		xa_erase_index(xa, i);

	i = xas.xa_index;

	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	xa_destroy(xa);

	check_move_tiny(xa);
	check_move_max(xa);

	for (i = 0; i < 16; i++)
		check_move_small(xa, 1UL << i);

	for (i = 2; i < 16; i++)
		check_move_small(xa, (1UL << i) - 1);
}

static noinline void xa_store_many_order(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned int i = 0;

	do {
		xas_lock(&xas);
		XA_BUG_ON(xa, xas_find_conflict(&xas));
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1U << order); i++) {
			XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i)));
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));
}

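/*
 * xas_create_range() allocates all the nodes needed to cover the range
 * described by the xa_state, so the stores which follow it in
 * xa_store_many_order() above cannot run out of memory; only the range
 * creation itself can fail and trigger the xas_nomem() retry.
 */
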
static noinline void check_create_range_1(struct xarray *xa,
		unsigned long index, unsigned order)
{
	unsigned long i;

	xa_store_many_order(xa, index, order);
	for (i = index; i < index + (1UL << order); i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range_2(struct xarray *xa, unsigned order)
{
	unsigned long i;
	unsigned long nr = 1UL << order;

	for (i = 0; i < nr * nr; i += nr)
		xa_store_many_order(xa, i, order);
	for (i = 0; i < nr * nr; i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range_3(void)
{
	XA_STATE(xas, NULL, 0);
	xas_set_err(&xas, -EEXIST);
	xas_create_range(&xas);
	XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST);
}

static noinline void check_create_range_4(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned long base = xas.xa_index;
	unsigned long i = 0;

	xa_store_index(xa, index, GFP_KERNEL);
	do {
		xas_lock(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1UL << order); i++) {
			void *old = xas_store(&xas, xa_mk_index(base + i));
			if (xas.xa_index == index)
				XA_BUG_ON(xa, old != xa_mk_index(base + i));
			else
				XA_BUG_ON(xa, old != NULL);
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));

	for (i = base; i < base + (1UL << order); i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range(struct xarray *xa)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 12 : 1;

	for (order = 0; order < max_order; order++) {
		check_create_range_1(xa, 0, order);
		check_create_range_1(xa, 1U << order, order);
		check_create_range_1(xa, 2U << order, order);
		check_create_range_1(xa, 3U << order, order);
		check_create_range_1(xa, 1U << 24, order);
		if (order < 10)
			check_create_range_2(xa, order);

		check_create_range_4(xa, 0, order);
		check_create_range_4(xa, 1U << order, order);
		check_create_range_4(xa, 2U << order, order);
		check_create_range_4(xa, 3U << order, order);
		check_create_range_4(xa, 1U << 24, order);

		check_create_range_4(xa, 1, order);
		check_create_range_4(xa, (1U << order) + 1, order);
		check_create_range_4(xa, (2U << order) + 1, order);
		check_create_range_4(xa, (2U << order) - 1, order);
		check_create_range_4(xa, (3U << order) + 1, order);
		check_create_range_4(xa, (3U << order) - 1, order);
		check_create_range_4(xa, (1U << 24) + 1, order);
	}

	check_create_range_3();
}

static noinline void __check_store_range(struct xarray *xa, unsigned long first,
		unsigned long last)
{
#ifdef CONFIG_XARRAY_MULTI
	xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL);

	XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first));
	XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first));
	XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL);
	XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL);

	xa_store_range(xa, first, last, NULL, GFP_KERNEL);
#endif

	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_store_range(struct xarray *xa)
{
	unsigned long i, j;

	for (i = 0; i < 128; i++) {
		for (j = i; j < 128; j++) {
			__check_store_range(xa, i, j);
			__check_store_range(xa, 128 + i, 128 + j);
			__check_store_range(xa, 4095 + i, 4095 + j);
			__check_store_range(xa, 4096 + i, 4096 + j);
			__check_store_range(xa, 123456 + i, 123456 + j);
			__check_store_range(xa, (1 << 24) + i, (1 << 24) + j);
		}
	}
}

static void check_align_1(struct xarray *xa, char *name)
{
	int i;
	unsigned int id;
	unsigned long index;
	void *entry;

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_alloc(xa, &id, name + i, xa_limit_32b,
					GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != i);
	}
	xa_for_each(xa, index, entry)
		XA_BUG_ON(xa, xa_is_err(entry));
	xa_destroy(xa);
}

/*
 * We should always be able to store without allocating memory after
 * reserving a slot.
 */
static void check_align_2(struct xarray *xa, char *name)
{
	int i;

	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_store(xa, 0, name + i, GFP_KERNEL) != NULL);
		xa_erase(xa, 0);
	}

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_reserve(xa, 0, GFP_KERNEL) != 0);
		XA_BUG_ON(xa, xa_store(xa, 0, name + i, 0) != NULL);
		xa_erase(xa, 0);
	}

	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_align(struct xarray *xa)
{
	char name[] = "Motorola 68000";

	check_align_1(xa, name);
	check_align_1(xa, name + 1);
	check_align_1(xa, name + 2);
	check_align_1(xa, name + 3);
	check_align_2(xa, name);
}

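/*
 * The workingset tests below mimic what mm/workingset.c does with
 * shadow entries: the update_node callback keeps a list of nodes which
 * contain nothing but value entries, and shadow_remove() tears such
 * nodes down again through the advanced API.
 */
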
static LIST_HEAD(shadow_nodes);

static void test_update_node(struct xa_node *node)
{
	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list))
			list_add(&shadow_nodes, &node->private_list);
	} else {
		if (!list_empty(&node->private_list))
			list_del_init(&node->private_list);
	}
}

static noinline void shadow_remove(struct xarray *xa)
{
	struct xa_node *node;

	xa_lock(xa);
	while ((node = list_first_entry_or_null(&shadow_nodes,
					struct xa_node, private_list))) {
		XA_STATE(xas, node->array, 0);
		XA_BUG_ON(xa, node->array != xa);
		list_del_init(&node->private_list);
		xas.xa_node = xa_parent_locked(node->array, node);
		xas.xa_offset = node->offset;
		xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
		xas_set_update(&xas, test_update_node);
		xas_store(&xas, NULL);
	}
	xa_unlock(xa);
}

static noinline void check_workingset(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	xas_set_update(&xas, test_update_node);

	do {
		xas_lock(&xas);
		xas_store(&xas, xa_mk_value(0));
		xas_next(&xas);
		xas_store(&xas, xa_mk_value(1));
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	xas_lock(&xas);
	xas_next(&xas);
	xas_store(&xas, &xas);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));

	xas_store(&xas, xa_mk_value(2));
	xas_unlock(&xas);
	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	shadow_remove(xa);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));
	XA_BUG_ON(xa, !xa_empty(xa));
}

/*
 * Check that the pointer / value / sibling entries are accounted the
 * way we expect them to be.
 */
static noinline void check_account(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;

	for (order = 1; order < 12; order++) {
		XA_STATE(xas, xa, 1 << order);

		xa_store_order(xa, 0, order, xa, GFP_KERNEL);
		rcu_read_lock();
		xas_load(&xas);
		XA_BUG_ON(xa, xas.xa_node->count == 0);
		XA_BUG_ON(xa, xas.xa_node->count > (1 << order));
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
		rcu_read_unlock();

		xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order),
				GFP_KERNEL);
		XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2);

		xa_erase(xa, 1 << order);
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);

		xa_erase(xa, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#endif
}

static noinline void check_destroy(struct xarray *xa)
{
	unsigned long index;

	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an empty array is a no-op */
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an array with a single entry */
	for (index = 0; index < 1000; index++) {
		xa_store_index(xa, index, GFP_KERNEL);
		XA_BUG_ON(xa, xa_empty(xa));
		xa_destroy(xa);
		XA_BUG_ON(xa, !xa_empty(xa));
	}

	/* Destroying an array with a single entry at ULONG_MAX */
	xa_store(xa, ULONG_MAX, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

#ifdef CONFIG_XARRAY_MULTI
	/* Destroying an array with a multi-index entry */
	xa_store_order(xa, 1 << 11, 11, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

static DEFINE_XARRAY(array);

static int xarray_checks(void)
{
	check_xa_err(&array);
	check_xas_retry(&array);
	check_xa_load(&array);
	check_xa_mark(&array);
	check_xa_shrink(&array);
	check_xas_erase(&array);
	check_insert(&array);
	check_cmpxchg(&array);
	check_reserve(&array);
	check_reserve(&xa0);
	check_multi_store(&array);
	check_xa_alloc();
	check_find(&array);
	check_find_entry(&array);
	check_account(&array);
	check_destroy(&array);
	check_move(&array);
	check_create_range(&array);
	check_store_range(&array);
	check_store_iter(&array);
	check_align(&xa0);

	check_workingset(&array, 0);
	check_workingset(&array, 64);
	check_workingset(&array, 4096);

	printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
	return (tests_run == tests_passed) ? 0 : -EINVAL;
}

static void xarray_exit(void)
{
}

module_init(xarray_checks);
module_exit(xarray_exit);
MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
MODULE_LICENSE("GPL");