// SPDX-License-Identifier: GPL-2.0-only
/*
 * idr-test.c: Test the IDR API
 * Copyright (c) 2016 Matthew Wilcox <willy@infradead.org>
 */
#include <linux/bitmap.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/errno.h>

#include "test.h"

#define DUMMY_PTR	((void *)0x10)

int item_idr_free(int id, void *p, void *data)
{
	struct item *item = p;
	assert(item->index == id);
	free(p);

	return 0;
}

void item_idr_remove(struct idr *idr, int id)
{
	struct item *item = idr_find(idr, id);
	assert(item->index == id);
	idr_remove(idr, id);
	free(item);
}

void idr_alloc_test(void)
{
	unsigned long i;
	DEFINE_IDR(idr);

	assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0, 0x4000, GFP_KERNEL) == 0);
	assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0x3ffd, 0x4000, GFP_KERNEL) == 0x3ffd);
	idr_remove(&idr, 0x3ffd);
	idr_remove(&idr, 0);

	for (i = 0x3ffe; i < 0x4003; i++) {
		int id;
		struct item *item;

		if (i < 0x4000)
			item = item_create(i, 0);
		else
			item = item_create(i - 0x3fff, 0);

		id = idr_alloc_cyclic(&idr, item, 1, 0x4000, GFP_KERNEL);
		assert(id == item->index);
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
}

void idr_replace_test(void)
{
	DEFINE_IDR(idr);

	idr_alloc(&idr, (void *)-1, 10, 11, GFP_KERNEL);
	idr_replace(&idr, &idr, 10);

	idr_destroy(&idr);
}

/*
 * Unlike the radix tree, you can put a NULL pointer -- with care -- into
 * the IDR.  Some interfaces, like idr_find(), do not distinguish between
 * "present, value is NULL" and "not present", but that's exactly what some
 * users want.
 */
void idr_null_test(void)
{
	int i;
	DEFINE_IDR(idr);

	assert(idr_is_empty(&idr));

	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
	assert(!idr_is_empty(&idr));
	idr_remove(&idr, 0);
	assert(idr_is_empty(&idr));

	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
	assert(!idr_is_empty(&idr));
	idr_destroy(&idr);
	assert(idr_is_empty(&idr));

	for (i = 0; i < 10; i++) {
		assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == i);
	}

	assert(idr_replace(&idr, DUMMY_PTR, 3) == NULL);
	assert(idr_replace(&idr, DUMMY_PTR, 4) == NULL);
	assert(idr_replace(&idr, NULL, 4) == DUMMY_PTR);
	assert(idr_replace(&idr, DUMMY_PTR, 11) == ERR_PTR(-ENOENT));
	idr_remove(&idr, 5);
	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 5);
	idr_remove(&idr, 5);

	for (i = 0; i < 9; i++) {
		idr_remove(&idr, i);
		assert(!idr_is_empty(&idr));
	}
	idr_remove(&idr, 8);
	assert(!idr_is_empty(&idr));
	idr_remove(&idr, 9);
	assert(idr_is_empty(&idr));

	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
	assert(idr_replace(&idr, DUMMY_PTR, 3) == ERR_PTR(-ENOENT));
	assert(idr_replace(&idr, DUMMY_PTR, 0) == NULL);
	assert(idr_replace(&idr, NULL, 0) == DUMMY_PTR);

	idr_destroy(&idr);
	assert(idr_is_empty(&idr));

	for (i = 1; i < 10; i++) {
		assert(idr_alloc(&idr, NULL, 1, 0, GFP_KERNEL) == i);
	}

	idr_destroy(&idr);
	assert(idr_is_empty(&idr));
}

void idr_nowait_test(void)
{
	unsigned int i;
	DEFINE_IDR(idr);

	idr_preload(GFP_KERNEL);

	for (i = 0; i < 3; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, i, i + 1, GFP_NOWAIT) == i);
	}

	idr_preload_end();

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
}
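/*
 * Not exercised by the suite itself: a minimal sketch of the usual
 * idr_preload() calling convention that idr_nowait_test() above relies on.
 * Real callers typically hold a spinlock between idr_preload() and
 * idr_preload_end(); the lock is only hinted at in comments here, and the
 * function name is purely illustrative.
 */
static __attribute__((unused)) int example_idr_preload_alloc(struct idr *idr,
							      void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);	/* may sleep; preallocates memory */
	/* spin_lock(&some_lock) would be taken here in kernel code */
	id = idr_alloc(idr, ptr, 0, 0, GFP_NOWAIT);	/* must not sleep */
	/* spin_unlock(&some_lock) */
	idr_preload_end();

	return id;	/* new ID on success, negative errno on failure */
}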
void idr_get_next_test(int base)
{
	unsigned long i;
	int nextid;
	DEFINE_IDR(idr);
	idr_init_base(&idr, base);

	int indices[] = {4, 7, 9, 15, 65, 128, 1000, 99999, 0};

	for (i = 0; indices[i]; i++) {
		struct item *item = item_create(indices[i], 0);
		assert(idr_alloc(&idr, item, indices[i], indices[i+1],
				 GFP_KERNEL) == indices[i]);
	}

	for (i = 0, nextid = 0; indices[i]; i++) {
		idr_get_next(&idr, &nextid);
		assert(nextid == indices[i]);
		nextid++;
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
}

int idr_u32_cb(int id, void *ptr, void *data)
{
	BUG_ON(id < 0);
	BUG_ON(ptr != DUMMY_PTR);
	return 0;
}

void idr_u32_test1(struct idr *idr, u32 handle)
{
	static bool warned = false;
	u32 id = handle;
	int sid = 0;
	void *ptr;

	BUG_ON(idr_alloc_u32(idr, DUMMY_PTR, &id, id, GFP_KERNEL));
	BUG_ON(id != handle);
	BUG_ON(idr_alloc_u32(idr, DUMMY_PTR, &id, id, GFP_KERNEL) != -ENOSPC);
	BUG_ON(id != handle);
	if (!warned && id > INT_MAX)
		printk("vvv Ignore these warnings\n");
	ptr = idr_get_next(idr, &sid);
	if (id > INT_MAX) {
		BUG_ON(ptr != NULL);
		BUG_ON(sid != 0);
	} else {
		BUG_ON(ptr != DUMMY_PTR);
		BUG_ON(sid != id);
	}
	idr_for_each(idr, idr_u32_cb, NULL);
	if (!warned && id > INT_MAX) {
		printk("^^^ Warnings over\n");
		warned = true;
	}
	BUG_ON(idr_remove(idr, id) != DUMMY_PTR);
	BUG_ON(!idr_is_empty(idr));
}

void idr_u32_test(int base)
{
	DEFINE_IDR(idr);
	idr_init_base(&idr, base);
	idr_u32_test1(&idr, 10);
	idr_u32_test1(&idr, 0x7fffffff);
	idr_u32_test1(&idr, 0x80000000);
	idr_u32_test1(&idr, 0x80000001);
	idr_u32_test1(&idr, 0xffe00000);
	idr_u32_test1(&idr, 0xffffffff);
}

static void idr_align_test(struct idr *idr)
{
	char name[] = "Motorola 68000";
	int i, id;
	void *entry;

	for (i = 0; i < 9; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i);
		idr_for_each_entry(idr, entry, id);
	}
	idr_destroy(idr);

	for (i = 1; i < 10; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 1);
		idr_for_each_entry(idr, entry, id);
	}
	idr_destroy(idr);

	for (i = 2; i < 11; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 2);
		idr_for_each_entry(idr, entry, id);
	}
	idr_destroy(idr);

	for (i = 3; i < 12; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 3);
		idr_for_each_entry(idr, entry, id);
	}
	idr_destroy(idr);

	for (i = 0; i < 8; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != 0);
		BUG_ON(idr_alloc(idr, &name[i + 1], 0, 0, GFP_KERNEL) != 1);
		idr_for_each_entry(idr, entry, id);
		idr_remove(idr, 1);
		idr_for_each_entry(idr, entry, id);
		idr_remove(idr, 0);
		BUG_ON(!idr_is_empty(idr));
	}

	for (i = 0; i < 8; i++) {
		BUG_ON(idr_alloc(idr, NULL, 0, 0, GFP_KERNEL) != 0);
		idr_for_each_entry(idr, entry, id);
		idr_replace(idr, &name[i], 0);
		idr_for_each_entry(idr, entry, id);
		BUG_ON(idr_find(idr, 0) != &name[i]);
		idr_remove(idr, 0);
	}

	for (i = 0; i < 8; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != 0);
		BUG_ON(idr_alloc(idr, NULL, 0, 0, GFP_KERNEL) != 1);
		idr_remove(idr, 1);
		idr_for_each_entry(idr, entry, id);
		idr_replace(idr, &name[i + 1], 0);
		idr_for_each_entry(idr, entry, id);
		idr_remove(idr, 0);
	}
}
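/*
 * Also not called by the tests: a minimal sketch of draining an IDR with
 * idr_for_each_entry(), the iterator idr_align_test() above loops with.
 * idr_destroy() only frees the IDR's internal nodes, not the stored
 * pointers, so owners free their entries first -- exactly what the tests
 * do with idr_for_each() and item_idr_free().  struct item comes from
 * this harness; the function name is an assumption for the sketch.
 */
static __attribute__((unused)) void example_idr_drain(struct idr *idr)
{
	struct item *item;
	int id;

	idr_for_each_entry(idr, item, id)
		free(item);		/* free our entries... */
	idr_destroy(idr);		/* ...then the IDR's own nodes */
}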
void idr_checks(void)
{
	unsigned long i;
	DEFINE_IDR(idr);

	for (i = 0; i < 10000; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, 0, 20000, GFP_KERNEL) == i);
	}

	assert(idr_alloc(&idr, DUMMY_PTR, 5, 30, GFP_KERNEL) < 0);

	for (i = 0; i < 5000; i++)
		item_idr_remove(&idr, i);

	idr_remove(&idr, 3);

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);

	assert(idr_is_empty(&idr));

	idr_remove(&idr, 3);
	idr_remove(&idr, 0);

	assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == 0);
	idr_remove(&idr, 1);
	for (i = 1; i < RADIX_TREE_MAP_SIZE; i++)
		assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == i);
	idr_remove(&idr, 1 << 30);
	idr_destroy(&idr);

	for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i);
	}
	assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i, GFP_KERNEL) == -ENOSPC);
	assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i + 10, GFP_KERNEL) == -ENOSPC);

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
	idr_destroy(&idr);

	assert(idr_is_empty(&idr));

	idr_set_cursor(&idr, INT_MAX - 3UL);
	for (i = INT_MAX - 3UL; i < INT_MAX + 3UL; i++) {
		struct item *item;
		unsigned int id;
		if (i <= INT_MAX)
			item = item_create(i, 0);
		else
			item = item_create(i - INT_MAX - 1, 0);

		id = idr_alloc_cyclic(&idr, item, 0, 0, GFP_KERNEL);
		assert(id == item->index);
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
	assert(idr_is_empty(&idr));

	for (i = 1; i < 10000; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, 1, 20000, GFP_KERNEL) == i);
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);

	idr_replace_test();
	idr_alloc_test();
	idr_null_test();
	idr_nowait_test();
	idr_get_next_test(0);
	idr_get_next_test(1);
	idr_get_next_test(4);
	idr_u32_test(4);
	idr_u32_test(1);
	idr_u32_test(0);
	idr_align_test(&idr);
}

#define module_init(x)
#define module_exit(x)
#define MODULE_AUTHOR(x)
#define MODULE_LICENSE(x)
#define dump_stack()	assert(0)
void ida_dump(struct ida *);

#include "../../../lib/test_ida.c"

/*
 * Check that we get the correct error when we run out of memory doing
 * allocations.  In userspace, GFP_NOWAIT will always fail an allocation.
 * The first test is for not having a bitmap available, and the second test
 * is for not being able to allocate a level of the radix tree.
 */
void ida_check_nomem(void)
{
	DEFINE_IDA(ida);
	int id;

	id = ida_alloc_min(&ida, 256, GFP_NOWAIT);
	IDA_BUG_ON(&ida, id != -ENOMEM);
	id = ida_alloc_min(&ida, 1UL << 30, GFP_NOWAIT);
	IDA_BUG_ON(&ida, id != -ENOMEM);
	IDA_BUG_ON(&ida, !ida_is_empty(&ida));
}
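/*
 * A small illustrative helper (not wired into the tests) showing the plain
 * ida_alloc()/ida_free() pairing that the checks below build on.  The
 * function name and the local IDA are assumptions for the sketch only.
 */
static __attribute__((unused)) int example_ida_round_trip(void)
{
	DEFINE_IDA(ida);
	int id;

	id = ida_alloc(&ida, GFP_KERNEL);	/* smallest free ID, >= 0 */
	if (id < 0)
		return id;			/* negative errno, e.g. -ENOMEM */

	/* ... the ID would be used here ... */

	ida_free(&ida, id);			/* give the ID back */
	IDA_BUG_ON(&ida, !ida_is_empty(&ida));
	return 0;
}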
/*
 * Check handling of conversions between exceptional entries and full bitmaps.
 */
void ida_check_conv_user(void)
{
	DEFINE_IDA(ida);
	unsigned long i;

	for (i = 0; i < 1000000; i++) {
		int id = ida_alloc(&ida, GFP_NOWAIT);
		if (id == -ENOMEM) {
			IDA_BUG_ON(&ida, ((i % IDA_BITMAP_BITS) !=
					  BITS_PER_XA_VALUE) &&
					 ((i % IDA_BITMAP_BITS) != 0));
			id = ida_alloc(&ida, GFP_KERNEL);
		} else {
			IDA_BUG_ON(&ida, (i % IDA_BITMAP_BITS) ==
					BITS_PER_XA_VALUE);
		}
		IDA_BUG_ON(&ida, id != i);
	}
	ida_destroy(&ida);
}

void ida_check_random(void)
{
	DEFINE_IDA(ida);
	DECLARE_BITMAP(bitmap, 2048);
	unsigned int i;
	time_t s = time(NULL);

repeat:
	memset(bitmap, 0, sizeof(bitmap));
	for (i = 0; i < 100000; i++) {
		int i = rand();
		int bit = i & 2047;
		if (test_bit(bit, bitmap)) {
			__clear_bit(bit, bitmap);
			ida_free(&ida, bit);
		} else {
			__set_bit(bit, bitmap);
			IDA_BUG_ON(&ida, ida_alloc_min(&ida, bit, GFP_KERNEL)
					!= bit);
		}
	}
	ida_destroy(&ida);
	if (time(NULL) < s + 10)
		goto repeat;
}

void ida_simple_get_remove_test(void)
{
	DEFINE_IDA(ida);
	unsigned long i;

	for (i = 0; i < 10000; i++) {
		assert(ida_simple_get(&ida, 0, 20000, GFP_KERNEL) == i);
	}
	assert(ida_simple_get(&ida, 5, 30, GFP_KERNEL) < 0);

	for (i = 0; i < 10000; i++) {
		ida_simple_remove(&ida, i);
	}
	assert(ida_is_empty(&ida));

	ida_destroy(&ida);
}

void user_ida_checks(void)
{
	radix_tree_cpu_dead(1);

	ida_check_nomem();
	ida_check_conv_user();
	ida_check_random();
	ida_simple_get_remove_test();

	radix_tree_cpu_dead(1);
}

static void *ida_random_fn(void *arg)
{
	rcu_register_thread();
	ida_check_random();
	rcu_unregister_thread();
	return NULL;
}

void ida_thread_tests(void)
{
	pthread_t threads[20];
	int i;

	for (i = 0; i < ARRAY_SIZE(threads); i++)
		if (pthread_create(&threads[i], NULL, ida_random_fn, NULL)) {
			perror("creating ida thread");
			exit(1);
		}

	while (i--)
		pthread_join(threads[i], NULL);
}

void ida_tests(void)
{
	user_ida_checks();
	ida_checks();
	ida_exit();
	ida_thread_tests();
}

int __weak main(void)
{
	radix_tree_init();
	idr_checks();
	ida_tests();
	radix_tree_cpu_dead(1);
	rcu_barrier();
	if (nr_allocated)
		printf("nr_allocated = %d\n", nr_allocated);
	return 0;
}