/*
 * idr-test.c: Test the IDR API
 * Copyright (c) 2016 Matthew Wilcox <willy@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/bitmap.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/errno.h>

#include "test.h"

#define DUMMY_PTR	((void *)0x10)

int item_idr_free(int id, void *p, void *data)
{
	struct item *item = p;
	assert(item->index == id);
	free(p);

	return 0;
}

void item_idr_remove(struct idr *idr, int id)
{
	struct item *item = idr_find(idr, id);
	assert(item->index == id);
	idr_remove(idr, id);
	free(item);
}

/* Cyclic allocation should wrap back to the start once the maximum ID is reached. */
void idr_alloc_test(void)
{
	unsigned long i;
	DEFINE_IDR(idr);

	assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0, 0x4000, GFP_KERNEL) == 0);
	assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0x3ffd, 0x4000, GFP_KERNEL) == 0x3ffd);
	idr_remove(&idr, 0x3ffd);
	idr_remove(&idr, 0);

	for (i = 0x3ffe; i < 0x4003; i++) {
		int id;
		struct item *item;

		if (i < 0x4000)
			item = item_create(i, 0);
		else
			item = item_create(i - 0x3fff, 0);

		id = idr_alloc_cyclic(&idr, item, 1, 0x4000, GFP_KERNEL);
		assert(id == item->index);
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
}

void idr_replace_test(void)
{
	DEFINE_IDR(idr);

	idr_alloc(&idr, (void *)-1, 10, 11, GFP_KERNEL);
	idr_replace(&idr, &idr, 10);

	idr_destroy(&idr);
}

/*
 * Unlike the radix tree, you can put a NULL pointer -- with care -- into
 * the IDR.  Some interfaces, like idr_find(), do not distinguish between
 * "present, value is NULL" and "not present", but that's exactly what some
 * users want.
 */
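/*
 * Illustrative sketch only; this helper is hypothetical and not called by
 * the tests.  A common reason to want a "present but NULL" entry is to
 * reserve an ID first and only publish the real pointer with idr_replace()
 * once the object is fully set up.
 */
static inline int example_reserve_then_publish(struct idr *idr, void *obj)
{
	/* Reserve an ID; lookups see NULL until the object is published. */
	int id = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL);

	if (id < 0)
		return id;

	/* ... finish initialising obj here ... */

	/* Publish: from now on lookups of this ID return obj. */
	idr_replace(idr, obj, id);
	return id;
}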
void idr_null_test(void)
{
	int i;
	DEFINE_IDR(idr);

	assert(idr_is_empty(&idr));

	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
	assert(!idr_is_empty(&idr));
	idr_remove(&idr, 0);
	assert(idr_is_empty(&idr));

	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
	assert(!idr_is_empty(&idr));
	idr_destroy(&idr);
	assert(idr_is_empty(&idr));

	for (i = 0; i < 10; i++) {
		assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == i);
	}

	assert(idr_replace(&idr, DUMMY_PTR, 3) == NULL);
	assert(idr_replace(&idr, DUMMY_PTR, 4) == NULL);
	assert(idr_replace(&idr, NULL, 4) == DUMMY_PTR);
	assert(idr_replace(&idr, DUMMY_PTR, 11) == ERR_PTR(-ENOENT));
	idr_remove(&idr, 5);
	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 5);
	idr_remove(&idr, 5);

	for (i = 0; i < 9; i++) {
		idr_remove(&idr, i);
		assert(!idr_is_empty(&idr));
	}
	idr_remove(&idr, 8);
	assert(!idr_is_empty(&idr));
	idr_remove(&idr, 9);
	assert(idr_is_empty(&idr));

	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
	assert(idr_replace(&idr, DUMMY_PTR, 3) == ERR_PTR(-ENOENT));
	assert(idr_replace(&idr, DUMMY_PTR, 0) == NULL);
	assert(idr_replace(&idr, NULL, 0) == DUMMY_PTR);

	idr_destroy(&idr);
	assert(idr_is_empty(&idr));

	for (i = 1; i < 10; i++) {
		assert(idr_alloc(&idr, NULL, 1, 0, GFP_KERNEL) == i);
	}

	idr_destroy(&idr);
	assert(idr_is_empty(&idr));
}

/* Allocation with GFP_NOWAIT should succeed after idr_preload(). */
void idr_nowait_test(void)
{
	unsigned int i;
	DEFINE_IDR(idr);

	idr_preload(GFP_KERNEL);

	for (i = 0; i < 3; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, i, i + 1, GFP_NOWAIT) == i);
	}

	idr_preload_end();

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
}

/* idr_get_next() should walk a sparse set of IDs in ascending order. */
void idr_get_next_test(int base)
{
	unsigned long i;
	int nextid;
	DEFINE_IDR(idr);
	idr_init_base(&idr, base);

	int indices[] = {4, 7, 9, 15, 65, 128, 1000, 99999, 0};

	for (i = 0; indices[i]; i++) {
		struct item *item = item_create(indices[i], 0);
		assert(idr_alloc(&idr, item, indices[i], indices[i+1],
				 GFP_KERNEL) == indices[i]);
	}

	for (i = 0, nextid = 0; indices[i]; i++) {
		idr_get_next(&idr, &nextid);
		assert(nextid == indices[i]);
		nextid++;
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
}

int idr_u32_cb(int id, void *ptr, void *data)
{
	BUG_ON(id < 0);
	BUG_ON(ptr != DUMMY_PTR);
	return 0;
}

/* Exercise idr_alloc_u32() with handles on both sides of INT_MAX. */
void idr_u32_test1(struct idr *idr, u32 handle)
{
	static bool warned = false;
	u32 id = handle;
	int sid = 0;
	void *ptr;

	BUG_ON(idr_alloc_u32(idr, DUMMY_PTR, &id, id, GFP_KERNEL));
	BUG_ON(id != handle);
	BUG_ON(idr_alloc_u32(idr, DUMMY_PTR, &id, id, GFP_KERNEL) != -ENOSPC);
	BUG_ON(id != handle);
	if (!warned && id > INT_MAX)
		printk("vvv Ignore these warnings\n");
	ptr = idr_get_next(idr, &sid);
	if (id > INT_MAX) {
		BUG_ON(ptr != NULL);
		BUG_ON(sid != 0);
	} else {
		BUG_ON(ptr != DUMMY_PTR);
		BUG_ON(sid != id);
	}
	idr_for_each(idr, idr_u32_cb, NULL);
	if (!warned && id > INT_MAX) {
		printk("^^^ Warnings over\n");
		warned = true;
	}
	BUG_ON(idr_remove(idr, id) != DUMMY_PTR);
	BUG_ON(!idr_is_empty(idr));
}

void idr_u32_test(int base)
{
	DEFINE_IDR(idr);
	idr_init_base(&idr, base);
	idr_u32_test1(&idr, 10);
	idr_u32_test1(&idr, 0x7fffffff);
	idr_u32_test1(&idr, 0x80000000);
	idr_u32_test1(&idr, 0x80000001);
	idr_u32_test1(&idr, 0xffe00000);
	idr_u32_test1(&idr, 0xffffffff);
}

/* Store pointers at every offset into name[] so entries of all alignments are exercised. */
static void idr_align_test(struct idr *idr)
{
	char name[] = "Motorola 68000";
	int i, id;
	void *entry;

	for (i = 0; i < 9; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i);
		idr_for_each_entry(idr, entry, id);
	}
	idr_destroy(idr);

	for (i = 1; i < 10; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 1);
		idr_for_each_entry(idr, entry, id);
	}
	idr_destroy(idr);

	for (i = 2; i < 11; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 2);
		idr_for_each_entry(idr, entry, id);
	}
	idr_destroy(idr);

	for (i = 3; i < 12; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 3);
		idr_for_each_entry(idr, entry, id);
	}
	idr_destroy(idr);

	for (i = 0; i < 8; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != 0);
		BUG_ON(idr_alloc(idr, &name[i + 1], 0, 0, GFP_KERNEL) != 1);
		idr_for_each_entry(idr, entry, id);
		idr_remove(idr, 1);
		idr_for_each_entry(idr, entry, id);
		idr_remove(idr, 0);
		BUG_ON(!idr_is_empty(idr));
	}

	for (i = 0; i < 8; i++) {
		BUG_ON(idr_alloc(idr, NULL, 0, 0, GFP_KERNEL) != 0);
		idr_for_each_entry(idr, entry, id);
		idr_replace(idr, &name[i], 0);
		idr_for_each_entry(idr, entry, id);
		BUG_ON(idr_find(idr, 0) != &name[i]);
		idr_remove(idr, 0);
	}

	for (i = 0; i < 8; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != 0);
		BUG_ON(idr_alloc(idr, NULL, 0, 0, GFP_KERNEL) != 1);
		idr_remove(idr, 1);
		idr_for_each_entry(idr, entry, id);
		idr_replace(idr, &name[i + 1], 0);
		idr_for_each_entry(idr, entry, id);
		idr_remove(idr, 0);
	}
}

void idr_checks(void)
{
	unsigned long i;
	DEFINE_IDR(idr);

	for (i = 0; i < 10000; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, 0, 20000, GFP_KERNEL) == i);
	}

	assert(idr_alloc(&idr, DUMMY_PTR, 5, 30, GFP_KERNEL) < 0);

	for (i = 0; i < 5000; i++)
		item_idr_remove(&idr, i);

	idr_remove(&idr, 3);

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);

	assert(idr_is_empty(&idr));

	idr_remove(&idr, 3);
	idr_remove(&idr, 0);

	assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == 0);
	idr_remove(&idr, 1);
	for (i = 1; i < RADIX_TREE_MAP_SIZE; i++)
		assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == i);
	idr_remove(&idr, 1 << 30);
	idr_destroy(&idr);

	for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i);
	}
	assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i, GFP_KERNEL) == -ENOSPC);
	assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i + 10, GFP_KERNEL) == -ENOSPC);

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
	idr_destroy(&idr);

	assert(idr_is_empty(&idr));

	idr_set_cursor(&idr, INT_MAX - 3UL);
	for (i = INT_MAX - 3UL; i < INT_MAX + 3UL; i++) {
		struct item *item;
		unsigned int id;
		if (i <= INT_MAX)
			item = item_create(i, 0);
		else
			item = item_create(i - INT_MAX - 1, 0);

		id = idr_alloc_cyclic(&idr, item, 0, 0, GFP_KERNEL);
		assert(id == item->index);
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
	assert(idr_is_empty(&idr));

	for (i = 1; i < 10000; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, 1, 20000, GFP_KERNEL) == i);
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);

	idr_replace_test();
	idr_alloc_test();
	idr_null_test();
	idr_nowait_test();
	idr_get_next_test(0);
	idr_get_next_test(1);
	idr_get_next_test(4);
	idr_u32_test(4);
	idr_u32_test(1);
	idr_u32_test(0);
	idr_align_test(&idr);
}

#define module_init(x)
#define module_exit(x)
#define MODULE_AUTHOR(x)
#define MODULE_LICENSE(x)
#define dump_stack()	assert(0)
void ida_dump(struct ida *);

#include "../../../lib/test_ida.c"

/*
 * Check that we get the correct error when we run out of memory doing
 * allocations.  In userspace, GFP_NOWAIT will always fail an allocation.
 * The first test is for not having a bitmap available, and the second test
 * is for not being able to allocate a level of the radix tree.
 */
void ida_check_nomem(void)
{
	DEFINE_IDA(ida);
	int id;

	id = ida_alloc_min(&ida, 256, GFP_NOWAIT);
	IDA_BUG_ON(&ida, id != -ENOMEM);
	id = ida_alloc_min(&ida, 1UL << 30, GFP_NOWAIT);
	IDA_BUG_ON(&ida, id != -ENOMEM);
	IDA_BUG_ON(&ida, !ida_is_empty(&ida));
}

/*
 * Check handling of conversions between exceptional entries and full bitmaps.
 */
void ida_check_conv_user(void)
{
	DEFINE_IDA(ida);
	unsigned long i;

	for (i = 0; i < 1000000; i++) {
		int id = ida_alloc(&ida, GFP_NOWAIT);
		if (id == -ENOMEM) {
			IDA_BUG_ON(&ida, ((i % IDA_BITMAP_BITS) !=
					  BITS_PER_XA_VALUE) &&
					 ((i % IDA_BITMAP_BITS) != 0));
			id = ida_alloc(&ida, GFP_KERNEL);
		} else {
			IDA_BUG_ON(&ida, (i % IDA_BITMAP_BITS) ==
					 BITS_PER_XA_VALUE);
		}
		IDA_BUG_ON(&ida, id != i);
	}
	ida_destroy(&ida);
}

void ida_check_random(void)
{
	DEFINE_IDA(ida);
	DECLARE_BITMAP(bitmap, 2048);
	unsigned int i;
	time_t s = time(NULL);

 repeat:
	memset(bitmap, 0, sizeof(bitmap));
	for (i = 0; i < 100000; i++) {
		int bit = rand() & 2047;
		if (test_bit(bit, bitmap)) {
			__clear_bit(bit, bitmap);
			ida_free(&ida, bit);
		} else {
			__set_bit(bit, bitmap);
			IDA_BUG_ON(&ida, ida_alloc_min(&ida, bit, GFP_KERNEL)
					 != bit);
		}
	}
	ida_destroy(&ida);
	if (time(NULL) < s + 10)
		goto repeat;
}

void ida_simple_get_remove_test(void)
{
	DEFINE_IDA(ida);
	unsigned long i;

	for (i = 0; i < 10000; i++) {
		assert(ida_simple_get(&ida, 0, 20000, GFP_KERNEL) == i);
	}
	assert(ida_simple_get(&ida, 5, 30, GFP_KERNEL) < 0);

	for (i = 0; i < 10000; i++) {
		ida_simple_remove(&ida, i);
	}
	assert(ida_is_empty(&ida));

	ida_destroy(&ida);
}

void user_ida_checks(void)
{
	radix_tree_cpu_dead(1);

	ida_check_nomem();
	ida_check_conv_user();
	ida_check_random();
	ida_simple_get_remove_test();

	radix_tree_cpu_dead(1);
}

static void *ida_random_fn(void *arg)
{
	rcu_register_thread();
	ida_check_random();
	rcu_unregister_thread();
	return NULL;
}

void ida_thread_tests(void)
{
	pthread_t threads[20];
	int i;

	for (i = 0; i < ARRAY_SIZE(threads); i++)
		if (pthread_create(&threads[i], NULL, ida_random_fn, NULL)) {
			perror("creating ida thread");
			exit(1);
		}

	while (i--)
		pthread_join(threads[i], NULL);
}

void ida_tests(void)
{
	user_ida_checks();
	ida_checks();
	ida_exit();
	ida_thread_tests();
}

int __weak main(void)
{
	radix_tree_init();
	idr_checks();
	ida_tests();
	radix_tree_cpu_dead(1);
	rcu_barrier();
	if (nr_allocated)
		printf("nr_allocated = %d\n", nr_allocated);
	return 0;
}