// SPDX-License-Identifier: GPL-2.0-or-later
#include "alloc_nid_api.h"

/*
 * A simple test that tries to allocate a memory region within min_addr and
 * max_addr range:
 *
 *        +                   +
 *   |    +       +-----------+      |
 *   |    |       |    rgn    |      |
 *   +----+-------+-----------+------+
 *        ^                   ^
 *        |                   |
 *        min_addr            max_addr
 *
 * Expect to allocate a cleared region that ends at max_addr.
 */
static int alloc_try_nid_top_down_simple_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;

	phys_addr_t size = SZ_128;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
	max_addr = min_addr + SZ_512;

	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, NUMA_NO_NODE);
	b = (char *)allocated_ptr;
	rgn_end = rgn->base + rgn->size;

	assert(allocated_ptr);
	assert(*b == 0);

	assert(rgn->size == size);
	assert(rgn->base == max_addr - size);
	assert(rgn_end == max_addr);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == size);

	return 0;
}

/*
 * A simple test that tries to allocate a memory region within min_addr and
 * max_addr range, where the end address is misaligned:
 *
 *          +       +            +
 *  |       +       +---------+  +    |
 *  |       |       |   rgn   |  |    |
 *  +-------+-------+---------+--+----+
 *          ^       ^            ^
 *          |       |            |
 *        min_addr  |            max_addr
 *                  |
 *                  Aligned address
 *                  boundary
 *
 * Expect to allocate a cleared, aligned region that ends before max_addr.
 */
static int alloc_try_nid_top_down_end_misaligned_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;

	phys_addr_t size = SZ_128;
	phys_addr_t misalign = SZ_2;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
	max_addr = min_addr + SZ_512 + misalign;

	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, NUMA_NO_NODE);
	b = (char *)allocated_ptr;
	rgn_end = rgn->base + rgn->size;

	assert(allocated_ptr);
	assert(*b == 0);

	assert(rgn->size == size);
	assert(rgn->base == max_addr - size - misalign);
	assert(rgn_end < max_addr);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == size);

	return 0;
}

/*
 * A simple test that tries to allocate a memory region that spans over the
 * min_addr and max_addr range:
 *
 *         +               +
 *  |      +---------------+       |
 *  |      |      rgn      |       |
 *  +------+---------------+-------+
 *         ^               ^
 *         |               |
 *         min_addr        max_addr
 *
 * Expect to allocate a cleared region that starts at min_addr and ends at
 * max_addr, given that min_addr is aligned.
 */
static int alloc_try_nid_exact_address_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;

	phys_addr_t size = SZ_1K;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES;
	max_addr = min_addr + size;

	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, NUMA_NO_NODE);
	b = (char *)allocated_ptr;
	rgn_end = rgn->base + rgn->size;

	assert(allocated_ptr);
	assert(*b == 0);

	assert(rgn->size == size);
	assert(rgn->base == min_addr);
	assert(rgn_end == max_addr);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == size);

	return 0;
}

/*
 * A test that tries to allocate a memory region, which can't fit into
 * min_addr and max_addr range:
 *
 *           +          +     +
 *  |        +----------+-----+    |
 *  |        |    rgn   +     |    |
 *  +--------+----------+-----+----+
 *           ^          ^     ^
 *           |          |     |
 *           Aligned    |     max_addr
 *           address    |
 *           boundary   min_addr
 *
 * Expect to drop the lower limit and allocate a cleared memory region which
 * ends at max_addr (if the address is aligned).
 */
static int alloc_try_nid_top_down_narrow_range_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;

	phys_addr_t size = SZ_256;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SZ_512;
	max_addr = min_addr + SMP_CACHE_BYTES;

	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, NUMA_NO_NODE);
	b = (char *)allocated_ptr;

	assert(allocated_ptr);
	assert(*b == 0);

	assert(rgn->size == size);
	assert(rgn->base == max_addr - size);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == size);

	return 0;
}

/*
 * A test that tries to allocate a memory region, which can't fit into
 * min_addr and max_addr range, with the latter being too close to the beginning
 * of the available memory:
 *
 *  +-------------+
 *  |     new     |
 *  +-------------+
 *  +       +
 *  |       +
 *  |       |
 *  +-------+--------------+
 *  ^       ^
 *  |       |
 *  |       max_addr
 *  |
 *  min_addr
 *
 * Expect no allocation to happen.
 */
static int alloc_try_nid_low_max_generic_check(void)
{
	void *allocated_ptr = NULL;

	phys_addr_t size = SZ_1K;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	setup_memblock();

	min_addr = memblock_start_of_DRAM();
	max_addr = min_addr + SMP_CACHE_BYTES;

	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, NUMA_NO_NODE);

	assert(!allocated_ptr);

	return 0;
}

/*
 * A test that tries to allocate a memory region within min_addr and max_addr
 * range, with min_addr being so close that it's next to an allocated region:
 *
 *                   +                +
 *  |       +--------+---------------|
 *  |       |   r1   |      rgn      |
 *  +-------+--------+---------------+
 *                   ^                ^
 *                   |                |
 *                   min_addr         max_addr
 *
 * Expect a merge of both regions. Only the region size gets updated.
 */
static int alloc_try_nid_min_reserved_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;

	phys_addr_t r1_size = SZ_128;
	phys_addr_t r2_size = SZ_64;
	phys_addr_t total_size = r1_size + r2_size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t reserved_base;

	setup_memblock();

	max_addr = memblock_end_of_DRAM();
	min_addr = max_addr - r2_size;
	reserved_base = min_addr - r1_size;

	memblock_reserve(reserved_base, r1_size);

	allocated_ptr = memblock_alloc_try_nid(r2_size, SMP_CACHE_BYTES,
					       min_addr, max_addr, NUMA_NO_NODE);
	b = (char *)allocated_ptr;

	assert(allocated_ptr);
	assert(*b == 0);

	assert(rgn->size == total_size);
	assert(rgn->base == reserved_base);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == total_size);

	return 0;
}

/*
 * A test that tries to allocate a memory region within min_addr and max_addr,
 * with max_addr being so close that it's next to an allocated region:
 *
 *              +             +
 *  |           +-------------+--------|
 *  |           |     rgn     |   r1   |
 *  +-----------+-------------+--------+
 *              ^             ^
 *              |             |
 *              min_addr      max_addr
 *
 * Expect a merge of regions. Only the region size gets updated.
 */
static int alloc_try_nid_max_reserved_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;

	phys_addr_t r1_size = SZ_64;
	phys_addr_t r2_size = SZ_128;
	phys_addr_t total_size = r1_size + r2_size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	setup_memblock();

	max_addr = memblock_end_of_DRAM() - r1_size;
	min_addr = max_addr - r2_size;

	memblock_reserve(max_addr, r1_size);

	allocated_ptr = memblock_alloc_try_nid(r2_size, SMP_CACHE_BYTES,
					       min_addr, max_addr, NUMA_NO_NODE);
	b = (char *)allocated_ptr;

	assert(allocated_ptr);
	assert(*b == 0);

	assert(rgn->size == total_size);
	assert(rgn->base == min_addr);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == total_size);

	return 0;
}

/*
 * A test that tries to allocate memory within min_addr and max_addr range, when
 * there are two reserved regions at the borders, with a gap big enough to fit
 * a new region:
 *
 *                +           +
 *  |    +--------+   +-------+------+  |
 *  |    |   r2   |   |  rgn  |  r1  |  |
 *  +----+--------+---+-------+------+--+
 *                ^           ^
 *                |           |
 *                min_addr    max_addr
 *
 * Expect to merge the new region with r1. The second region does not get
 * updated. The total size field gets updated.
 */
static int alloc_try_nid_top_down_reserved_with_space_check(void)
{
	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;
	struct region r1, r2;

	phys_addr_t r3_size = SZ_64;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r3_size + gap_size + r2.size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr, NUMA_NO_NODE);
	b = (char *)allocated_ptr;

	assert(allocated_ptr);
	assert(*b == 0);

	assert(rgn1->size == r1.size + r3_size);
	assert(rgn1->base == max_addr - r3_size);

	assert(rgn2->size == r2.size);
	assert(rgn2->base == r2.base);

	assert(memblock.reserved.cnt == 2);
	assert(memblock.reserved.total_size == total_size);

	return 0;
}

/*
 * A test that tries to allocate memory within min_addr and max_addr range, when
 * there are two reserved regions at the borders, with a gap of a size equal to
 * the size of the new region:
 *
 *                 +        +
 *  |     +--------+--------+--------+     |
 *  |     |   r2   |   r3   |   r1   |     |
 *  +-----+--------+--------+--------+-----+
 *                 ^        ^
 *                 |        |
 *                 min_addr max_addr
 *
 * Expect to merge all of the regions into one. The region counter and total
 * size fields get updated.
 */
static int alloc_try_nid_reserved_full_merge_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;
	struct region r1, r2;

	phys_addr_t r3_size = SZ_64;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r3_size + r2.size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr, NUMA_NO_NODE);
	b = (char *)allocated_ptr;

	assert(allocated_ptr);
	assert(*b == 0);

	assert(rgn->size == total_size);
	assert(rgn->base == r2.base);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == total_size);

	return 0;
}

/*
 * A test that tries to allocate memory within min_addr and max_addr range, when
 * there are two reserved regions at the borders, with a gap that can't fit
 * a new region:
 *
 *                       +    +
 *  |  +----------+------+    +------+   |
 *  |  |    r3    |  r2  |    |  r1  |   |
 *  +--+----------+------+----+------+---+
 *                       ^    ^
 *                       |    |
 *                       |    max_addr
 *                       |
 *                       min_addr
 *
 * Expect to merge the new region with r2. The second region does not get
 * updated. The total size counter gets updated.
 */
static int alloc_try_nid_top_down_reserved_no_space_check(void)
{
	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;
	struct region r1, r2;

	phys_addr_t r3_size = SZ_256;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r2.size + gap_size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr, NUMA_NO_NODE);
	b = (char *)allocated_ptr;

	assert(allocated_ptr);
	assert(*b == 0);

	assert(rgn1->size == r1.size);
	assert(rgn1->base == r1.base);

	assert(rgn2->size == r2.size + r3_size);
	assert(rgn2->base == r2.base - r3_size);

	assert(memblock.reserved.cnt == 2);
	assert(memblock.reserved.total_size == total_size);

	return 0;
}

/*
 * A test that tries to allocate memory within min_addr and max_addr range, but
 * it's too narrow and everything else is reserved:
 *
 *              +-----------+
 *              |    new    |
 *              +-----------+
 *                 +      +
 *  |--------------+      +----------|
 *  |      r2      |      |    r1    |
 *  +--------------+------+----------+
 *                 ^      ^
 *                 |      |
 *                 |      max_addr
 *                 |
 *                 min_addr
 *
 * Expect no allocation to happen.
 */
static int alloc_try_nid_reserved_all_generic_check(void)
{
	void *allocated_ptr = NULL;
	struct region r1, r2;

	phys_addr_t r3_size = SZ_256;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES;
	r1.size = SMP_CACHE_BYTES;

	r2.size = MEM_SIZE - (r1.size + gap_size);
	r2.base = memblock_start_of_DRAM();

	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr, NUMA_NO_NODE);

	assert(!allocated_ptr);

	return 0;
}

/*
 * A test that tries to allocate a memory region, where max_addr is
 * bigger than the end address of the available memory. Expect to allocate
 * a cleared region that ends before the end of the memory.
 */
static int alloc_try_nid_top_down_cap_max_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;

	phys_addr_t size = SZ_256;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	setup_memblock();

	min_addr = memblock_end_of_DRAM() - SZ_1K;
	max_addr = memblock_end_of_DRAM() + SZ_256;

	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, NUMA_NO_NODE);
	b = (char *)allocated_ptr;

	assert(allocated_ptr);
	assert(*b == 0);

	assert(rgn->size == size);
	assert(rgn->base == memblock_end_of_DRAM() - size);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == size);

	return 0;
}

/*
 * A test that tries to allocate a memory region, where min_addr is
 * smaller than the start address of the available memory. Expect to allocate
 * a cleared region that ends before the end of the memory.
 */
static int alloc_try_nid_top_down_cap_min_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;

	phys_addr_t size = SZ_1K;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	setup_memblock();

	min_addr = memblock_start_of_DRAM() - SZ_256;
	max_addr = memblock_end_of_DRAM();

	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, NUMA_NO_NODE);
	b = (char *)allocated_ptr;

	assert(allocated_ptr);
	assert(*b == 0);

	assert(rgn->size == size);
	assert(rgn->base == memblock_end_of_DRAM() - size);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == size);

	return 0;
}

/*
 * A simple test that tries to allocate a memory region within min_addr and
 * max_addr range:
 *
 *        +                       +
 *   |    +-----------+           |      |
 *   |    |    rgn    |           |      |
 *   +----+-----------+-----------+------+
 *        ^                       ^
 *        |                       |
 *        min_addr                max_addr
 *
 * Expect to allocate a cleared region that ends before max_addr.
 */
static int alloc_try_nid_bottom_up_simple_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;

	phys_addr_t size = SZ_128;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
	max_addr = min_addr + SZ_512;

	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	b = (char *)allocated_ptr;
	rgn_end = rgn->base + rgn->size;

	assert(allocated_ptr);
	assert(*b == 0);

	assert(rgn->size == size);
	assert(rgn->base == min_addr);
	assert(rgn_end < max_addr);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == size);

	return 0;
}

/*
 * A simple test that tries to allocate a memory region within min_addr and
 * max_addr range, where the start address is misaligned:
 *
 *        +                     +
 *  |     +   +-----------+     +     |
 *  |     |   |    rgn    |     |     |
 *  +-----+---+-----------+-----+-----+
 *        ^   ^----.            ^
 *        |        |            |
 *      min_addr   |            max_addr
 *                 |
 *                 Aligned address
 *                 boundary
 *
 * Expect to allocate a cleared, aligned region that ends before max_addr.
 */
static int alloc_try_nid_bottom_up_start_misaligned_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;

	phys_addr_t size = SZ_128;
	phys_addr_t misalign = SZ_2;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	setup_memblock();

	min_addr = memblock_start_of_DRAM() + misalign;
	max_addr = min_addr + SZ_512;

	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	b = (char *)allocated_ptr;
	rgn_end = rgn->base + rgn->size;

	assert(allocated_ptr);
	assert(*b == 0);

	assert(rgn->size == size);
	assert(rgn->base == min_addr + (SMP_CACHE_BYTES - misalign));
	assert(rgn_end < max_addr);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == size);

	return 0;
}

/*
 * A test that tries to allocate a memory region, which can't fit into min_addr
 * and max_addr range:
 *
 *                      +    +
 *  |---------+         +    +      |
 *  |   rgn   |         |    |      |
 *  +---------+---------+----+------+
 *                      ^    ^
 *                      |    |
 *                      |    max_addr
 *                      |
 *                      min_addr
 *
 * Expect to drop the lower limit and allocate a cleared memory region which
 * starts at the beginning of the available memory.
 */
static int alloc_try_nid_bottom_up_narrow_range_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;

	phys_addr_t size = SZ_256;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SZ_512;
	max_addr = min_addr + SMP_CACHE_BYTES;

	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	b = (char *)allocated_ptr;

	assert(allocated_ptr);
	assert(*b == 0);

	assert(rgn->size == size);
	assert(rgn->base == memblock_start_of_DRAM());

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == size);

	return 0;
}

/*
 * A test that tries to allocate memory within min_addr and max_addr range, when
 * there are two reserved regions at the borders, with a gap big enough to fit
 * a new region:
 *
 *                +           +
 *  |    +--------+-------+   +------+  |
 *  |    |   r2   |  rgn  |   |  r1  |  |
 *  +----+--------+-------+---+------+--+
 *                ^           ^
 *                |           |
 *                min_addr    max_addr
 *
 * Expect to merge the new region with r2. The second region does not get
 * updated. The total size field gets updated.
 */
static int alloc_try_nid_bottom_up_reserved_with_space_check(void)
{
	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;
	struct region r1, r2;

	phys_addr_t r3_size = SZ_64;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r3_size + gap_size + r2.size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	b = (char *)allocated_ptr;

	assert(allocated_ptr);
	assert(*b == 0);

	assert(rgn1->size == r1.size);
	assert(rgn1->base == max_addr);

	assert(rgn2->size == r2.size + r3_size);
	assert(rgn2->base == r2.base);

	assert(memblock.reserved.cnt == 2);
	assert(memblock.reserved.total_size == total_size);

	return 0;
}

/*
 * A test that tries to allocate memory within min_addr and max_addr range, when
 * there are two reserved regions at the borders, with a gap that can't fit
 * a new region:
 *
 *                          +   +
 *  |----------+    +------+    +----+  |
 *  |    r3    |    |  r2  |    | r1 |  |
 *  +----------+----+------+---+----+--+
 *                          ^   ^
 *                          |   |
 *                          |   max_addr
 *                          |
 *                          min_addr
 *
 * Expect to drop the lower limit and allocate memory at the beginning of the
 * available memory. The region counter and total size fields get updated.
 * Other regions are not modified.
 */
static int alloc_try_nid_bottom_up_reserved_no_space_check(void)
{
	struct memblock_region *rgn1 = &memblock.reserved.regions[2];
	struct memblock_region *rgn2 = &memblock.reserved.regions[1];
	struct memblock_region *rgn3 = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;
	struct region r1, r2;

	phys_addr_t r3_size = SZ_256;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r2.size + gap_size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	b = (char *)allocated_ptr;

	assert(allocated_ptr);
	assert(*b == 0);

	assert(rgn3->size == r3_size);
	assert(rgn3->base == memblock_start_of_DRAM());

	assert(rgn2->size == r2.size);
	assert(rgn2->base == r2.base);

	assert(rgn1->size == r1.size);
	assert(rgn1->base == r1.base);

	assert(memblock.reserved.cnt == 3);
	assert(memblock.reserved.total_size == total_size);

	return 0;
}

/*
 * A test that tries to allocate a memory region, where max_addr is
 * bigger than the end address of the available memory.
 * Expect to allocate a cleared region that starts at min_addr.
 */
static int alloc_try_nid_bottom_up_cap_max_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;

	phys_addr_t size = SZ_256;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SZ_1K;
	max_addr = memblock_end_of_DRAM() + SZ_256;

	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	b = (char *)allocated_ptr;

	assert(allocated_ptr);
	assert(*b == 0);

	assert(rgn->size == size);
	assert(rgn->base == min_addr);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == size);

	return 0;
}

/*
 * A test that tries to allocate a memory region, where min_addr is
 * smaller than the start address of the available memory. Expect to allocate
 * a cleared region at the beginning of the available memory.
 */
static int alloc_try_nid_bottom_up_cap_min_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;

	phys_addr_t size = SZ_1K;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	setup_memblock();

	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM() - SZ_256;

	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	b = (char *)allocated_ptr;

	assert(allocated_ptr);
	assert(*b == 0);

	assert(rgn->size == size);
	assert(rgn->base == memblock_start_of_DRAM());

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == size);

	return 0;
}

/* Test case wrappers */
static int alloc_try_nid_simple_check(void)
{
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_simple_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_bottom_up_simple_check();

	return 0;
}

static int alloc_try_nid_misaligned_check(void)
{
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_end_misaligned_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_bottom_up_start_misaligned_check();

	return 0;
}

static int alloc_try_nid_narrow_range_check(void)
{
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_narrow_range_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_bottom_up_narrow_range_check();

	return 0;
}

static int alloc_try_nid_reserved_with_space_check(void)
{
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_reserved_with_space_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_bottom_up_reserved_with_space_check();

	return 0;
}

static int alloc_try_nid_reserved_no_space_check(void)
{
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_reserved_no_space_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_bottom_up_reserved_no_space_check();

	return 0;
}

static int alloc_try_nid_cap_max_check(void)
{
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_cap_max_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_bottom_up_cap_max_check();

	return 0;
}

static int alloc_try_nid_cap_min_check(void)
{
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_cap_min_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_bottom_up_cap_min_check();

	return 0;
}

static int alloc_try_nid_min_reserved_check(void)
{
	memblock_set_bottom_up(false);
	alloc_try_nid_min_reserved_generic_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_min_reserved_generic_check();

	return 0;
}

static int alloc_try_nid_max_reserved_check(void)
{
	memblock_set_bottom_up(false);
	alloc_try_nid_max_reserved_generic_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_max_reserved_generic_check();

	return 0;
}

static int alloc_try_nid_exact_address_check(void)
{
	memblock_set_bottom_up(false);
	alloc_try_nid_exact_address_generic_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_exact_address_generic_check();

	return 0;
}

static int alloc_try_nid_reserved_full_merge_check(void)
{
	memblock_set_bottom_up(false);
	alloc_try_nid_reserved_full_merge_generic_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_reserved_full_merge_generic_check();

	return 0;
}

static int alloc_try_nid_reserved_all_check(void)
{
	memblock_set_bottom_up(false);
	alloc_try_nid_reserved_all_generic_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_reserved_all_generic_check();

	return 0;
}

static int alloc_try_nid_low_max_check(void)
{
	memblock_set_bottom_up(false);
	alloc_try_nid_low_max_generic_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_low_max_generic_check();

	return 0;
}

int memblock_alloc_nid_checks(void)
{
	reset_memblock_attributes();
	dummy_physical_memory_init();

	alloc_try_nid_simple_check();
	alloc_try_nid_misaligned_check();
	alloc_try_nid_narrow_range_check();
	alloc_try_nid_reserved_with_space_check();
	alloc_try_nid_reserved_no_space_check();
	alloc_try_nid_cap_max_check();
	alloc_try_nid_cap_min_check();

	alloc_try_nid_min_reserved_check();
	alloc_try_nid_max_reserved_check();
	alloc_try_nid_exact_address_check();
	alloc_try_nid_reserved_full_merge_check();
	alloc_try_nid_reserved_all_check();
	alloc_try_nid_low_max_check();

	dummy_physical_memory_cleanup();

	return 0;
}