1 // SPDX-License-Identifier: GPL-2.0-or-later 2 #include <string.h> 3 #include <linux/memblock.h> 4 #include "basic_api.h" 5 6 #define EXPECTED_MEMBLOCK_REGIONS 128 7 #define FUNC_ADD "memblock_add" 8 #define FUNC_RESERVE "memblock_reserve" 9 #define FUNC_REMOVE "memblock_remove" 10 #define FUNC_FREE "memblock_free" 11 #define FUNC_TRIM "memblock_trim_memory" 12 13 static int memblock_initialization_check(void) 14 { 15 PREFIX_PUSH(); 16 17 ASSERT_NE(memblock.memory.regions, NULL); 18 ASSERT_EQ(memblock.memory.cnt, 1); 19 ASSERT_EQ(memblock.memory.max, EXPECTED_MEMBLOCK_REGIONS); 20 ASSERT_EQ(strcmp(memblock.memory.name, "memory"), 0); 21 22 ASSERT_NE(memblock.reserved.regions, NULL); 23 ASSERT_EQ(memblock.reserved.cnt, 1); 24 ASSERT_EQ(memblock.memory.max, EXPECTED_MEMBLOCK_REGIONS); 25 ASSERT_EQ(strcmp(memblock.reserved.name, "reserved"), 0); 26 27 ASSERT_EQ(memblock.bottom_up, false); 28 ASSERT_EQ(memblock.current_limit, MEMBLOCK_ALLOC_ANYWHERE); 29 30 test_pass_pop(); 31 32 return 0; 33 } 34 35 /* 36 * A simple test that adds a memory block of a specified base address 37 * and size to the collection of available memory regions (memblock.memory). 38 * Expect to create a new entry. The region counter and total memory get 39 * updated. 40 */ 41 static int memblock_add_simple_check(void) 42 { 43 struct memblock_region *rgn; 44 45 rgn = &memblock.memory.regions[0]; 46 47 struct region r = { 48 .base = SZ_1G, 49 .size = SZ_4M 50 }; 51 52 PREFIX_PUSH(); 53 54 reset_memblock_regions(); 55 memblock_add(r.base, r.size); 56 57 ASSERT_EQ(rgn->base, r.base); 58 ASSERT_EQ(rgn->size, r.size); 59 60 ASSERT_EQ(memblock.memory.cnt, 1); 61 ASSERT_EQ(memblock.memory.total_size, r.size); 62 63 test_pass_pop(); 64 65 return 0; 66 } 67 68 /* 69 * A simple test that adds a memory block of a specified base address, size, 70 * NUMA node and memory flags to the collection of available memory regions. 71 * Expect to create a new entry. The region counter and total memory get 72 * updated. 
73 */ 74 static int memblock_add_node_simple_check(void) 75 { 76 struct memblock_region *rgn; 77 78 rgn = &memblock.memory.regions[0]; 79 80 struct region r = { 81 .base = SZ_1M, 82 .size = SZ_16M 83 }; 84 85 PREFIX_PUSH(); 86 87 reset_memblock_regions(); 88 memblock_add_node(r.base, r.size, 1, MEMBLOCK_HOTPLUG); 89 90 ASSERT_EQ(rgn->base, r.base); 91 ASSERT_EQ(rgn->size, r.size); 92 #ifdef CONFIG_NUMA 93 ASSERT_EQ(rgn->nid, 1); 94 #endif 95 ASSERT_EQ(rgn->flags, MEMBLOCK_HOTPLUG); 96 97 ASSERT_EQ(memblock.memory.cnt, 1); 98 ASSERT_EQ(memblock.memory.total_size, r.size); 99 100 test_pass_pop(); 101 102 return 0; 103 } 104 105 /* 106 * A test that tries to add two memory blocks that don't overlap with one 107 * another: 108 * 109 * | +--------+ +--------+ | 110 * | | r1 | | r2 | | 111 * +--------+--------+--------+--------+--+ 112 * 113 * Expect to add two correctly initialized entries to the collection of 114 * available memory regions (memblock.memory). The total size and 115 * region counter fields get updated. 
 */
static int memblock_add_disjoint_check(void)
{
	struct memblock_region *rgn1, *rgn2;

	rgn1 = &memblock.memory.regions[0];
	rgn2 = &memblock.memory.regions[1];

	/* r2 starts SZ_8K past r1's end, so the two cannot be merged. */
	struct region r1 = {
		.base = SZ_1G,
		.size = SZ_8K
	};
	struct region r2 = {
		.base = SZ_1G + SZ_16K,
		.size = SZ_8K
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);

	/* Each block keeps its own entry, in insertion (address) order. */
	ASSERT_EQ(rgn1->base, r1.base);
	ASSERT_EQ(rgn1->size, r1.size);

	ASSERT_EQ(rgn2->base, r2.base);
	ASSERT_EQ(rgn2->size, r2.size);

	ASSERT_EQ(memblock.memory.cnt, 2);
	ASSERT_EQ(memblock.memory.total_size, r1.size + r2.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to add two memory blocks r1 and r2, where r2 overlaps
 * with the beginning of r1 (that is r1.base < r2.base + r2.size):
 *
 *  |    +----+----+------------+          |
 *  |    |    |r2  |     r1     |          |
 *  +----+----+----+------------+----------+
 *       ^    ^
 *       |    |
 *       |    r1.base
 *       |
 *       r2.base
 *
 * Expect to merge the two entries into one region that starts at r2.base
 * and has size of two regions minus their intersection. The total size of
 * the available memory is updated, and the region counter stays the same.
 */
static int memblock_add_overlap_top_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.memory.regions[0];

	/* r2 = [256M, 768M) overlaps the front of r1 = [512M, 1.5G). */
	struct region r1 = {
		.base = SZ_512M,
		.size = SZ_1G
	};
	struct region r2 = {
		.base = SZ_256M,
		.size = SZ_512M
	};

	PREFIX_PUSH();

	/* Union size: from r2.base up to r1's end. */
	total_size = (r1.base - r2.base) + r1.size;

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r2.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to add two memory blocks r1 and r2, where r2 overlaps
 * with the end of r1 (that is r2.base < r1.base + r1.size):
 *
 *  |  +--+------+----------+              |
 *  |  |  | r1   |   r2     |              |
 *  +--+--+------+----------+--------------+
 *     ^      ^
 *     |      |
 *     |      r2.base
 *     |
 *     r1.base
 *
 * Expect to merge the two entries into one region that starts at r1.base
 * and has size of two regions minus their intersection. The total size of
 * the available memory is updated, and the region counter stays the same.
 */
static int memblock_add_overlap_bottom_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.memory.regions[0];

	/* r2 = [256M, 1.25G) overlaps the tail of r1 = [128M, 640M). */
	struct region r1 = {
		.base = SZ_128M,
		.size = SZ_512M
	};
	struct region r2 = {
		.base = SZ_256M,
		.size = SZ_1G
	};

	PREFIX_PUSH();

	/* Union size: from r1.base up to r2's end. */
	total_size = (r2.base - r1.base) + r2.size;

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to add two memory blocks r1 and r2, where r2 is
 * within the range of r1 (that is r1.base < r2.base &&
 * r2.base + r2.size < r1.base + r1.size):
 *
 *  |   +-------+--+-----------------------+
 *  |   |       |r2|          r1           |
 *  +---+-------+--+-----------------------+
 *      ^
 *      |
 *      r1.base
 *
 * Expect to merge two entries into one region that stays the same.
 * The counter and total size of available memory are not updated.
 */
static int memblock_add_within_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.memory.regions[0];

	/* r2 = [16M, 17M) lies entirely inside r1 = [8M, 40M). */
	struct region r1 = {
		.base = SZ_8M,
		.size = SZ_32M
	};
	struct region r2 = {
		.base = SZ_16M,
		.size = SZ_1M
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);

	/* Adding the inner block must be a no-op on the accounting. */
	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, r1.size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, r1.size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to add the same memory block twice. Expect
 * the counter and total size of available memory to not be updated.
 */
static int memblock_add_twice_check(void)
{
	struct region r = {
		.base = SZ_16K,
		.size = SZ_2M
	};

	PREFIX_PUSH();

	reset_memblock_regions();

	/* The second, identical add must merge into the first entry. */
	memblock_add(r.base, r.size);
	memblock_add(r.base, r.size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, r.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to add two memory blocks that don't overlap with one
 * another and then add a third memory block in the space between the first two:
 *
 *  |        +--------+--------+--------+  |
 *  |        |   r1   |   r3   |   r2   |  |
 *  +--------+--------+--------+--------+--+
 *
 * Expect to merge the three entries into one region that starts at r1.base
 * and has size of r1.size + r2.size + r3.size. The region counter and total
 * size of the available memory are updated.
 */
static int memblock_add_between_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.memory.regions[0];

	/* r3 exactly fills the SZ_8K hole between r1 and r2. */
	struct region r1 = {
		.base = SZ_1G,
		.size = SZ_8K
	};
	struct region r2 = {
		.base = SZ_1G + SZ_16K,
		.size = SZ_8K
	};
	struct region r3 = {
		.base = SZ_1G + SZ_8K,
		.size = SZ_8K
	};

	PREFIX_PUSH();

	total_size = r1.size + r2.size + r3.size;

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);
	memblock_add(r3.base, r3.size);

	/* All three must coalesce into a single contiguous entry. */
	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to add a memory block r when r extends past
 * PHYS_ADDR_MAX:
 *
 *                               +--------+
 *                               |    r   |
 *                               +--------+
 *  |                            +----+
 *  |                            | rgn|
 *  +----------------------------+----+
 *
 * Expect to add a
memory block of size PHYS_ADDR_MAX - r.base. Expect the 394 * total size of available memory and the counter to be updated. 395 */ 396 static int memblock_add_near_max_check(void) 397 { 398 struct memblock_region *rgn; 399 phys_addr_t total_size; 400 401 rgn = &memblock.memory.regions[0]; 402 403 struct region r = { 404 .base = PHYS_ADDR_MAX - SZ_1M, 405 .size = SZ_2M 406 }; 407 408 PREFIX_PUSH(); 409 410 total_size = PHYS_ADDR_MAX - r.base; 411 412 reset_memblock_regions(); 413 memblock_add(r.base, r.size); 414 415 ASSERT_EQ(rgn->base, r.base); 416 ASSERT_EQ(rgn->size, total_size); 417 418 ASSERT_EQ(memblock.memory.cnt, 1); 419 ASSERT_EQ(memblock.memory.total_size, total_size); 420 421 test_pass_pop(); 422 423 return 0; 424 } 425 426 /* 427 * A test that trying to add the 129th memory block. 428 * Expect to trigger memblock_double_array() to double the 429 * memblock.memory.max, find a new valid memory as 430 * memory.regions. 431 */ 432 static int memblock_add_many_check(void) 433 { 434 int i; 435 void *orig_region; 436 struct region r = { 437 .base = SZ_16K, 438 .size = SZ_16K, 439 }; 440 phys_addr_t new_memory_regions_size; 441 phys_addr_t base, size = SZ_64; 442 phys_addr_t gap_size = SZ_64; 443 444 PREFIX_PUSH(); 445 446 reset_memblock_regions(); 447 memblock_allow_resize(); 448 449 dummy_physical_memory_init(); 450 /* 451 * We allocated enough memory by using dummy_physical_memory_init(), and 452 * split it into small block. First we split a large enough memory block 453 * as the memory region which will be choosed by memblock_double_array(). 454 */ 455 base = PAGE_ALIGN(dummy_physical_memory_base()); 456 new_memory_regions_size = PAGE_ALIGN(INIT_MEMBLOCK_REGIONS * 2 * 457 sizeof(struct memblock_region)); 458 memblock_add(base, new_memory_regions_size); 459 460 /* This is the base of small memory block. 
*/ 461 base += new_memory_regions_size + gap_size; 462 463 orig_region = memblock.memory.regions; 464 465 for (i = 0; i < INIT_MEMBLOCK_REGIONS; i++) { 466 /* 467 * Add these small block to fulfill the memblock. We keep a 468 * gap between the nearby memory to avoid being merged. 469 */ 470 memblock_add(base, size); 471 base += size + gap_size; 472 473 ASSERT_EQ(memblock.memory.cnt, i + 2); 474 ASSERT_EQ(memblock.memory.total_size, new_memory_regions_size + 475 (i + 1) * size); 476 } 477 478 /* 479 * At there, memblock_double_array() has been succeed, check if it 480 * update the memory.max. 481 */ 482 ASSERT_EQ(memblock.memory.max, INIT_MEMBLOCK_REGIONS * 2); 483 484 /* memblock_double_array() will reserve the memory it used. Check it. */ 485 ASSERT_EQ(memblock.reserved.cnt, 1); 486 ASSERT_EQ(memblock.reserved.total_size, new_memory_regions_size); 487 488 /* 489 * Now memblock_double_array() works fine. Let's check after the 490 * double_array(), the memblock_add() still works as normal. 491 */ 492 memblock_add(r.base, r.size); 493 ASSERT_EQ(memblock.memory.regions[0].base, r.base); 494 ASSERT_EQ(memblock.memory.regions[0].size, r.size); 495 496 ASSERT_EQ(memblock.memory.cnt, INIT_MEMBLOCK_REGIONS + 2); 497 ASSERT_EQ(memblock.memory.total_size, INIT_MEMBLOCK_REGIONS * size + 498 new_memory_regions_size + 499 r.size); 500 ASSERT_EQ(memblock.memory.max, INIT_MEMBLOCK_REGIONS * 2); 501 502 dummy_physical_memory_cleanup(); 503 504 /* 505 * The current memory.regions is occupying a range of memory that 506 * allocated from dummy_physical_memory_init(). After free the memory, 507 * we must not use it. So restore the origin memory region to make sure 508 * the tests can run as normal and not affected by the double array. 
509 */ 510 memblock.memory.regions = orig_region; 511 memblock.memory.cnt = INIT_MEMBLOCK_REGIONS; 512 513 test_pass_pop(); 514 515 return 0; 516 } 517 518 static int memblock_add_checks(void) 519 { 520 prefix_reset(); 521 prefix_push(FUNC_ADD); 522 test_print("Running %s tests...\n", FUNC_ADD); 523 524 memblock_add_simple_check(); 525 memblock_add_node_simple_check(); 526 memblock_add_disjoint_check(); 527 memblock_add_overlap_top_check(); 528 memblock_add_overlap_bottom_check(); 529 memblock_add_within_check(); 530 memblock_add_twice_check(); 531 memblock_add_between_check(); 532 memblock_add_near_max_check(); 533 memblock_add_many_check(); 534 535 prefix_pop(); 536 537 return 0; 538 } 539 540 /* 541 * A simple test that marks a memory block of a specified base address 542 * and size as reserved and to the collection of reserved memory regions 543 * (memblock.reserved). Expect to create a new entry. The region counter 544 * and total memory size are updated. 545 */ 546 static int memblock_reserve_simple_check(void) 547 { 548 struct memblock_region *rgn; 549 550 rgn = &memblock.reserved.regions[0]; 551 552 struct region r = { 553 .base = SZ_2G, 554 .size = SZ_128M 555 }; 556 557 PREFIX_PUSH(); 558 559 reset_memblock_regions(); 560 memblock_reserve(r.base, r.size); 561 562 ASSERT_EQ(rgn->base, r.base); 563 ASSERT_EQ(rgn->size, r.size); 564 565 test_pass_pop(); 566 567 return 0; 568 } 569 570 /* 571 * A test that tries to mark two memory blocks that don't overlap as reserved: 572 * 573 * | +--+ +----------------+ | 574 * | |r1| | r2 | | 575 * +--------+--+------+----------------+--+ 576 * 577 * Expect to add two entries to the collection of reserved memory regions 578 * (memblock.reserved). The total size and region counter for 579 * memblock.reserved are updated. 
 */
static int memblock_reserve_disjoint_check(void)
{
	struct memblock_region *rgn1, *rgn2;

	rgn1 = &memblock.reserved.regions[0];
	rgn2 = &memblock.reserved.regions[1];

	/* r1 = [256M, 272M) and r2 = [512M, 1G) do not touch. */
	struct region r1 = {
		.base = SZ_256M,
		.size = SZ_16M
	};
	struct region r2 = {
		.base = SZ_512M,
		.size = SZ_512M
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	/* Each reservation keeps its own entry. */
	ASSERT_EQ(rgn1->base, r1.base);
	ASSERT_EQ(rgn1->size, r1.size);

	ASSERT_EQ(rgn2->base, r2.base);
	ASSERT_EQ(rgn2->size, r2.size);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, r1.size + r2.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to mark two memory blocks r1 and r2 as reserved,
 * where r2 overlaps with the beginning of r1 (that is
 * r1.base < r2.base + r2.size):
 *
 *  |  +--------------+--+--------------+  |
 *  |  |       r2     |  |     r1       |  |
 *  +--+--------------+--+--------------+--+
 *     ^              ^
 *     |              |
 *     |              r1.base
 *     |
 *     r2.base
 *
 * Expect to merge two entries into one region that starts at r2.base and
 * has size of two regions minus their intersection. The total size of the
 * reserved memory is updated, and the region counter is not updated.
 */
static int memblock_reserve_overlap_top_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.reserved.regions[0];

	/* r2 = [128M, 1.125G) overlaps the front of r1 = [1G, 2G). */
	struct region r1 = {
		.base = SZ_1G,
		.size = SZ_1G
	};
	struct region r2 = {
		.base = SZ_128M,
		.size = SZ_1G
	};

	PREFIX_PUSH();

	/* Union size: from r2.base up to r1's end. */
	total_size = (r1.base - r2.base) + r1.size;

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r2.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to mark two memory blocks r1 and r2 as reserved,
 * where r2 overlaps with the end of r1 (that is
 * r2.base < r1.base + r1.size):
 *
 *  |  +--------------+--+--------------+  |
 *  |  |       r1     |  |     r2       |  |
 *  +--+--------------+--+--------------+--+
 *     ^              ^
 *     |              |
 *     |              r2.base
 *     |
 *     r1.base
 *
 * Expect to merge two entries into one region that starts at r1.base and
 * has size of two regions minus their intersection. The total size of the
 * reserved memory is updated, and the region counter is not updated.
 */
static int memblock_reserve_overlap_bottom_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.reserved.regions[0];

	/* r2 = [128K, 256K) overlaps the tail of r1 = [2K, 130K). */
	struct region r1 = {
		.base = SZ_2K,
		.size = SZ_128K
	};
	struct region r2 = {
		.base = SZ_128K,
		.size = SZ_128K
	};

	PREFIX_PUSH();

	/* Union size: from r1.base up to r2's end. */
	total_size = (r2.base - r1.base) + r2.size;

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to mark two memory blocks r1 and r2 as reserved,
 * where r2 is within the range of r1 (that is
 * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
 *
 *  | +-----+--+---------------------------|
 *  | |     |r2|          r1               |
 *  +-+-----+--+---------------------------+
 *    ^     ^
 *    |     |
 *    |     r2.base
 *    |
 *    r1.base
 *
 * Expect to merge two entries into one region that stays the same. The
 * counter and total size of available memory are not updated.
 */
static int memblock_reserve_within_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.reserved.regions[0];

	/* r2 = [2M, 2M + 64K) lies entirely inside r1 = [1M, 9M). */
	struct region r1 = {
		.base = SZ_1M,
		.size = SZ_8M
	};
	struct region r2 = {
		.base = SZ_2M,
		.size = SZ_64K
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	/* Reserving the inner block must not change the accounting. */
	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, r1.size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, r1.size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to reserve the same memory block twice.
774 * Expect the region counter and total size of reserved memory to not 775 * be updated. 776 */ 777 static int memblock_reserve_twice_check(void) 778 { 779 struct region r = { 780 .base = SZ_16K, 781 .size = SZ_2M 782 }; 783 784 PREFIX_PUSH(); 785 786 reset_memblock_regions(); 787 788 memblock_reserve(r.base, r.size); 789 memblock_reserve(r.base, r.size); 790 791 ASSERT_EQ(memblock.reserved.cnt, 1); 792 ASSERT_EQ(memblock.reserved.total_size, r.size); 793 794 test_pass_pop(); 795 796 return 0; 797 } 798 799 /* 800 * A test that tries to mark two memory blocks that don't overlap as reserved 801 * and then reserve a third memory block in the space between the first two: 802 * 803 * | +--------+--------+--------+ | 804 * | | r1 | r3 | r2 | | 805 * +--------+--------+--------+--------+--+ 806 * 807 * Expect to merge the three entries into one reserved region that starts at 808 * r1.base and has size of r1.size + r2.size + r3.size. The region counter and 809 * total for memblock.reserved are updated. 
 */
static int memblock_reserve_between_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.reserved.regions[0];

	/* r3 exactly fills the SZ_8K hole between r1 and r2. */
	struct region r1 = {
		.base = SZ_1G,
		.size = SZ_8K
	};
	struct region r2 = {
		.base = SZ_1G + SZ_16K,
		.size = SZ_8K
	};
	struct region r3 = {
		.base = SZ_1G + SZ_8K,
		.size = SZ_8K
	};

	PREFIX_PUSH();

	total_size = r1.size + r2.size + r3.size;

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);
	memblock_reserve(r3.base, r3.size);

	/* All three must coalesce into one contiguous reserved entry. */
	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to reserve a memory block r when r extends past
 * PHYS_ADDR_MAX:
 *
 *                               +--------+
 *                               |    r   |
 *                               +--------+
 *  |                            +----+
 *  |                            | rgn|
 *  +----------------------------+----+
 *
 * Expect to reserve a memory block of size PHYS_ADDR_MAX - r.base. Expect the
 * total size of reserved memory and the counter to be updated.
 */
static int memblock_reserve_near_max_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.reserved.regions[0];

	struct region r = {
		.base = PHYS_ADDR_MAX - SZ_1M,
		.size = SZ_2M
	};

	PREFIX_PUSH();

	/* Only the part below PHYS_ADDR_MAX can be reserved. */
	total_size = PHYS_ADDR_MAX - r.base;

	reset_memblock_regions();
	memblock_reserve(r.base, r.size);

	ASSERT_EQ(rgn->base, r.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that trying to reserve the 129th memory block.
 * Expect to trigger memblock_double_array() to double the
 * memblock.memory.max, find a new valid memory as
 * reserved.regions.
 */
static int memblock_reserve_many_check(void)
{
	int i;
	void *orig_region;
	struct region r = {
		.base = SZ_16K,
		.size = SZ_16K,
	};
	phys_addr_t memory_base = SZ_128K;
	phys_addr_t new_reserved_regions_size;

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_allow_resize();

	/* Add a valid memory region used by double_array(). */
	dummy_physical_memory_init();
	memblock_add(dummy_physical_memory_base(), MEM_SIZE);

	for (i = 0; i < INIT_MEMBLOCK_REGIONS; i++) {
		/* Reserve some fakes memory region to fulfill the memblock. */
		memblock_reserve(memory_base, MEM_SIZE);

		ASSERT_EQ(memblock.reserved.cnt, i + 1);
		ASSERT_EQ(memblock.reserved.total_size, (i + 1) * MEM_SIZE);

		/* Keep the gap so these memory region will not be merged. */
		memory_base += MEM_SIZE * 2;
	}

	orig_region = memblock.reserved.regions;

	/* This reserve the 129 memory_region, and makes it double array. */
	memblock_reserve(memory_base, MEM_SIZE);

	/*
	 * This is the memory region size used by the doubled reserved.regions,
	 * and it has been reserved due to it has been used. The size is used to
	 * calculate the total_size that the memblock.reserved have now.
	 */
	new_reserved_regions_size = PAGE_ALIGN((INIT_MEMBLOCK_REGIONS * 2) *
					       sizeof(struct memblock_region));
	/*
	 * The double_array() will find a free memory region as the new
	 * reserved.regions, and the used memory region will be reserved, so
	 * there will be one more region exist in the reserved memblock. And the
	 * one more reserved region's size is new_reserved_regions_size.
	 */
	ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 2);
	ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
						new_reserved_regions_size);
	ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);

	/*
	 * Now memblock_double_array() works fine. Let's check after the
	 * double_array(), the memblock_reserve() still works as normal.
	 */
	memblock_reserve(r.base, r.size);
	ASSERT_EQ(memblock.reserved.regions[0].base, r.base);
	ASSERT_EQ(memblock.reserved.regions[0].size, r.size);

	ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 3);
	ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
						new_reserved_regions_size +
						r.size);
	ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);

	dummy_physical_memory_cleanup();

	/*
	 * The current reserved.regions is occupying a range of memory that
	 * allocated from dummy_physical_memory_init(). After free the memory,
	 * we must not use it. So restore the origin memory region to make sure
	 * the tests can run as normal and not affected by the double array.
	 */
	memblock.reserved.regions = orig_region;
	memblock.reserved.cnt = INIT_MEMBLOCK_RESERVED_REGIONS;

	test_pass_pop();

	return 0;
}

/* Run every memblock_reserve() test under the FUNC_RESERVE prefix. */
static int memblock_reserve_checks(void)
{
	prefix_reset();
	prefix_push(FUNC_RESERVE);
	test_print("Running %s tests...\n", FUNC_RESERVE);

	memblock_reserve_simple_check();
	memblock_reserve_disjoint_check();
	memblock_reserve_overlap_top_check();
	memblock_reserve_overlap_bottom_check();
	memblock_reserve_within_check();
	memblock_reserve_twice_check();
	memblock_reserve_between_check();
	memblock_reserve_near_max_check();
	memblock_reserve_many_check();

	prefix_pop();

	return 0;
}

/*
 * A simple test that tries to remove a region r1 from the array of
 * available memory regions. By "removing" a region we mean overwriting it
 * with the next region r2 in memblock.memory:
 *
 *  |  ......          +----------------+  |
 *  |  : r1 :          |       r2       |  |
 *  +--+----+----------+----------------+--+
 *     ^
 *     |
 *     rgn.base
 *
 * Expect to add two memory blocks r1 and r2 and then remove r1 so that
 * r2 is the first available region. The region counter and total size
 * are updated.
 */
static int memblock_remove_simple_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.memory.regions[0];

	/* r1 and r2 are disjoint; removing r1 leaves only r2. */
	struct region r1 = {
		.base = SZ_2K,
		.size = SZ_4K
	};
	struct region r2 = {
		.base = SZ_128K,
		.size = SZ_4M
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);
	memblock_remove(r1.base, r1.size);

	/* r2 must have shifted down into slot 0. */
	ASSERT_EQ(rgn->base, r2.base);
	ASSERT_EQ(rgn->size, r2.size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, r2.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to remove a region r2 that was not registered as
 * available memory (i.e. has no corresponding entry in memblock.memory):
 *
 *                      +----------------+
 *                      |       r2       |
 *                      +----------------+
 *  |  +----+                              |
 *  |  | r1 |                              |
 *  +--+----+------------------------------+
 *     ^
 *     |
 *     rgn.base
 *
 * Expect the array, regions counter and total size to not be modified.
1070 */ 1071 static int memblock_remove_absent_check(void) 1072 { 1073 struct memblock_region *rgn; 1074 1075 rgn = &memblock.memory.regions[0]; 1076 1077 struct region r1 = { 1078 .base = SZ_512K, 1079 .size = SZ_4M 1080 }; 1081 struct region r2 = { 1082 .base = SZ_64M, 1083 .size = SZ_1G 1084 }; 1085 1086 PREFIX_PUSH(); 1087 1088 reset_memblock_regions(); 1089 memblock_add(r1.base, r1.size); 1090 memblock_remove(r2.base, r2.size); 1091 1092 ASSERT_EQ(rgn->base, r1.base); 1093 ASSERT_EQ(rgn->size, r1.size); 1094 1095 ASSERT_EQ(memblock.memory.cnt, 1); 1096 ASSERT_EQ(memblock.memory.total_size, r1.size); 1097 1098 test_pass_pop(); 1099 1100 return 0; 1101 } 1102 1103 /* 1104 * A test that tries to remove a region r2 that overlaps with the 1105 * beginning of the already existing entry r1 1106 * (that is r1.base < r2.base + r2.size): 1107 * 1108 * +-----------------+ 1109 * | r2 | 1110 * +-----------------+ 1111 * | .........+--------+ | 1112 * | : r1 | rgn | | 1113 * +-----------------+--------+--------+--+ 1114 * ^ ^ 1115 * | | 1116 * | rgn.base 1117 * r1.base 1118 * 1119 * Expect that only the intersection of both regions is removed from the 1120 * available memory pool. The regions counter and total size are updated. 
1121 */ 1122 static int memblock_remove_overlap_top_check(void) 1123 { 1124 struct memblock_region *rgn; 1125 phys_addr_t r1_end, r2_end, total_size; 1126 1127 rgn = &memblock.memory.regions[0]; 1128 1129 struct region r1 = { 1130 .base = SZ_32M, 1131 .size = SZ_32M 1132 }; 1133 struct region r2 = { 1134 .base = SZ_16M, 1135 .size = SZ_32M 1136 }; 1137 1138 PREFIX_PUSH(); 1139 1140 r1_end = r1.base + r1.size; 1141 r2_end = r2.base + r2.size; 1142 total_size = r1_end - r2_end; 1143 1144 reset_memblock_regions(); 1145 memblock_add(r1.base, r1.size); 1146 memblock_remove(r2.base, r2.size); 1147 1148 ASSERT_EQ(rgn->base, r1.base + r2.base); 1149 ASSERT_EQ(rgn->size, total_size); 1150 1151 ASSERT_EQ(memblock.memory.cnt, 1); 1152 ASSERT_EQ(memblock.memory.total_size, total_size); 1153 1154 test_pass_pop(); 1155 1156 return 0; 1157 } 1158 1159 /* 1160 * A test that tries to remove a region r2 that overlaps with the end of 1161 * the already existing region r1 (that is r2.base < r1.base + r1.size): 1162 * 1163 * +--------------------------------+ 1164 * | r2 | 1165 * +--------------------------------+ 1166 * | +---+..... | 1167 * | |rgn| r1 : | 1168 * +-+---+----+---------------------------+ 1169 * ^ 1170 * | 1171 * r1.base 1172 * 1173 * Expect that only the intersection of both regions is removed from the 1174 * available memory pool. The regions counter and total size are updated. 
 */
static int memblock_remove_overlap_bottom_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.memory.regions[0];

	/* r2 = [32M, 288M) covers the tail of r1 = [2M, 66M). */
	struct region r1 = {
		.base = SZ_2M,
		.size = SZ_64M
	};
	struct region r2 = {
		.base = SZ_32M,
		.size = SZ_256M
	};

	PREFIX_PUSH();

	/* What survives is [r1.base, r2.base). */
	total_size = r2.base - r1.base;

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_remove(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to remove a region r2 that is within the range of
 * the already existing entry r1 (that is
 * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
 *
 *                  +----+
 *                  | r2 |
 *                  +----+
 *  | +-------------+....+---------------+ |
 *  | |     rgn1    | r1 |     rgn2      | |
 *  +-+-------------+----+---------------+-+
 *    ^
 *    |
 *    r1.base
 *
 * Expect that the region is split into two - one that ends at r2.base and
 * another that starts at r2.base + r2.size, with appropriate sizes. The
 * region counter and total size are updated.
1229 */ 1230 static int memblock_remove_within_check(void) 1231 { 1232 struct memblock_region *rgn1, *rgn2; 1233 phys_addr_t r1_size, r2_size, total_size; 1234 1235 rgn1 = &memblock.memory.regions[0]; 1236 rgn2 = &memblock.memory.regions[1]; 1237 1238 struct region r1 = { 1239 .base = SZ_1M, 1240 .size = SZ_32M 1241 }; 1242 struct region r2 = { 1243 .base = SZ_16M, 1244 .size = SZ_1M 1245 }; 1246 1247 PREFIX_PUSH(); 1248 1249 r1_size = r2.base - r1.base; 1250 r2_size = (r1.base + r1.size) - (r2.base + r2.size); 1251 total_size = r1_size + r2_size; 1252 1253 reset_memblock_regions(); 1254 memblock_add(r1.base, r1.size); 1255 memblock_remove(r2.base, r2.size); 1256 1257 ASSERT_EQ(rgn1->base, r1.base); 1258 ASSERT_EQ(rgn1->size, r1_size); 1259 1260 ASSERT_EQ(rgn2->base, r2.base + r2.size); 1261 ASSERT_EQ(rgn2->size, r2_size); 1262 1263 ASSERT_EQ(memblock.memory.cnt, 2); 1264 ASSERT_EQ(memblock.memory.total_size, total_size); 1265 1266 test_pass_pop(); 1267 1268 return 0; 1269 } 1270 1271 /* 1272 * A simple test that tries to remove a region r1 from the array of 1273 * available memory regions when r1 is the only available region. 1274 * Expect to add a memory block r1 and then remove r1 so that a dummy 1275 * region is added. The region counter stays the same, and the total size 1276 * is updated. 
1277 */ 1278 static int memblock_remove_only_region_check(void) 1279 { 1280 struct memblock_region *rgn; 1281 1282 rgn = &memblock.memory.regions[0]; 1283 1284 struct region r1 = { 1285 .base = SZ_2K, 1286 .size = SZ_4K 1287 }; 1288 1289 PREFIX_PUSH(); 1290 1291 reset_memblock_regions(); 1292 memblock_add(r1.base, r1.size); 1293 memblock_remove(r1.base, r1.size); 1294 1295 ASSERT_EQ(rgn->base, 0); 1296 ASSERT_EQ(rgn->size, 0); 1297 1298 ASSERT_EQ(memblock.memory.cnt, 1); 1299 ASSERT_EQ(memblock.memory.total_size, 0); 1300 1301 test_pass_pop(); 1302 1303 return 0; 1304 } 1305 1306 /* 1307 * A simple test that tries remove a region r2 from the array of available 1308 * memory regions when r2 extends past PHYS_ADDR_MAX: 1309 * 1310 * +--------+ 1311 * | r2 | 1312 * +--------+ 1313 * | +---+....+ 1314 * | |rgn| | 1315 * +------------------------+---+----+ 1316 * 1317 * Expect that only the portion between PHYS_ADDR_MAX and r2.base is removed. 1318 * Expect the total size of available memory to be updated and the counter to 1319 * not be updated. 
1320 */ 1321 static int memblock_remove_near_max_check(void) 1322 { 1323 struct memblock_region *rgn; 1324 phys_addr_t total_size; 1325 1326 rgn = &memblock.memory.regions[0]; 1327 1328 struct region r1 = { 1329 .base = PHYS_ADDR_MAX - SZ_2M, 1330 .size = SZ_2M 1331 }; 1332 1333 struct region r2 = { 1334 .base = PHYS_ADDR_MAX - SZ_1M, 1335 .size = SZ_2M 1336 }; 1337 1338 PREFIX_PUSH(); 1339 1340 total_size = r1.size - (PHYS_ADDR_MAX - r2.base); 1341 1342 reset_memblock_regions(); 1343 memblock_add(r1.base, r1.size); 1344 memblock_remove(r2.base, r2.size); 1345 1346 ASSERT_EQ(rgn->base, r1.base); 1347 ASSERT_EQ(rgn->size, total_size); 1348 1349 ASSERT_EQ(memblock.memory.cnt, 1); 1350 ASSERT_EQ(memblock.memory.total_size, total_size); 1351 1352 test_pass_pop(); 1353 1354 return 0; 1355 } 1356 1357 /* 1358 * A test that tries to remove a region r3 that overlaps with two existing 1359 * regions r1 and r2: 1360 * 1361 * +----------------+ 1362 * | r3 | 1363 * +----------------+ 1364 * | +----+..... ........+--------+ 1365 * | | |r1 : : |r2 | | 1366 * +----+----+----+---+-------+--------+-----+ 1367 * 1368 * Expect that only the intersections of r1 with r3 and r2 with r3 are removed 1369 * from the available memory pool. Expect the total size of available memory to 1370 * be updated and the counter to not be updated. 
1371 */ 1372 static int memblock_remove_overlap_two_check(void) 1373 { 1374 struct memblock_region *rgn1, *rgn2; 1375 phys_addr_t new_r1_size, new_r2_size, r2_end, r3_end, total_size; 1376 1377 rgn1 = &memblock.memory.regions[0]; 1378 rgn2 = &memblock.memory.regions[1]; 1379 1380 struct region r1 = { 1381 .base = SZ_16M, 1382 .size = SZ_32M 1383 }; 1384 struct region r2 = { 1385 .base = SZ_64M, 1386 .size = SZ_64M 1387 }; 1388 struct region r3 = { 1389 .base = SZ_32M, 1390 .size = SZ_64M 1391 }; 1392 1393 PREFIX_PUSH(); 1394 1395 r2_end = r2.base + r2.size; 1396 r3_end = r3.base + r3.size; 1397 new_r1_size = r3.base - r1.base; 1398 new_r2_size = r2_end - r3_end; 1399 total_size = new_r1_size + new_r2_size; 1400 1401 reset_memblock_regions(); 1402 memblock_add(r1.base, r1.size); 1403 memblock_add(r2.base, r2.size); 1404 memblock_remove(r3.base, r3.size); 1405 1406 ASSERT_EQ(rgn1->base, r1.base); 1407 ASSERT_EQ(rgn1->size, new_r1_size); 1408 1409 ASSERT_EQ(rgn2->base, r3_end); 1410 ASSERT_EQ(rgn2->size, new_r2_size); 1411 1412 ASSERT_EQ(memblock.memory.cnt, 2); 1413 ASSERT_EQ(memblock.memory.total_size, total_size); 1414 1415 test_pass_pop(); 1416 1417 return 0; 1418 } 1419 1420 static int memblock_remove_checks(void) 1421 { 1422 prefix_reset(); 1423 prefix_push(FUNC_REMOVE); 1424 test_print("Running %s tests...\n", FUNC_REMOVE); 1425 1426 memblock_remove_simple_check(); 1427 memblock_remove_absent_check(); 1428 memblock_remove_overlap_top_check(); 1429 memblock_remove_overlap_bottom_check(); 1430 memblock_remove_within_check(); 1431 memblock_remove_only_region_check(); 1432 memblock_remove_near_max_check(); 1433 memblock_remove_overlap_two_check(); 1434 1435 prefix_pop(); 1436 1437 return 0; 1438 } 1439 1440 /* 1441 * A simple test that tries to free a memory block r1 that was marked 1442 * earlier as reserved. By "freeing" a region we mean overwriting it with 1443 * the next entry r2 in memblock.reserved: 1444 * 1445 * | ...... 
+----+ | 1446 * | : r1 : | r2 | | 1447 * +--------------+----+-----------+----+-+ 1448 * ^ 1449 * | 1450 * rgn.base 1451 * 1452 * Expect to reserve two memory regions and then erase r1 region with the 1453 * value of r2. The region counter and total size are updated. 1454 */ 1455 static int memblock_free_simple_check(void) 1456 { 1457 struct memblock_region *rgn; 1458 1459 rgn = &memblock.reserved.regions[0]; 1460 1461 struct region r1 = { 1462 .base = SZ_4M, 1463 .size = SZ_1M 1464 }; 1465 struct region r2 = { 1466 .base = SZ_8M, 1467 .size = SZ_1M 1468 }; 1469 1470 PREFIX_PUSH(); 1471 1472 reset_memblock_regions(); 1473 memblock_reserve(r1.base, r1.size); 1474 memblock_reserve(r2.base, r2.size); 1475 memblock_free((void *)r1.base, r1.size); 1476 1477 ASSERT_EQ(rgn->base, r2.base); 1478 ASSERT_EQ(rgn->size, r2.size); 1479 1480 ASSERT_EQ(memblock.reserved.cnt, 1); 1481 ASSERT_EQ(memblock.reserved.total_size, r2.size); 1482 1483 test_pass_pop(); 1484 1485 return 0; 1486 } 1487 1488 /* 1489 * A test that tries to free a region r2 that was not marked as reserved 1490 * (i.e. has no corresponding entry in memblock.reserved): 1491 * 1492 * +----------------+ 1493 * | r2 | 1494 * +----------------+ 1495 * | +----+ | 1496 * | | r1 | | 1497 * +--+----+------------------------------+ 1498 * ^ 1499 * | 1500 * rgn.base 1501 * 1502 * The array, regions counter and total size are not modified. 
1503 */ 1504 static int memblock_free_absent_check(void) 1505 { 1506 struct memblock_region *rgn; 1507 1508 rgn = &memblock.reserved.regions[0]; 1509 1510 struct region r1 = { 1511 .base = SZ_2M, 1512 .size = SZ_8K 1513 }; 1514 struct region r2 = { 1515 .base = SZ_16M, 1516 .size = SZ_128M 1517 }; 1518 1519 PREFIX_PUSH(); 1520 1521 reset_memblock_regions(); 1522 memblock_reserve(r1.base, r1.size); 1523 memblock_free((void *)r2.base, r2.size); 1524 1525 ASSERT_EQ(rgn->base, r1.base); 1526 ASSERT_EQ(rgn->size, r1.size); 1527 1528 ASSERT_EQ(memblock.reserved.cnt, 1); 1529 ASSERT_EQ(memblock.reserved.total_size, r1.size); 1530 1531 test_pass_pop(); 1532 1533 return 0; 1534 } 1535 1536 /* 1537 * A test that tries to free a region r2 that overlaps with the beginning 1538 * of the already existing entry r1 (that is r1.base < r2.base + r2.size): 1539 * 1540 * +----+ 1541 * | r2 | 1542 * +----+ 1543 * | ...+--------------+ | 1544 * | : | r1 | | 1545 * +----+--+--------------+---------------+ 1546 * ^ ^ 1547 * | | 1548 * | rgn.base 1549 * | 1550 * r1.base 1551 * 1552 * Expect that only the intersection of both regions is freed. The 1553 * regions counter and total size are updated. 
1554 */ 1555 static int memblock_free_overlap_top_check(void) 1556 { 1557 struct memblock_region *rgn; 1558 phys_addr_t total_size; 1559 1560 rgn = &memblock.reserved.regions[0]; 1561 1562 struct region r1 = { 1563 .base = SZ_8M, 1564 .size = SZ_32M 1565 }; 1566 struct region r2 = { 1567 .base = SZ_1M, 1568 .size = SZ_8M 1569 }; 1570 1571 PREFIX_PUSH(); 1572 1573 total_size = (r1.size + r1.base) - (r2.base + r2.size); 1574 1575 reset_memblock_regions(); 1576 memblock_reserve(r1.base, r1.size); 1577 memblock_free((void *)r2.base, r2.size); 1578 1579 ASSERT_EQ(rgn->base, r2.base + r2.size); 1580 ASSERT_EQ(rgn->size, total_size); 1581 1582 ASSERT_EQ(memblock.reserved.cnt, 1); 1583 ASSERT_EQ(memblock.reserved.total_size, total_size); 1584 1585 test_pass_pop(); 1586 1587 return 0; 1588 } 1589 1590 /* 1591 * A test that tries to free a region r2 that overlaps with the end of 1592 * the already existing entry r1 (that is r2.base < r1.base + r1.size): 1593 * 1594 * +----------------+ 1595 * | r2 | 1596 * +----------------+ 1597 * | +-----------+..... | 1598 * | | r1 | : | 1599 * +----+-----------+----+----------------+ 1600 * 1601 * Expect that only the intersection of both regions is freed. The 1602 * regions counter and total size are updated. 
1603 */ 1604 static int memblock_free_overlap_bottom_check(void) 1605 { 1606 struct memblock_region *rgn; 1607 phys_addr_t total_size; 1608 1609 rgn = &memblock.reserved.regions[0]; 1610 1611 struct region r1 = { 1612 .base = SZ_8M, 1613 .size = SZ_32M 1614 }; 1615 struct region r2 = { 1616 .base = SZ_32M, 1617 .size = SZ_32M 1618 }; 1619 1620 PREFIX_PUSH(); 1621 1622 total_size = r2.base - r1.base; 1623 1624 reset_memblock_regions(); 1625 memblock_reserve(r1.base, r1.size); 1626 memblock_free((void *)r2.base, r2.size); 1627 1628 ASSERT_EQ(rgn->base, r1.base); 1629 ASSERT_EQ(rgn->size, total_size); 1630 1631 ASSERT_EQ(memblock.reserved.cnt, 1); 1632 ASSERT_EQ(memblock.reserved.total_size, total_size); 1633 1634 test_pass_pop(); 1635 1636 return 0; 1637 } 1638 1639 /* 1640 * A test that tries to free a region r2 that is within the range of the 1641 * already existing entry r1 (that is 1642 * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)): 1643 * 1644 * +----+ 1645 * | r2 | 1646 * +----+ 1647 * | +------------+....+---------------+ 1648 * | | rgn1 | r1 | rgn2 | 1649 * +----+------------+----+---------------+ 1650 * ^ 1651 * | 1652 * r1.base 1653 * 1654 * Expect that the region is split into two - one that ends at r2.base and 1655 * another that starts at r2.base + r2.size, with appropriate sizes. The 1656 * region counter and total size fields are updated. 
1657 */ 1658 static int memblock_free_within_check(void) 1659 { 1660 struct memblock_region *rgn1, *rgn2; 1661 phys_addr_t r1_size, r2_size, total_size; 1662 1663 rgn1 = &memblock.reserved.regions[0]; 1664 rgn2 = &memblock.reserved.regions[1]; 1665 1666 struct region r1 = { 1667 .base = SZ_1M, 1668 .size = SZ_8M 1669 }; 1670 struct region r2 = { 1671 .base = SZ_4M, 1672 .size = SZ_1M 1673 }; 1674 1675 PREFIX_PUSH(); 1676 1677 r1_size = r2.base - r1.base; 1678 r2_size = (r1.base + r1.size) - (r2.base + r2.size); 1679 total_size = r1_size + r2_size; 1680 1681 reset_memblock_regions(); 1682 memblock_reserve(r1.base, r1.size); 1683 memblock_free((void *)r2.base, r2.size); 1684 1685 ASSERT_EQ(rgn1->base, r1.base); 1686 ASSERT_EQ(rgn1->size, r1_size); 1687 1688 ASSERT_EQ(rgn2->base, r2.base + r2.size); 1689 ASSERT_EQ(rgn2->size, r2_size); 1690 1691 ASSERT_EQ(memblock.reserved.cnt, 2); 1692 ASSERT_EQ(memblock.reserved.total_size, total_size); 1693 1694 test_pass_pop(); 1695 1696 return 0; 1697 } 1698 1699 /* 1700 * A simple test that tries to free a memory block r1 that was marked 1701 * earlier as reserved when r1 is the only available region. 1702 * Expect to reserve a memory block r1 and then free r1 so that r1 is 1703 * overwritten with a dummy region. The region counter stays the same, 1704 * and the total size is updated. 
1705 */ 1706 static int memblock_free_only_region_check(void) 1707 { 1708 struct memblock_region *rgn; 1709 1710 rgn = &memblock.reserved.regions[0]; 1711 1712 struct region r1 = { 1713 .base = SZ_2K, 1714 .size = SZ_4K 1715 }; 1716 1717 PREFIX_PUSH(); 1718 1719 reset_memblock_regions(); 1720 memblock_reserve(r1.base, r1.size); 1721 memblock_free((void *)r1.base, r1.size); 1722 1723 ASSERT_EQ(rgn->base, 0); 1724 ASSERT_EQ(rgn->size, 0); 1725 1726 ASSERT_EQ(memblock.reserved.cnt, 1); 1727 ASSERT_EQ(memblock.reserved.total_size, 0); 1728 1729 test_pass_pop(); 1730 1731 return 0; 1732 } 1733 1734 /* 1735 * A simple test that tries free a region r2 when r2 extends past PHYS_ADDR_MAX: 1736 * 1737 * +--------+ 1738 * | r2 | 1739 * +--------+ 1740 * | +---+....+ 1741 * | |rgn| | 1742 * +------------------------+---+----+ 1743 * 1744 * Expect that only the portion between PHYS_ADDR_MAX and r2.base is freed. 1745 * Expect the total size of reserved memory to be updated and the counter to 1746 * not be updated. 
1747 */ 1748 static int memblock_free_near_max_check(void) 1749 { 1750 struct memblock_region *rgn; 1751 phys_addr_t total_size; 1752 1753 rgn = &memblock.reserved.regions[0]; 1754 1755 struct region r1 = { 1756 .base = PHYS_ADDR_MAX - SZ_2M, 1757 .size = SZ_2M 1758 }; 1759 1760 struct region r2 = { 1761 .base = PHYS_ADDR_MAX - SZ_1M, 1762 .size = SZ_2M 1763 }; 1764 1765 PREFIX_PUSH(); 1766 1767 total_size = r1.size - (PHYS_ADDR_MAX - r2.base); 1768 1769 reset_memblock_regions(); 1770 memblock_reserve(r1.base, r1.size); 1771 memblock_free((void *)r2.base, r2.size); 1772 1773 ASSERT_EQ(rgn->base, r1.base); 1774 ASSERT_EQ(rgn->size, total_size); 1775 1776 ASSERT_EQ(memblock.reserved.cnt, 1); 1777 ASSERT_EQ(memblock.reserved.total_size, total_size); 1778 1779 test_pass_pop(); 1780 1781 return 0; 1782 } 1783 1784 /* 1785 * A test that tries to free a reserved region r3 that overlaps with two 1786 * existing reserved regions r1 and r2: 1787 * 1788 * +----------------+ 1789 * | r3 | 1790 * +----------------+ 1791 * | +----+..... ........+--------+ 1792 * | | |r1 : : |r2 | | 1793 * +----+----+----+---+-------+--------+-----+ 1794 * 1795 * Expect that only the intersections of r1 with r3 and r2 with r3 are freed 1796 * from the collection of reserved memory. Expect the total size of reserved 1797 * memory to be updated and the counter to not be updated. 
 */
static int memblock_free_overlap_two_check(void)
{
	struct memblock_region *rgn1, *rgn2;
	phys_addr_t new_r1_size, new_r2_size, r2_end, r3_end, total_size;

	rgn1 = &memblock.reserved.regions[0];
	rgn2 = &memblock.reserved.regions[1];

	struct region r1 = {
		.base = SZ_16M,
		.size = SZ_32M
	};
	struct region r2 = {
		.base = SZ_64M,
		.size = SZ_64M
	};
	struct region r3 = {
		.base = SZ_32M,
		.size = SZ_64M
	};

	PREFIX_PUSH();

	r2_end = r2.base + r2.size;
	r3_end = r3.base + r3.size;
	/* r3 frees the tail of r1 and the head of r2 */
	new_r1_size = r3.base - r1.base;
	new_r2_size = r2_end - r3_end;
	total_size = new_r1_size + new_r2_size;

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);
	memblock_free((void *)r3.base, r3.size);

	ASSERT_EQ(rgn1->base, r1.base);
	ASSERT_EQ(rgn1->size, new_r1_size);

	ASSERT_EQ(rgn2->base, r3_end);
	ASSERT_EQ(rgn2->size, new_r2_size);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/* Run every memblock_free() test under a common prefix. */
static int memblock_free_checks(void)
{
	prefix_reset();
	prefix_push(FUNC_FREE);
	test_print("Running %s tests...\n", FUNC_FREE);

	memblock_free_simple_check();
	memblock_free_absent_check();
	memblock_free_overlap_top_check();
	memblock_free_overlap_bottom_check();
	memblock_free_within_check();
	memblock_free_only_region_check();
	memblock_free_near_max_check();
	memblock_free_overlap_two_check();

	prefix_pop();

	return 0;
}

/* Verify that memblock_set_bottom_up() updates memblock.bottom_up. */
static int memblock_set_bottom_up_check(void)
{
	prefix_push("memblock_set_bottom_up");

	memblock_set_bottom_up(false);
	ASSERT_EQ(memblock.bottom_up, false);
	memblock_set_bottom_up(true);
	ASSERT_EQ(memblock.bottom_up, true);

	/* restore the default allocation direction for later tests */
	reset_memblock_attributes();
	test_pass_pop();

	return 0;
}

/* Verify that memblock_bottom_up() reports memblock.bottom_up. */
static int memblock_bottom_up_check(void)
{
	prefix_push("memblock_bottom_up");

	memblock_set_bottom_up(false);
	ASSERT_EQ(memblock_bottom_up(), memblock.bottom_up);
	ASSERT_EQ(memblock_bottom_up(), false);
	memblock_set_bottom_up(true);
	ASSERT_EQ(memblock_bottom_up(), memblock.bottom_up);
	ASSERT_EQ(memblock_bottom_up(), true);

	/* restore the default allocation direction for later tests */
	reset_memblock_attributes();
	test_pass_pop();

	return 0;
}

/* Run both bottom-up direction tests. */
static int memblock_bottom_up_checks(void)
{
	test_print("Running memblock_*bottom_up tests...\n");

	prefix_reset();
	memblock_set_bottom_up_check();
	prefix_reset();
	memblock_bottom_up_check();

	return 0;
}

/*
 * A test that tries to trim memory when both ends of the memory region are
 * aligned. Expect that the memory will not be trimmed. Expect the counter to
 * not be updated.
 */
static int memblock_trim_memory_aligned_check(void)
{
	struct memblock_region *rgn;
	const phys_addr_t alignment = SMP_CACHE_BYTES;

	rgn = &memblock.memory.regions[0];

	struct region r = {
		.base = alignment,
		.size = alignment * 4
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add(r.base, r.size);
	memblock_trim_memory(alignment);

	/* already aligned on both ends: nothing should change */
	ASSERT_EQ(rgn->base, r.base);
	ASSERT_EQ(rgn->size, r.size);

	ASSERT_EQ(memblock.memory.cnt, 1);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to trim memory when there are two available regions, r1 and
 * r2. Region r1 is aligned on both ends and region r2 is unaligned on one end
 * and smaller than the alignment:
 *
 *                                     alignment
 *                                     |--------|
 * |        +-----------------+        +------+   |
 * |        |        r1       |        |  r2  |   |
 * +--------+-----------------+--------+------+---+
 *          ^        ^        ^        ^      ^
 *          |________|________|________|      |
 *                            |               Unaligned address
 *                  Aligned addresses
 *
 * Expect that r1 will not be trimmed and r2 will be removed. Expect the
 * counter to be updated.
 */
static int memblock_trim_memory_too_small_check(void)
{
	struct memblock_region *rgn;
	const phys_addr_t alignment = SMP_CACHE_BYTES;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = alignment,
		.size = alignment * 2
	};
	struct region r2 = {
		.base = alignment * 4,
		.size = alignment - SZ_2
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);
	memblock_trim_memory(alignment);

	/* r2 is smaller than the alignment, so trimming drops it entirely */
	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, r1.size);

	ASSERT_EQ(memblock.memory.cnt, 1);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to trim memory when there are two available regions, r1 and
 * r2. Region r1 is aligned on both ends and region r2 is unaligned at the base
 * and aligned at the end:
 *
 *                               Unaligned address
 *                                       |
 *                                       v
 * |        +-----------------+          +---------------+         |
 * |        |        r1       |          |      r2       |         |
 * +--------+-----------------+----------+---------------+---------+
 *          ^        ^        ^        ^        ^        ^
 *          |________|________|________|________|________|
 *                            |
 *                    Aligned addresses
 *
 * Expect that r1 will not be trimmed and r2 will be trimmed at the base.
 * Expect the counter to not be updated.
2013 */ 2014 static int memblock_trim_memory_unaligned_base_check(void) 2015 { 2016 struct memblock_region *rgn1, *rgn2; 2017 const phys_addr_t alignment = SMP_CACHE_BYTES; 2018 phys_addr_t offset = SZ_2; 2019 phys_addr_t new_r2_base, new_r2_size; 2020 2021 rgn1 = &memblock.memory.regions[0]; 2022 rgn2 = &memblock.memory.regions[1]; 2023 2024 struct region r1 = { 2025 .base = alignment, 2026 .size = alignment * 2 2027 }; 2028 struct region r2 = { 2029 .base = alignment * 4 + offset, 2030 .size = alignment * 2 - offset 2031 }; 2032 2033 PREFIX_PUSH(); 2034 2035 new_r2_base = r2.base + (alignment - offset); 2036 new_r2_size = r2.size - (alignment - offset); 2037 2038 reset_memblock_regions(); 2039 memblock_add(r1.base, r1.size); 2040 memblock_add(r2.base, r2.size); 2041 memblock_trim_memory(alignment); 2042 2043 ASSERT_EQ(rgn1->base, r1.base); 2044 ASSERT_EQ(rgn1->size, r1.size); 2045 2046 ASSERT_EQ(rgn2->base, new_r2_base); 2047 ASSERT_EQ(rgn2->size, new_r2_size); 2048 2049 ASSERT_EQ(memblock.memory.cnt, 2); 2050 2051 test_pass_pop(); 2052 2053 return 0; 2054 } 2055 2056 /* 2057 * A test that tries to trim memory when there are two available regions, r1 and 2058 * r2. Region r1 is aligned on both ends and region r2 is aligned at the base 2059 * and unaligned at the end: 2060 * 2061 * Unaligned address 2062 * | 2063 * v 2064 * | +-----------------+ +---------------+ | 2065 * | | r1 | | r2 | | 2066 * +--------+-----------------+--------+---------------+---+ 2067 * ^ ^ ^ ^ ^ ^ 2068 * |________|________|________|________|________| 2069 * | 2070 * Aligned addresses 2071 * 2072 * Expect that r1 will not be trimmed and r2 will be trimmed at the end. 2073 * Expect the counter to not be updated. 
2074 */ 2075 static int memblock_trim_memory_unaligned_end_check(void) 2076 { 2077 struct memblock_region *rgn1, *rgn2; 2078 const phys_addr_t alignment = SMP_CACHE_BYTES; 2079 phys_addr_t offset = SZ_2; 2080 phys_addr_t new_r2_size; 2081 2082 rgn1 = &memblock.memory.regions[0]; 2083 rgn2 = &memblock.memory.regions[1]; 2084 2085 struct region r1 = { 2086 .base = alignment, 2087 .size = alignment * 2 2088 }; 2089 struct region r2 = { 2090 .base = alignment * 4, 2091 .size = alignment * 2 - offset 2092 }; 2093 2094 PREFIX_PUSH(); 2095 2096 new_r2_size = r2.size - (alignment - offset); 2097 2098 reset_memblock_regions(); 2099 memblock_add(r1.base, r1.size); 2100 memblock_add(r2.base, r2.size); 2101 memblock_trim_memory(alignment); 2102 2103 ASSERT_EQ(rgn1->base, r1.base); 2104 ASSERT_EQ(rgn1->size, r1.size); 2105 2106 ASSERT_EQ(rgn2->base, r2.base); 2107 ASSERT_EQ(rgn2->size, new_r2_size); 2108 2109 ASSERT_EQ(memblock.memory.cnt, 2); 2110 2111 test_pass_pop(); 2112 2113 return 0; 2114 } 2115 2116 static int memblock_trim_memory_checks(void) 2117 { 2118 prefix_reset(); 2119 prefix_push(FUNC_TRIM); 2120 test_print("Running %s tests...\n", FUNC_TRIM); 2121 2122 memblock_trim_memory_aligned_check(); 2123 memblock_trim_memory_too_small_check(); 2124 memblock_trim_memory_unaligned_base_check(); 2125 memblock_trim_memory_unaligned_end_check(); 2126 2127 prefix_pop(); 2128 2129 return 0; 2130 } 2131 2132 int memblock_basic_checks(void) 2133 { 2134 memblock_initialization_check(); 2135 memblock_add_checks(); 2136 memblock_reserve_checks(); 2137 memblock_remove_checks(); 2138 memblock_free_checks(); 2139 memblock_bottom_up_checks(); 2140 memblock_trim_memory_checks(); 2141 2142 return 0; 2143 } 2144