// SPDX-License-Identifier: GPL-2.0-or-later
#include "alloc_nid_api.h"

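/*
 * The allocator variant under test: run_memblock_alloc_nid() below
 * dispatches on these TEST_F_* flags, so the same checks cover
 * memblock_alloc_try_nid(), memblock_alloc_try_nid_raw() and
 * memblock_alloc_exact_nid_raw().
 */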
static int alloc_nid_test_flags = TEST_F_NONE;

/*
 * Fraction of MEM_SIZE assigned to each node, in basis points
 * (one hundredth of 1%, i.e. 1/10000). The entries sum to 10000,
 * i.e. the whole of MEM_SIZE.
 */
static const unsigned int node_fractions[] = {
	2500, /* 1/4  */
	 625, /* 1/16 */
	1250, /* 1/8  */
	1250, /* 1/8  */
	 625, /* 1/16 */
	 625, /* 1/16 */
	2500, /* 1/4  */
	 625, /* 1/16 */
};

static inline const char * const get_memblock_alloc_nid_name(int flags)
{
	if (flags & TEST_F_EXACT)
		return "memblock_alloc_exact_nid_raw";
	if (flags & TEST_F_RAW)
		return "memblock_alloc_try_nid_raw";
	return "memblock_alloc_try_nid";
}

static inline void *run_memblock_alloc_nid(phys_addr_t size,
					   phys_addr_t align,
					   phys_addr_t min_addr,
					   phys_addr_t max_addr, int nid)
{
	assert(!(alloc_nid_test_flags & TEST_F_EXACT) ||
	       (alloc_nid_test_flags & TEST_F_RAW));
	/*
	 * TEST_F_EXACT should be checked before TEST_F_RAW since
	 * memblock_alloc_exact_nid_raw() performs raw allocations.
	 */
	if (alloc_nid_test_flags & TEST_F_EXACT)
		return memblock_alloc_exact_nid_raw(size, align, min_addr,
						    max_addr, nid);
	if (alloc_nid_test_flags & TEST_F_RAW)
		return memblock_alloc_try_nid_raw(size, align, min_addr,
						  max_addr, nid);
	return memblock_alloc_try_nid(size, align, min_addr, max_addr, nid);
}
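
/*
 * Illustrative sketch, not part of the original suite: a runner along
 * these lines can exercise every check against each allocator variant
 * by setting the flags before a pass. run_all_nid_checks() is a
 * hypothetical stand-in for calling each *_check() in turn.
 */
#if 0
static int run_nid_checks_with_flags(int flags)
{
	alloc_nid_test_flags = flags;
	prefix_push(get_memblock_alloc_nid_name(flags));

	run_all_nid_checks();	/* hypothetical: invokes every *_check() */

	prefix_pop();
	alloc_nid_test_flags = TEST_F_NONE;

	return 0;
}
#endif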

/*
 * A simple test that tries to allocate a memory region within the min_addr
 * and max_addr range:
 *
 *        +                   +
 *   |    +       +-----------+      |
 *   |    |       |    rgn    |      |
 *   +----+-------+-----------+------+
 *        ^                   ^
 *        |                   |
 *        min_addr           max_addr
 *
 * Expect to allocate a region that ends at max_addr.
 */
static int alloc_nid_top_down_simple_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_128;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
	max_addr = min_addr + SZ_512;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	rgn_end = rgn->base + rgn->size;

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, max_addr - size);
	ASSERT_EQ(rgn_end, max_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to allocate a memory region within the min_addr
 * and max_addr range, where the end address is misaligned:
 *
 *         +       +            +
 *  |      +       +---------+  +    |
 *  |      |       |   rgn   |  |    |
 *  +------+-------+---------+--+----+
 *         ^       ^            ^
 *         |       |            |
 *       min_addr  |            max_addr
 *                 |
 *                 Aligned address
 *                 boundary
 *
 * Expect to allocate an aligned region that ends before max_addr.
 */
static int alloc_nid_top_down_end_misaligned_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_128;
	phys_addr_t misalign = SZ_2;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
	max_addr = min_addr + SZ_512 + misalign;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	rgn_end = rgn->base + rgn->size;

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

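	/*
	 * Top-down allocation rounds the end address down from the
	 * misaligned max_addr to the previous aligned boundary, hence
	 * the region sits misalign bytes below max_addr.
	 */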
	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, max_addr - size - misalign);
	ASSERT_LT(rgn_end, max_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to allocate a memory region that spans the whole
 * min_addr and max_addr range:
 *
 *         +               +
 *  |      +---------------+       |
 *  |      |      rgn      |       |
 *  +------+---------------+-------+
 *         ^               ^
 *         |               |
 *         min_addr        max_addr
 *
 * Expect to allocate a region that starts at min_addr and ends at
 * max_addr, given that min_addr is aligned.
 */
static int alloc_nid_exact_address_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_1K;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES;
	max_addr = min_addr + size;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	rgn_end = rgn->base + rgn->size;

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, min_addr);
	ASSERT_EQ(rgn_end, max_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region that can't fit into the
 * min_addr and max_addr range:
 *
 *           +          +     +
 *  |        +----------+-----+    |
 *  |        |   rgn    +     |    |
 *  +--------+----------+-----+----+
 *           ^          ^     ^
 *           |          |     |
 *           Aligned    |    max_addr
 *           address    |
 *           boundary   min_addr
 *
 * Expect to drop the lower limit and allocate a memory region that
 * ends at max_addr (if the address is aligned).
 */
static int alloc_nid_top_down_narrow_range_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_256;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SZ_512;
	max_addr = min_addr + SMP_CACHE_BYTES;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, max_addr - size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region that can't fit into the
 * min_addr and max_addr range, with the latter being too close to the
 * beginning of the available memory:
 *
 *   +-------------+
 *   |     new     |
 *   +-------------+
 *         +       +
 *         |       +              |
 *         |       |              |
 *         +-------+--------------+
 *         ^       ^
 *         |       |
 *         |       max_addr
 *         |
 *         min_addr
 *
 * Expect no allocation to happen.
 */
static int alloc_nid_low_max_generic_check(void)
{
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_1K;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM();
	max_addr = min_addr + SMP_CACHE_BYTES;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_EQ(allocated_ptr, NULL);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region within the min_addr and
 * max_addr range, with min_addr being so close that it's next to an
 * allocated region:
 *
 *          +                        +
 *  |       +--------+---------------|
 *  |       |   r1   |      rgn      |
 *  +-------+--------+---------------+
 *          ^                        ^
 *          |                        |
 *          min_addr                 max_addr
 *
 * Expect a merge of both regions. Only the region size gets updated.
 */
static int alloc_nid_min_reserved_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t r1_size = SZ_128;
	phys_addr_t r2_size = SZ_64;
	phys_addr_t total_size = r1_size + r2_size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t reserved_base;

	PREFIX_PUSH();
	setup_memblock();

	max_addr = memblock_end_of_DRAM();
	min_addr = max_addr - r2_size;
	reserved_base = min_addr - r1_size;

	memblock_reserve(reserved_base, r1_size);

	allocated_ptr = run_memblock_alloc_nid(r2_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, r2_size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, total_size);
	ASSERT_EQ(rgn->base, reserved_base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region within the min_addr and
 * max_addr range, with max_addr being so close that it's next to an
 * allocated region:
 *
 *             +             +
 *  |          +-------------+--------|
 *  |          |     rgn     |   r1   |
 *  +----------+-------------+--------+
 *             ^             ^
 *             |             |
 *             min_addr      max_addr
 *
 * Expect a merge of regions. Only the region size gets updated.
 */
static int alloc_nid_max_reserved_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t r1_size = SZ_64;
	phys_addr_t r2_size = SZ_128;
	phys_addr_t total_size = r1_size + r2_size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	max_addr = memblock_end_of_DRAM() - r1_size;
	min_addr = max_addr - r2_size;

	memblock_reserve(max_addr, r1_size);

	allocated_ptr = run_memblock_alloc_nid(r2_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, r2_size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, total_size);
	ASSERT_EQ(rgn->base, min_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within the min_addr and max_addr
 * range, when there are two reserved regions at the borders, with a gap
 * big enough to fit a new region:
 *
 *                +           +
 *  |    +--------+   +-------+------+  |
 *  |    |   r2   |   |  rgn  |  r1  |  |
 *  +----+--------+---+-------+------+--+
 *                ^           ^
 *                |           |
 *                min_addr    max_addr
 *
 * Expect to merge the new region with r1. The second region does not get
 * updated. The total size field gets updated.
 */
static int alloc_nid_top_down_reserved_with_space_check(void)
{
	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	struct region r1, r2;
	phys_addr_t r3_size = SZ_64;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r3_size + gap_size + r2.size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);

	ASSERT_EQ(rgn1->size, r1.size + r3_size);
	ASSERT_EQ(rgn1->base, max_addr - r3_size);

	ASSERT_EQ(rgn2->size, r2.size);
	ASSERT_EQ(rgn2->base, r2.base);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within the min_addr and max_addr
 * range, when there are two reserved regions at the borders, with a gap
 * of a size equal to the size of the new region:
 *
 *                 +        +
 *  |     +--------+--------+--------+     |
 *  |     |   r2   |   r3   |   r1   |     |
 *  +-----+--------+--------+--------+-----+
 *                 ^        ^
 *                 |        |
 *                 min_addr max_addr
 *
 * Expect to merge all of the regions into one. The region counter and total
 * size fields get updated.
 */
static int alloc_nid_reserved_full_merge_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	struct region r1, r2;
	phys_addr_t r3_size = SZ_64;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r3_size + r2.size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, total_size);
	ASSERT_EQ(rgn->base, r2.base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within the min_addr and max_addr
 * range, when there are two reserved regions at the borders, with a gap
 * that can't fit a new region:
 *
 *                       +    +
 *  |  +----------+------+    +------+   |
 *  |  |    r3    |  r2  |    |  r1  |   |
 *  +--+----------+------+----+------+---+
 *                       ^    ^
 *                       |    |
 *                       |    max_addr
 *                       |
 *                       min_addr
 *
 * Expect to merge the new region with r2. The second region does not get
 * updated. The total size counter gets updated.
 */
static int alloc_nid_top_down_reserved_no_space_check(void)
{
	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	struct region r1, r2;
	phys_addr_t r3_size = SZ_256;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r2.size + gap_size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);

	ASSERT_EQ(rgn1->size, r1.size);
	ASSERT_EQ(rgn1->base, r1.base);

	ASSERT_EQ(rgn2->size, r2.size + r3_size);
	ASSERT_EQ(rgn2->base, r2.base - r3_size);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within the min_addr and max_addr
 * range, but it's too narrow and everything else is reserved:
 *
 *            +-----------+
 *            |    new    |
 *            +-----------+
 *                 +      +
 *  |--------------+      +----------|
 *  |      r2      |      |    r1    |
 *  +--------------+------+----------+
 *                 ^      ^
 *                 |      |
 *                 |      max_addr
 *                 |
 *                 min_addr
 *
 * Expect no allocation to happen.
 */
static int alloc_nid_reserved_all_generic_check(void)
{
	void *allocated_ptr = NULL;
	struct region r1, r2;
	phys_addr_t r3_size = SZ_256;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES;
	r1.size = SMP_CACHE_BYTES;

	r2.size = MEM_SIZE - (r1.size + gap_size);
	r2.base = memblock_start_of_DRAM();

	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_EQ(allocated_ptr, NULL);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region, where max_addr is
 * bigger than the end address of the available memory. Expect the upper
 * limit to be capped and a region allocated that ends at the end of the
 * available memory.
 */
static int alloc_nid_top_down_cap_max_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_256;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_end_of_DRAM() - SZ_1K;
	max_addr = memblock_end_of_DRAM() + SZ_256;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region, where min_addr is
 * smaller than the start address of the available memory. Expect to
 * allocate a region that ends at the end of the available memory.
 */
static int alloc_nid_top_down_cap_min_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_1K;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() - SZ_256;
	max_addr = memblock_end_of_DRAM();

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to allocate a memory region within the min_addr
 * and max_addr range:
 *
 *        +                       +
 *   |    +-----------+           |      |
 *   |    |    rgn    |           |      |
 *   +----+-----------+-----------+------+
 *        ^                       ^
 *        |                       |
 *        min_addr                max_addr
 *
 * Expect to allocate a region that ends before max_addr.
 */
static int alloc_nid_bottom_up_simple_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_128;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
	max_addr = min_addr + SZ_512;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	rgn_end = rgn->base + rgn->size;

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, min_addr);
	ASSERT_LT(rgn_end, max_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to allocate a memory region within the min_addr
 * and max_addr range, where the start address is misaligned:
 *
 *        +                     +
 *  |     +   +-----------+     +     |
 *  |     |   |    rgn    |     |     |
 *  +-----+---+-----------+-----+-----+
 *        ^   ^----.            ^
 *        |        |            |
 *     min_addr    |            max_addr
 *                 |
 *                 Aligned address
 *                 boundary
 *
 * Expect to allocate an aligned region that ends before max_addr.
 */
static int alloc_nid_bottom_up_start_misaligned_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_128;
	phys_addr_t misalign = SZ_2;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + misalign;
	max_addr = min_addr + SZ_512;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	rgn_end = rgn->base + rgn->size;

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

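	/*
	 * Bottom-up allocation rounds the start address up from the
	 * misaligned min_addr to the next aligned boundary, hence the
	 * SMP_CACHE_BYTES - misalign offset below.
	 */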
	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, min_addr + (SMP_CACHE_BYTES - misalign));
	ASSERT_LT(rgn_end, max_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region that can't fit into the
 * min_addr and max_addr range:
 *
 *                      +    +
 *  |---------+         +    +      |
 *  |   rgn   |         |    |      |
 *  +---------+---------+----+------+
 *                      ^    ^
 *                      |    |
 *                      |    max_addr
 *                      |
 *                      min_addr
 *
 * Expect to drop the lower limit and allocate a memory region that
 * starts at the beginning of the available memory.
 */
static int alloc_nid_bottom_up_narrow_range_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_256;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SZ_512;
	max_addr = min_addr + SMP_CACHE_BYTES;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, memblock_start_of_DRAM());

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within the min_addr and max_addr
 * range, when there are two reserved regions at the borders, with a gap
 * big enough to fit a new region:
 *
 *                +           +
 *  |    +--------+-------+   +------+  |
 *  |    |   r2   |  rgn  |   |  r1  |  |
 *  +----+--------+-------+---+------+--+
 *                ^           ^
 *                |           |
 *                min_addr    max_addr
 *
 * Expect to merge the new region with r2. The second region does not get
 * updated. The total size field gets updated.
 */
static int alloc_nid_bottom_up_reserved_with_space_check(void)
{
	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	struct region r1, r2;
	phys_addr_t r3_size = SZ_64;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r3_size + gap_size + r2.size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);

	ASSERT_EQ(rgn1->size, r1.size);
	ASSERT_EQ(rgn1->base, max_addr);

	ASSERT_EQ(rgn2->size, r2.size + r3_size);
	ASSERT_EQ(rgn2->base, r2.base);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within the min_addr and max_addr
 * range, when there are two reserved regions at the borders, with a gap
 * that can't fit a new region:
 *
 *                         +   +
 *  |----------+    +------+   +----+  |
 *  |    r3    |    |  r2  |   | r1 |  |
 *  +----------+----+------+---+----+--+
 *                         ^   ^
 *                         |   |
 *                         |  max_addr
 *                         |
 *                         min_addr
 *
 * Expect to drop the lower limit and allocate memory at the beginning of the
 * available memory. The region counter and total size fields get updated.
 * Other regions are not modified.
 */
static int alloc_nid_bottom_up_reserved_no_space_check(void)
{
	struct memblock_region *rgn1 = &memblock.reserved.regions[2];
	struct memblock_region *rgn2 = &memblock.reserved.regions[1];
	struct memblock_region *rgn3 = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	struct region r1, r2;
	phys_addr_t r3_size = SZ_256;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r2.size + gap_size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);

	ASSERT_EQ(rgn3->size, r3_size);
	ASSERT_EQ(rgn3->base, memblock_start_of_DRAM());

	ASSERT_EQ(rgn2->size, r2.size);
	ASSERT_EQ(rgn2->base, r2.base);

	ASSERT_EQ(rgn1->size, r1.size);
	ASSERT_EQ(rgn1->base, r1.base);

	ASSERT_EQ(memblock.reserved.cnt, 3);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

10099d8f6abeSKarolina Drobnik /*
10109d8f6abeSKarolina Drobnik  * A test that tries to allocate a memory region, where max_addr is
10119d8f6abeSKarolina Drobnik  * bigger than the end address of the available memory. Expect to allocate
101235e49953SRebecca Mckeever  * a region that starts at min_addr.
10139d8f6abeSKarolina Drobnik  */
101461da0332SRebecca Mckeever static int alloc_nid_bottom_up_cap_max_check(void)
10159d8f6abeSKarolina Drobnik {
10169d8f6abeSKarolina Drobnik 	struct memblock_region *rgn = &memblock.reserved.regions[0];
10179d8f6abeSKarolina Drobnik 	void *allocated_ptr = NULL;
10189d8f6abeSKarolina Drobnik 	phys_addr_t size = SZ_256;
10199d8f6abeSKarolina Drobnik 	phys_addr_t min_addr;
10209d8f6abeSKarolina Drobnik 	phys_addr_t max_addr;
10219d8f6abeSKarolina Drobnik 
102242c3ba86SRebecca Mckeever 	PREFIX_PUSH();
10239d8f6abeSKarolina Drobnik 	setup_memblock();
10249d8f6abeSKarolina Drobnik 
10259d8f6abeSKarolina Drobnik 	min_addr = memblock_start_of_DRAM() + SZ_1K;
10269d8f6abeSKarolina Drobnik 	max_addr = memblock_end_of_DRAM() + SZ_256;
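	/* max_addr is SZ_256 past the end of DRAM; expect it to be capped. */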
10279d8f6abeSKarolina Drobnik 
102861da0332SRebecca Mckeever 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
10299d8f6abeSKarolina Drobnik 					       min_addr, max_addr,
10309d8f6abeSKarolina Drobnik 					       NUMA_NO_NODE);
10319d8f6abeSKarolina Drobnik 
103276586c00SRebecca Mckeever 	ASSERT_NE(allocated_ptr, NULL);
1033ae544fd6SRebecca Mckeever 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
10349d8f6abeSKarolina Drobnik 
103576586c00SRebecca Mckeever 	ASSERT_EQ(rgn->size, size);
103676586c00SRebecca Mckeever 	ASSERT_EQ(rgn->base, min_addr);
10379d8f6abeSKarolina Drobnik 
103876586c00SRebecca Mckeever 	ASSERT_EQ(memblock.reserved.cnt, 1);
103976586c00SRebecca Mckeever 	ASSERT_EQ(memblock.reserved.total_size, size);
104076586c00SRebecca Mckeever 
104176586c00SRebecca Mckeever 	test_pass_pop();
10429d8f6abeSKarolina Drobnik 
10439d8f6abeSKarolina Drobnik 	return 0;
10449d8f6abeSKarolina Drobnik }
10459d8f6abeSKarolina Drobnik 
10469d8f6abeSKarolina Drobnik /*
10479d8f6abeSKarolina Drobnik  * A test that tries to allocate a memory region, where min_addr is
10489d8f6abeSKarolina Drobnik  * smaller than the start address of the available memory. Expect to allocate
104935e49953SRebecca Mckeever  * a region at the beginning of the available memory.
10509d8f6abeSKarolina Drobnik  */
105161da0332SRebecca Mckeever static int alloc_nid_bottom_up_cap_min_check(void)
10529d8f6abeSKarolina Drobnik {
10539d8f6abeSKarolina Drobnik 	struct memblock_region *rgn = &memblock.reserved.regions[0];
10549d8f6abeSKarolina Drobnik 	void *allocated_ptr = NULL;
10559d8f6abeSKarolina Drobnik 	phys_addr_t size = SZ_1K;
10569d8f6abeSKarolina Drobnik 	phys_addr_t min_addr;
10579d8f6abeSKarolina Drobnik 	phys_addr_t max_addr;
10589d8f6abeSKarolina Drobnik 
105942c3ba86SRebecca Mckeever 	PREFIX_PUSH();
10609d8f6abeSKarolina Drobnik 	setup_memblock();
10619d8f6abeSKarolina Drobnik 
10629d8f6abeSKarolina Drobnik 	min_addr = memblock_start_of_DRAM();
10639d8f6abeSKarolina Drobnik 	max_addr = memblock_end_of_DRAM() - SZ_256;
10649d8f6abeSKarolina Drobnik 
106561da0332SRebecca Mckeever 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
10669d8f6abeSKarolina Drobnik 					       min_addr, max_addr,
10679d8f6abeSKarolina Drobnik 					       NUMA_NO_NODE);
10689d8f6abeSKarolina Drobnik 
106976586c00SRebecca Mckeever 	ASSERT_NE(allocated_ptr, NULL);
1070ae544fd6SRebecca Mckeever 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
10719d8f6abeSKarolina Drobnik 
107276586c00SRebecca Mckeever 	ASSERT_EQ(rgn->size, size);
107376586c00SRebecca Mckeever 	ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
10749d8f6abeSKarolina Drobnik 
107576586c00SRebecca Mckeever 	ASSERT_EQ(memblock.reserved.cnt, 1);
107676586c00SRebecca Mckeever 	ASSERT_EQ(memblock.reserved.total_size, size);
107776586c00SRebecca Mckeever 
107876586c00SRebecca Mckeever 	test_pass_pop();
10799d8f6abeSKarolina Drobnik 
10809d8f6abeSKarolina Drobnik 	return 0;
10819d8f6abeSKarolina Drobnik }
10829d8f6abeSKarolina Drobnik 
108350c80241SRebecca Mckeever /* Test case wrappers for range tests */
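/*
 * Each wrapper below runs one scenario in both directions: top-down variants
 * run under memblock_set_bottom_up(false) and bottom-up variants under
 * memblock_set_bottom_up(true). Direction-agnostic scenarios reuse a single
 * *_generic_check() via the run_top_down() and run_bottom_up() helpers,
 * which are assumed to come from the shared test harness.
 */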
108461da0332SRebecca Mckeever static int alloc_nid_simple_check(void)
10859d8f6abeSKarolina Drobnik {
108676586c00SRebecca Mckeever 	test_print("\tRunning %s...\n", __func__);
10879d8f6abeSKarolina Drobnik 	memblock_set_bottom_up(false);
108861da0332SRebecca Mckeever 	alloc_nid_top_down_simple_check();
10899d8f6abeSKarolina Drobnik 	memblock_set_bottom_up(true);
109061da0332SRebecca Mckeever 	alloc_nid_bottom_up_simple_check();
10919d8f6abeSKarolina Drobnik 
10929d8f6abeSKarolina Drobnik 	return 0;
10939d8f6abeSKarolina Drobnik }
10949d8f6abeSKarolina Drobnik 
109561da0332SRebecca Mckeever static int alloc_nid_misaligned_check(void)
10969d8f6abeSKarolina Drobnik {
109776586c00SRebecca Mckeever 	test_print("\tRunning %s...\n", __func__);
10989d8f6abeSKarolina Drobnik 	memblock_set_bottom_up(false);
109961da0332SRebecca Mckeever 	alloc_nid_top_down_end_misaligned_check();
11009d8f6abeSKarolina Drobnik 	memblock_set_bottom_up(true);
110161da0332SRebecca Mckeever 	alloc_nid_bottom_up_start_misaligned_check();
11029d8f6abeSKarolina Drobnik 
11039d8f6abeSKarolina Drobnik 	return 0;
11049d8f6abeSKarolina Drobnik }
11059d8f6abeSKarolina Drobnik 
110661da0332SRebecca Mckeever static int alloc_nid_narrow_range_check(void)
11079d8f6abeSKarolina Drobnik {
110876586c00SRebecca Mckeever 	test_print("\tRunning %s...\n", __func__);
11099d8f6abeSKarolina Drobnik 	memblock_set_bottom_up(false);
111061da0332SRebecca Mckeever 	alloc_nid_top_down_narrow_range_check();
11119d8f6abeSKarolina Drobnik 	memblock_set_bottom_up(true);
111261da0332SRebecca Mckeever 	alloc_nid_bottom_up_narrow_range_check();
11139d8f6abeSKarolina Drobnik 
11149d8f6abeSKarolina Drobnik 	return 0;
11159d8f6abeSKarolina Drobnik }
11169d8f6abeSKarolina Drobnik 
111761da0332SRebecca Mckeever static int alloc_nid_reserved_with_space_check(void)
11189d8f6abeSKarolina Drobnik {
111976586c00SRebecca Mckeever 	test_print("\tRunning %s...\n", __func__);
11209d8f6abeSKarolina Drobnik 	memblock_set_bottom_up(false);
112161da0332SRebecca Mckeever 	alloc_nid_top_down_reserved_with_space_check();
11229d8f6abeSKarolina Drobnik 	memblock_set_bottom_up(true);
112361da0332SRebecca Mckeever 	alloc_nid_bottom_up_reserved_with_space_check();
11249d8f6abeSKarolina Drobnik 
11259d8f6abeSKarolina Drobnik 	return 0;
11269d8f6abeSKarolina Drobnik }
11279d8f6abeSKarolina Drobnik 
112861da0332SRebecca Mckeever static int alloc_nid_reserved_no_space_check(void)
11299d8f6abeSKarolina Drobnik {
113076586c00SRebecca Mckeever 	test_print("\tRunning %s...\n", __func__);
11319d8f6abeSKarolina Drobnik 	memblock_set_bottom_up(false);
113261da0332SRebecca Mckeever 	alloc_nid_top_down_reserved_no_space_check();
11339d8f6abeSKarolina Drobnik 	memblock_set_bottom_up(true);
113461da0332SRebecca Mckeever 	alloc_nid_bottom_up_reserved_no_space_check();
11359d8f6abeSKarolina Drobnik 
11369d8f6abeSKarolina Drobnik 	return 0;
11379d8f6abeSKarolina Drobnik }
11389d8f6abeSKarolina Drobnik 
113961da0332SRebecca Mckeever static int alloc_nid_cap_max_check(void)
11409d8f6abeSKarolina Drobnik {
114176586c00SRebecca Mckeever 	test_print("\tRunning %s...\n", __func__);
11429d8f6abeSKarolina Drobnik 	memblock_set_bottom_up(false);
114361da0332SRebecca Mckeever 	alloc_nid_top_down_cap_max_check();
11449d8f6abeSKarolina Drobnik 	memblock_set_bottom_up(true);
114561da0332SRebecca Mckeever 	alloc_nid_bottom_up_cap_max_check();
11469d8f6abeSKarolina Drobnik 
11479d8f6abeSKarolina Drobnik 	return 0;
11489d8f6abeSKarolina Drobnik }
11499d8f6abeSKarolina Drobnik 
115061da0332SRebecca Mckeever static int alloc_nid_cap_min_check(void)
11519d8f6abeSKarolina Drobnik {
115276586c00SRebecca Mckeever 	test_print("\tRunning %s...\n", __func__);
11539d8f6abeSKarolina Drobnik 	memblock_set_bottom_up(false);
115461da0332SRebecca Mckeever 	alloc_nid_top_down_cap_min_check();
11559d8f6abeSKarolina Drobnik 	memblock_set_bottom_up(true);
115661da0332SRebecca Mckeever 	alloc_nid_bottom_up_cap_min_check();
11579d8f6abeSKarolina Drobnik 
11589d8f6abeSKarolina Drobnik 	return 0;
11599d8f6abeSKarolina Drobnik }
11609d8f6abeSKarolina Drobnik 
116161da0332SRebecca Mckeever static int alloc_nid_min_reserved_check(void)
11629d8f6abeSKarolina Drobnik {
116376586c00SRebecca Mckeever 	test_print("\tRunning %s...\n", __func__);
116461da0332SRebecca Mckeever 	run_top_down(alloc_nid_min_reserved_generic_check);
116561da0332SRebecca Mckeever 	run_bottom_up(alloc_nid_min_reserved_generic_check);
11669d8f6abeSKarolina Drobnik 
11679d8f6abeSKarolina Drobnik 	return 0;
11689d8f6abeSKarolina Drobnik }
11699d8f6abeSKarolina Drobnik 
117061da0332SRebecca Mckeever static int alloc_nid_max_reserved_check(void)
11719d8f6abeSKarolina Drobnik {
117276586c00SRebecca Mckeever 	test_print("\tRunning %s...\n", __func__);
117361da0332SRebecca Mckeever 	run_top_down(alloc_nid_max_reserved_generic_check);
117461da0332SRebecca Mckeever 	run_bottom_up(alloc_nid_max_reserved_generic_check);
11759d8f6abeSKarolina Drobnik 
11769d8f6abeSKarolina Drobnik 	return 0;
11779d8f6abeSKarolina Drobnik }
11789d8f6abeSKarolina Drobnik 
117961da0332SRebecca Mckeever static int alloc_nid_exact_address_check(void)
11809d8f6abeSKarolina Drobnik {
118176586c00SRebecca Mckeever 	test_print("\tRunning %s...\n", __func__);
118261da0332SRebecca Mckeever 	run_top_down(alloc_nid_exact_address_generic_check);
118361da0332SRebecca Mckeever 	run_bottom_up(alloc_nid_exact_address_generic_check);
11849d8f6abeSKarolina Drobnik 
11859d8f6abeSKarolina Drobnik 	return 0;
11869d8f6abeSKarolina Drobnik }
11879d8f6abeSKarolina Drobnik 
118861da0332SRebecca Mckeever static int alloc_nid_reserved_full_merge_check(void)
11899d8f6abeSKarolina Drobnik {
119076586c00SRebecca Mckeever 	test_print("\tRunning %s...\n", __func__);
119161da0332SRebecca Mckeever 	run_top_down(alloc_nid_reserved_full_merge_generic_check);
119261da0332SRebecca Mckeever 	run_bottom_up(alloc_nid_reserved_full_merge_generic_check);
11939d8f6abeSKarolina Drobnik 
11949d8f6abeSKarolina Drobnik 	return 0;
11959d8f6abeSKarolina Drobnik }
11969d8f6abeSKarolina Drobnik 
119761da0332SRebecca Mckeever static int alloc_nid_reserved_all_check(void)
11989d8f6abeSKarolina Drobnik {
119976586c00SRebecca Mckeever 	test_print("\tRunning %s...\n", __func__);
120061da0332SRebecca Mckeever 	run_top_down(alloc_nid_reserved_all_generic_check);
120161da0332SRebecca Mckeever 	run_bottom_up(alloc_nid_reserved_all_generic_check);
12029d8f6abeSKarolina Drobnik 
12039d8f6abeSKarolina Drobnik 	return 0;
12049d8f6abeSKarolina Drobnik }
12059d8f6abeSKarolina Drobnik 
120661da0332SRebecca Mckeever static int alloc_nid_low_max_check(void)
12079d8f6abeSKarolina Drobnik {
120876586c00SRebecca Mckeever 	test_print("\tRunning %s...\n", __func__);
120961da0332SRebecca Mckeever 	run_top_down(alloc_nid_low_max_generic_check);
121061da0332SRebecca Mckeever 	run_bottom_up(alloc_nid_low_max_generic_check);
12119d8f6abeSKarolina Drobnik 
12129d8f6abeSKarolina Drobnik 	return 0;
12139d8f6abeSKarolina Drobnik }
12149d8f6abeSKarolina Drobnik 
121550c80241SRebecca Mckeever static int memblock_alloc_nid_range_checks(void)
12168f98435dSKarolina Drobnik {
121750c80241SRebecca Mckeever 	test_print("Running %s range tests...\n",
121861da0332SRebecca Mckeever 		   get_memblock_alloc_nid_name(alloc_nid_test_flags));
12198f98435dSKarolina Drobnik 
122061da0332SRebecca Mckeever 	alloc_nid_simple_check();
122161da0332SRebecca Mckeever 	alloc_nid_misaligned_check();
122261da0332SRebecca Mckeever 	alloc_nid_narrow_range_check();
122361da0332SRebecca Mckeever 	alloc_nid_reserved_with_space_check();
122461da0332SRebecca Mckeever 	alloc_nid_reserved_no_space_check();
122561da0332SRebecca Mckeever 	alloc_nid_cap_max_check();
122661da0332SRebecca Mckeever 	alloc_nid_cap_min_check();
12278f98435dSKarolina Drobnik 
122861da0332SRebecca Mckeever 	alloc_nid_min_reserved_check();
122961da0332SRebecca Mckeever 	alloc_nid_max_reserved_check();
123061da0332SRebecca Mckeever 	alloc_nid_exact_address_check();
123161da0332SRebecca Mckeever 	alloc_nid_reserved_full_merge_check();
123261da0332SRebecca Mckeever 	alloc_nid_reserved_all_check();
123361da0332SRebecca Mckeever 	alloc_nid_low_max_check();
12348f98435dSKarolina Drobnik 
123550c80241SRebecca Mckeever 	return 0;
123650c80241SRebecca Mckeever }
123750c80241SRebecca Mckeever 
123850c80241SRebecca Mckeever /*
123950c80241SRebecca Mckeever  * A test that tries to allocate a memory region in a specific NUMA node that
124050c80241SRebecca Mckeever  * has enough memory to allocate a region of the requested size.
124150c80241SRebecca Mckeever  * Expect to allocate an aligned region at the end of the requested node.
124250c80241SRebecca Mckeever  */
124361da0332SRebecca Mckeever static int alloc_nid_top_down_numa_simple_check(void)
124450c80241SRebecca Mckeever {
124550c80241SRebecca Mckeever 	int nid_req = 3;
124650c80241SRebecca Mckeever 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
124750c80241SRebecca Mckeever 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
124850c80241SRebecca Mckeever 	void *allocated_ptr = NULL;
124950c80241SRebecca Mckeever 	phys_addr_t size;
125050c80241SRebecca Mckeever 	phys_addr_t min_addr;
125150c80241SRebecca Mckeever 	phys_addr_t max_addr;
125250c80241SRebecca Mckeever 
125350c80241SRebecca Mckeever 	PREFIX_PUSH();
125450c80241SRebecca Mckeever 	setup_numa_memblock(node_fractions);
125550c80241SRebecca Mckeever 
125650c80241SRebecca Mckeever 	ASSERT_LE(SZ_4, req_node->size);
125750c80241SRebecca Mckeever 	size = req_node->size / SZ_4;
125850c80241SRebecca Mckeever 	min_addr = memblock_start_of_DRAM();
125950c80241SRebecca Mckeever 	max_addr = memblock_end_of_DRAM();
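	/*
	 * size is a quarter of the requested node, so the request is
	 * guaranteed to fit inside that node.
	 */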
126050c80241SRebecca Mckeever 
126161da0332SRebecca Mckeever 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
126250c80241SRebecca Mckeever 					       min_addr, max_addr, nid_req);
126350c80241SRebecca Mckeever 
126450c80241SRebecca Mckeever 	ASSERT_NE(allocated_ptr, NULL);
126550c80241SRebecca Mckeever 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
126650c80241SRebecca Mckeever 
126750c80241SRebecca Mckeever 	ASSERT_EQ(new_rgn->size, size);
126850c80241SRebecca Mckeever 	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
126950c80241SRebecca Mckeever 	ASSERT_LE(req_node->base, new_rgn->base);
127050c80241SRebecca Mckeever 
127150c80241SRebecca Mckeever 	ASSERT_EQ(memblock.reserved.cnt, 1);
127250c80241SRebecca Mckeever 	ASSERT_EQ(memblock.reserved.total_size, size);
127350c80241SRebecca Mckeever 
127450c80241SRebecca Mckeever 	test_pass_pop();
127550c80241SRebecca Mckeever 
127650c80241SRebecca Mckeever 	return 0;
127750c80241SRebecca Mckeever }
127850c80241SRebecca Mckeever 
127950c80241SRebecca Mckeever /*
128050c80241SRebecca Mckeever  * A test that tries to allocate a memory region in a specific NUMA node that
128150c80241SRebecca Mckeever  * does not have enough memory to allocate a region of the requested size:
128250c80241SRebecca Mckeever  *
128350c80241SRebecca Mckeever  *  |   +-----+          +------------------+     |
128450c80241SRebecca Mckeever  *  |   | req |          |     expected     |     |
128550c80241SRebecca Mckeever  *  +---+-----+----------+------------------+-----+
128650c80241SRebecca Mckeever  *
128750c80241SRebecca Mckeever  *  |                             +---------+     |
128850c80241SRebecca Mckeever  *  |                             |   rgn   |     |
128950c80241SRebecca Mckeever  *  +-----------------------------+---------+-----+
129050c80241SRebecca Mckeever  *
129150c80241SRebecca Mckeever  * Expect to allocate an aligned region at the end of the last node that has
129250c80241SRebecca Mckeever  * enough memory (in this case, nid = 6) after falling back to NUMA_NO_NODE.
129350c80241SRebecca Mckeever  */
129461da0332SRebecca Mckeever static int alloc_nid_top_down_numa_small_node_check(void)
129550c80241SRebecca Mckeever {
129650c80241SRebecca Mckeever 	int nid_req = 1;
129750c80241SRebecca Mckeever 	int nid_exp = 6;
129850c80241SRebecca Mckeever 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
129950c80241SRebecca Mckeever 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
130050c80241SRebecca Mckeever 	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
130150c80241SRebecca Mckeever 	void *allocated_ptr = NULL;
130250c80241SRebecca Mckeever 	phys_addr_t size;
130350c80241SRebecca Mckeever 	phys_addr_t min_addr;
130450c80241SRebecca Mckeever 	phys_addr_t max_addr;
130550c80241SRebecca Mckeever 
130650c80241SRebecca Mckeever 	PREFIX_PUSH();
130750c80241SRebecca Mckeever 	setup_numa_memblock(node_fractions);
130850c80241SRebecca Mckeever 
130950c80241SRebecca Mckeever 	size = SZ_2 * req_node->size;
131050c80241SRebecca Mckeever 	min_addr = memblock_start_of_DRAM();
131150c80241SRebecca Mckeever 	max_addr = memblock_end_of_DRAM();
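	/*
	 * Twice the size of node 1 cannot fit in node 1 itself, so the
	 * allocation is expected to fall back to node 6 (see above).
	 */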
131250c80241SRebecca Mckeever 
131361da0332SRebecca Mckeever 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
131450c80241SRebecca Mckeever 					       min_addr, max_addr, nid_req);
131550c80241SRebecca Mckeever 
131650c80241SRebecca Mckeever 	ASSERT_NE(allocated_ptr, NULL);
131750c80241SRebecca Mckeever 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
131850c80241SRebecca Mckeever 
131950c80241SRebecca Mckeever 	ASSERT_EQ(new_rgn->size, size);
132050c80241SRebecca Mckeever 	ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
132150c80241SRebecca Mckeever 	ASSERT_LE(exp_node->base, new_rgn->base);
132250c80241SRebecca Mckeever 
132350c80241SRebecca Mckeever 	ASSERT_EQ(memblock.reserved.cnt, 1);
132450c80241SRebecca Mckeever 	ASSERT_EQ(memblock.reserved.total_size, size);
132550c80241SRebecca Mckeever 
132650c80241SRebecca Mckeever 	test_pass_pop();
132750c80241SRebecca Mckeever 
132850c80241SRebecca Mckeever 	return 0;
132950c80241SRebecca Mckeever }
133050c80241SRebecca Mckeever 
133150c80241SRebecca Mckeever /*
133250c80241SRebecca Mckeever  * A test that tries to allocate a memory region in a specific NUMA node that
133350c80241SRebecca Mckeever  * is fully reserved:
133450c80241SRebecca Mckeever  *
133550c80241SRebecca Mckeever  *  |              +---------+            +------------------+     |
133650c80241SRebecca Mckeever  *  |              |requested|            |     expected     |     |
133750c80241SRebecca Mckeever  *  +--------------+---------+------------+------------------+-----+
133850c80241SRebecca Mckeever  *
133950c80241SRebecca Mckeever  *  |              +---------+                     +---------+     |
134050c80241SRebecca Mckeever  *  |              | reserved|                     |   new   |     |
134150c80241SRebecca Mckeever  *  +--------------+---------+---------------------+---------+-----+
134250c80241SRebecca Mckeever  *
134350c80241SRebecca Mckeever  * Expect to allocate an aligned region at the end of the last node that is
134450c80241SRebecca Mckeever  * large enough and has enough unreserved memory (in this case, nid = 6) after
134550c80241SRebecca Mckeever  * falling back to NUMA_NO_NODE. The region count and total size get updated.
134650c80241SRebecca Mckeever  */
134761da0332SRebecca Mckeever static int alloc_nid_top_down_numa_node_reserved_check(void)
134850c80241SRebecca Mckeever {
134950c80241SRebecca Mckeever 	int nid_req = 2;
135050c80241SRebecca Mckeever 	int nid_exp = 6;
135150c80241SRebecca Mckeever 	struct memblock_region *new_rgn = &memblock.reserved.regions[1];
135250c80241SRebecca Mckeever 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
135350c80241SRebecca Mckeever 	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
135450c80241SRebecca Mckeever 	void *allocated_ptr = NULL;
135550c80241SRebecca Mckeever 	phys_addr_t size;
135650c80241SRebecca Mckeever 	phys_addr_t min_addr;
135750c80241SRebecca Mckeever 	phys_addr_t max_addr;
135850c80241SRebecca Mckeever 
135950c80241SRebecca Mckeever 	PREFIX_PUSH();
136050c80241SRebecca Mckeever 	setup_numa_memblock(node_fractions);
136150c80241SRebecca Mckeever 
136250c80241SRebecca Mckeever 	size = req_node->size;
136350c80241SRebecca Mckeever 	min_addr = memblock_start_of_DRAM();
136450c80241SRebecca Mckeever 	max_addr = memblock_end_of_DRAM();
136550c80241SRebecca Mckeever 
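	/* Fully reserve the requested node so nothing can be allocated there. */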
136650c80241SRebecca Mckeever 	memblock_reserve(req_node->base, req_node->size);
136761da0332SRebecca Mckeever 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
136850c80241SRebecca Mckeever 					       min_addr, max_addr, nid_req);
136950c80241SRebecca Mckeever 
137050c80241SRebecca Mckeever 	ASSERT_NE(allocated_ptr, NULL);
137150c80241SRebecca Mckeever 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
137250c80241SRebecca Mckeever 
137350c80241SRebecca Mckeever 	ASSERT_EQ(new_rgn->size, size);
137450c80241SRebecca Mckeever 	ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
137550c80241SRebecca Mckeever 	ASSERT_LE(exp_node->base, new_rgn->base);
137650c80241SRebecca Mckeever 
137750c80241SRebecca Mckeever 	ASSERT_EQ(memblock.reserved.cnt, 2);
137850c80241SRebecca Mckeever 	ASSERT_EQ(memblock.reserved.total_size, size + req_node->size);
137950c80241SRebecca Mckeever 
138050c80241SRebecca Mckeever 	test_pass_pop();
138150c80241SRebecca Mckeever 
138250c80241SRebecca Mckeever 	return 0;
138350c80241SRebecca Mckeever }
138450c80241SRebecca Mckeever 
138550c80241SRebecca Mckeever /*
138650c80241SRebecca Mckeever  * A test that tries to allocate a memory region in a specific NUMA node that
138750c80241SRebecca Mckeever  * is partially reserved but has enough memory for the allocated region:
138850c80241SRebecca Mckeever  *
138950c80241SRebecca Mckeever  *  |           +---------------------------------------+          |
139050c80241SRebecca Mckeever  *  |           |               requested               |          |
139150c80241SRebecca Mckeever  *  +-----------+---------------------------------------+----------+
139250c80241SRebecca Mckeever  *
139350c80241SRebecca Mckeever  *  |           +------------------+              +-----+          |
139450c80241SRebecca Mckeever  *  |           |     reserved     |              | new |          |
139550c80241SRebecca Mckeever  *  +-----------+------------------+--------------+-----+----------+
139650c80241SRebecca Mckeever  *
139750c80241SRebecca Mckeever  * Expect to allocate an aligned region at the end of the requested node. The
139850c80241SRebecca Mckeever  * region count and total size get updated.
139950c80241SRebecca Mckeever  */
140061da0332SRebecca Mckeever static int alloc_nid_top_down_numa_part_reserved_check(void)
140150c80241SRebecca Mckeever {
140250c80241SRebecca Mckeever 	int nid_req = 4;
140350c80241SRebecca Mckeever 	struct memblock_region *new_rgn = &memblock.reserved.regions[1];
140450c80241SRebecca Mckeever 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
140550c80241SRebecca Mckeever 	void *allocated_ptr = NULL;
140650c80241SRebecca Mckeever 	struct region r1;
140750c80241SRebecca Mckeever 	phys_addr_t size;
140850c80241SRebecca Mckeever 	phys_addr_t min_addr;
140950c80241SRebecca Mckeever 	phys_addr_t max_addr;
141050c80241SRebecca Mckeever 
141150c80241SRebecca Mckeever 	PREFIX_PUSH();
141250c80241SRebecca Mckeever 	setup_numa_memblock(node_fractions);
141350c80241SRebecca Mckeever 
141450c80241SRebecca Mckeever 	ASSERT_LE(SZ_8, req_node->size);
141550c80241SRebecca Mckeever 	r1.base = req_node->base;
141650c80241SRebecca Mckeever 	r1.size = req_node->size / SZ_2;
141750c80241SRebecca Mckeever 	size = r1.size / SZ_4;
141850c80241SRebecca Mckeever 	min_addr = memblock_start_of_DRAM();
141950c80241SRebecca Mckeever 	max_addr = memblock_end_of_DRAM();
142050c80241SRebecca Mckeever 
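	/*
	 * Reserve the lower half of the requested node; the upper half still
	 * has room for the request, so no fallback is expected.
	 */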
142150c80241SRebecca Mckeever 	memblock_reserve(r1.base, r1.size);
142261da0332SRebecca Mckeever 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
142350c80241SRebecca Mckeever 					       min_addr, max_addr, nid_req);
142450c80241SRebecca Mckeever 
142550c80241SRebecca Mckeever 	ASSERT_NE(allocated_ptr, NULL);
142650c80241SRebecca Mckeever 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
142750c80241SRebecca Mckeever 
142850c80241SRebecca Mckeever 	ASSERT_EQ(new_rgn->size, size);
142950c80241SRebecca Mckeever 	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
143050c80241SRebecca Mckeever 	ASSERT_LE(req_node->base, new_rgn->base);
143150c80241SRebecca Mckeever 
143250c80241SRebecca Mckeever 	ASSERT_EQ(memblock.reserved.cnt, 2);
143350c80241SRebecca Mckeever 	ASSERT_EQ(memblock.reserved.total_size, size + r1.size);
143450c80241SRebecca Mckeever 
143550c80241SRebecca Mckeever 	test_pass_pop();
143650c80241SRebecca Mckeever 
143750c80241SRebecca Mckeever 	return 0;
143850c80241SRebecca Mckeever }
143950c80241SRebecca Mckeever 
144050c80241SRebecca Mckeever /*
144150c80241SRebecca Mckeever  * A test that tries to allocate a memory region in a specific NUMA node that
144250c80241SRebecca Mckeever  * is partially reserved and does not have enough contiguous memory for the
144350c80241SRebecca Mckeever  * allocated region:
144450c80241SRebecca Mckeever  *
144550c80241SRebecca Mckeever  *  |           +-----------------------+         +----------------------|
144650c80241SRebecca Mckeever  *  |           |       requested       |         |       expected       |
144750c80241SRebecca Mckeever  *  +-----------+-----------------------+---------+----------------------+
144850c80241SRebecca Mckeever  *
144950c80241SRebecca Mckeever  *  |                 +----------+                           +-----------|
145050c80241SRebecca Mckeever  *  |                 | reserved |                           |    new    |
145150c80241SRebecca Mckeever  *  +-----------------+----------+---------------------------+-----------+
145250c80241SRebecca Mckeever  *
145350c80241SRebecca Mckeever  * Expect to allocate an aligned region at the end of the last node that is
145450c80241SRebecca Mckeever  * large enough and has enough unreserved memory (in this case,
145550c80241SRebecca Mckeever  * nid = NUMA_NODES - 1) after falling back to NUMA_NO_NODE. The region count
145650c80241SRebecca Mckeever  * and total size get updated.
145750c80241SRebecca Mckeever  */
145861da0332SRebecca Mckeever static int alloc_nid_top_down_numa_part_reserved_fallback_check(void)
145950c80241SRebecca Mckeever {
146050c80241SRebecca Mckeever 	int nid_req = 4;
146150c80241SRebecca Mckeever 	int nid_exp = NUMA_NODES - 1;
146250c80241SRebecca Mckeever 	struct memblock_region *new_rgn = &memblock.reserved.regions[1];
146350c80241SRebecca Mckeever 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
146450c80241SRebecca Mckeever 	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
146550c80241SRebecca Mckeever 	void *allocated_ptr = NULL;
146650c80241SRebecca Mckeever 	struct region r1;
146750c80241SRebecca Mckeever 	phys_addr_t size;
146850c80241SRebecca Mckeever 	phys_addr_t min_addr;
146950c80241SRebecca Mckeever 	phys_addr_t max_addr;
147050c80241SRebecca Mckeever 
147150c80241SRebecca Mckeever 	PREFIX_PUSH();
147250c80241SRebecca Mckeever 	setup_numa_memblock(node_fractions);
147350c80241SRebecca Mckeever 
147450c80241SRebecca Mckeever 	ASSERT_LE(SZ_4, req_node->size);
147550c80241SRebecca Mckeever 	size = req_node->size / SZ_2;
147650c80241SRebecca Mckeever 	r1.base = req_node->base + (size / SZ_2);
147750c80241SRebecca Mckeever 	r1.size = size;
147850c80241SRebecca Mckeever 
147950c80241SRebecca Mckeever 	min_addr = memblock_start_of_DRAM();
148050c80241SRebecca Mckeever 	max_addr = memblock_end_of_DRAM();
148150c80241SRebecca Mckeever 
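	/*
	 * The reservation covers the middle half of the node, leaving a
	 * quarter free at each end; neither gap fits the half-node request,
	 * so expect a fallback to the last sufficiently large node.
	 */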
148250c80241SRebecca Mckeever 	memblock_reserve(r1.base, r1.size);
148361da0332SRebecca Mckeever 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
148450c80241SRebecca Mckeever 					       min_addr, max_addr, nid_req);
148550c80241SRebecca Mckeever 
148650c80241SRebecca Mckeever 	ASSERT_NE(allocated_ptr, NULL);
148750c80241SRebecca Mckeever 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
148850c80241SRebecca Mckeever 
148950c80241SRebecca Mckeever 	ASSERT_EQ(new_rgn->size, size);
149050c80241SRebecca Mckeever 	ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
149150c80241SRebecca Mckeever 	ASSERT_LE(exp_node->base, new_rgn->base);
149250c80241SRebecca Mckeever 
149350c80241SRebecca Mckeever 	ASSERT_EQ(memblock.reserved.cnt, 2);
149450c80241SRebecca Mckeever 	ASSERT_EQ(memblock.reserved.total_size, size + r1.size);
149550c80241SRebecca Mckeever 
149650c80241SRebecca Mckeever 	test_pass_pop();
149750c80241SRebecca Mckeever 
149850c80241SRebecca Mckeever 	return 0;
149950c80241SRebecca Mckeever }
150050c80241SRebecca Mckeever 
150150c80241SRebecca Mckeever /*
150250c80241SRebecca Mckeever  * A test that tries to allocate a memory region that spans over the min_addr
150350c80241SRebecca Mckeever  * and max_addr range and overlaps with two different nodes, where the first
150450c80241SRebecca Mckeever  * node is the requested node:
150550c80241SRebecca Mckeever  *
150650c80241SRebecca Mckeever  *                                min_addr
150750c80241SRebecca Mckeever  *                                |           max_addr
150850c80241SRebecca Mckeever  *                                |           |
150950c80241SRebecca Mckeever  *                                v           v
151050c80241SRebecca Mckeever  *  |           +-----------------------+-----------+              |
151150c80241SRebecca Mckeever  *  |           |       requested       |   node3   |              |
151250c80241SRebecca Mckeever  *  +-----------+-----------------------+-----------+--------------+
151350c80241SRebecca Mckeever  *                                +           +
151450c80241SRebecca Mckeever  *  |                       +-----------+                          |
151550c80241SRebecca Mckeever  *  |                       |    rgn    |                          |
151650c80241SRebecca Mckeever  *  +-----------------------+-----------+--------------------------+
151750c80241SRebecca Mckeever  *
151850c80241SRebecca Mckeever  * Expect to drop the lower limit and allocate a memory region that ends at
151950c80241SRebecca Mckeever  * the end of the requested node.
152050c80241SRebecca Mckeever  */
152161da0332SRebecca Mckeever static int alloc_nid_top_down_numa_split_range_low_check(void)
152250c80241SRebecca Mckeever {
152350c80241SRebecca Mckeever 	int nid_req = 2;
152450c80241SRebecca Mckeever 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
152550c80241SRebecca Mckeever 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
152650c80241SRebecca Mckeever 	void *allocated_ptr = NULL;
152750c80241SRebecca Mckeever 	phys_addr_t size = SZ_512;
152850c80241SRebecca Mckeever 	phys_addr_t min_addr;
152950c80241SRebecca Mckeever 	phys_addr_t max_addr;
153050c80241SRebecca Mckeever 	phys_addr_t req_node_end;
153150c80241SRebecca Mckeever 
153250c80241SRebecca Mckeever 	PREFIX_PUSH();
153350c80241SRebecca Mckeever 	setup_numa_memblock(node_fractions);
153450c80241SRebecca Mckeever 
153550c80241SRebecca Mckeever 	req_node_end = region_end(req_node);
153650c80241SRebecca Mckeever 	min_addr = req_node_end - SZ_256;
153750c80241SRebecca Mckeever 	max_addr = min_addr + size;
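	/*
	 * Only SZ_256 of the [min_addr, max_addr) window lies inside the
	 * requested node, so the lower limit is expected to be dropped and
	 * the SZ_512 region placed flush with the end of that node.
	 */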
153850c80241SRebecca Mckeever 
153961da0332SRebecca Mckeever 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
154050c80241SRebecca Mckeever 					       min_addr, max_addr, nid_req);
154150c80241SRebecca Mckeever 
154250c80241SRebecca Mckeever 	ASSERT_NE(allocated_ptr, NULL);
154350c80241SRebecca Mckeever 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
154450c80241SRebecca Mckeever 
154550c80241SRebecca Mckeever 	ASSERT_EQ(new_rgn->size, size);
154650c80241SRebecca Mckeever 	ASSERT_EQ(new_rgn->base, req_node_end - size);
154750c80241SRebecca Mckeever 	ASSERT_LE(req_node->base, new_rgn->base);
154850c80241SRebecca Mckeever 
154950c80241SRebecca Mckeever 	ASSERT_EQ(memblock.reserved.cnt, 1);
155050c80241SRebecca Mckeever 	ASSERT_EQ(memblock.reserved.total_size, size);
155150c80241SRebecca Mckeever 
155250c80241SRebecca Mckeever 	test_pass_pop();
155350c80241SRebecca Mckeever 
155450c80241SRebecca Mckeever 	return 0;
155550c80241SRebecca Mckeever }
155650c80241SRebecca Mckeever 
155750c80241SRebecca Mckeever /*
155850c80241SRebecca Mckeever  * A test that tries to allocate a memory region that spans over the min_addr
155950c80241SRebecca Mckeever  * and max_addr range and overlaps with two different nodes, where the second
156050c80241SRebecca Mckeever  * node is the requested node:
156150c80241SRebecca Mckeever  *
156250c80241SRebecca Mckeever  *                               min_addr
156350c80241SRebecca Mckeever  *                               |         max_addr
156450c80241SRebecca Mckeever  *                               |         |
156550c80241SRebecca Mckeever  *                               v         v
156650c80241SRebecca Mckeever  *  |      +--------------------------+---------+                |
156750c80241SRebecca Mckeever  *  |      |         expected         |requested|                |
156850c80241SRebecca Mckeever  *  +------+--------------------------+---------+----------------+
156950c80241SRebecca Mckeever  *                               +         +
157050c80241SRebecca Mckeever  *  |                       +---------+                          |
157150c80241SRebecca Mckeever  *  |                       |   rgn   |                          |
157250c80241SRebecca Mckeever  *  +-----------------------+---------+--------------------------+
157350c80241SRebecca Mckeever  *
157450c80241SRebecca Mckeever  * Expect to drop the lower limit and allocate a memory region that
157550c80241SRebecca Mckeever  * ends at the end of the first node that overlaps with the range.
157650c80241SRebecca Mckeever  */
157761da0332SRebecca Mckeever static int alloc_nid_top_down_numa_split_range_high_check(void)
157850c80241SRebecca Mckeever {
157950c80241SRebecca Mckeever 	int nid_req = 3;
158050c80241SRebecca Mckeever 	int nid_exp = nid_req - 1;
158150c80241SRebecca Mckeever 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
158250c80241SRebecca Mckeever 	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
158350c80241SRebecca Mckeever 	void *allocated_ptr = NULL;
158450c80241SRebecca Mckeever 	phys_addr_t size = SZ_512;
158550c80241SRebecca Mckeever 	phys_addr_t min_addr;
158650c80241SRebecca Mckeever 	phys_addr_t max_addr;
158750c80241SRebecca Mckeever 	phys_addr_t exp_node_end;
158850c80241SRebecca Mckeever 
158950c80241SRebecca Mckeever 	PREFIX_PUSH();
159050c80241SRebecca Mckeever 	setup_numa_memblock(node_fractions);
159150c80241SRebecca Mckeever 
159250c80241SRebecca Mckeever 	exp_node_end = region_end(exp_node);
159350c80241SRebecca Mckeever 	min_addr = exp_node_end - SZ_256;
159450c80241SRebecca Mckeever 	max_addr = min_addr + size;
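	/*
	 * min_addr cuts SZ_256 into the node below the requested one;
	 * dropping the lower limit lets the region end at the end of that
	 * neighbouring node (nid_exp).
	 */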
159550c80241SRebecca Mckeever 
159661da0332SRebecca Mckeever 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
159750c80241SRebecca Mckeever 					       min_addr, max_addr, nid_req);
159850c80241SRebecca Mckeever 
159950c80241SRebecca Mckeever 	ASSERT_NE(allocated_ptr, NULL);
160050c80241SRebecca Mckeever 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
160150c80241SRebecca Mckeever 
160250c80241SRebecca Mckeever 	ASSERT_EQ(new_rgn->size, size);
160350c80241SRebecca Mckeever 	ASSERT_EQ(new_rgn->base, exp_node_end - size);
160450c80241SRebecca Mckeever 	ASSERT_LE(exp_node->base, new_rgn->base);
160550c80241SRebecca Mckeever 
160650c80241SRebecca Mckeever 	ASSERT_EQ(memblock.reserved.cnt, 1);
160750c80241SRebecca Mckeever 	ASSERT_EQ(memblock.reserved.total_size, size);
160850c80241SRebecca Mckeever 
160950c80241SRebecca Mckeever 	test_pass_pop();
161050c80241SRebecca Mckeever 
161150c80241SRebecca Mckeever 	return 0;
161250c80241SRebecca Mckeever }
161350c80241SRebecca Mckeever 
161450c80241SRebecca Mckeever /*
161550c80241SRebecca Mckeever  * A test that tries to allocate a memory region that spans over the min_addr
161650c80241SRebecca Mckeever  * and max_addr range and overlaps with two different nodes, where the requested
161750c80241SRebecca Mckeever  * node ends before min_addr:
161850c80241SRebecca Mckeever  *
161950c80241SRebecca Mckeever  *                                         min_addr
162050c80241SRebecca Mckeever  *                                         |         max_addr
162150c80241SRebecca Mckeever  *                                         |         |
162250c80241SRebecca Mckeever  *                                         v         v
162350c80241SRebecca Mckeever  *  |    +---------------+        +-------------+---------+          |
162450c80241SRebecca Mckeever  *  |    |   requested   |        |    node1    |  node2  |          |
162550c80241SRebecca Mckeever  *  +----+---------------+--------+-------------+---------+----------+
162650c80241SRebecca Mckeever  *                                         +         +
162750c80241SRebecca Mckeever  *  |          +---------+                                           |
162850c80241SRebecca Mckeever  *  |          |   rgn   |                                           |
162950c80241SRebecca Mckeever  *  +----------+---------+-------------------------------------------+
163050c80241SRebecca Mckeever  *
163150c80241SRebecca Mckeever  * Expect to drop the lower limit and allocate a memory region that ends at
163250c80241SRebecca Mckeever  * the end of the requested node.
163350c80241SRebecca Mckeever  */
163461da0332SRebecca Mckeever static int alloc_nid_top_down_numa_no_overlap_split_check(void)
163550c80241SRebecca Mckeever {
163650c80241SRebecca Mckeever 	int nid_req = 2;
163750c80241SRebecca Mckeever 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
163850c80241SRebecca Mckeever 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
163950c80241SRebecca Mckeever 	struct memblock_region *node2 = &memblock.memory.regions[6];
164050c80241SRebecca Mckeever 	void *allocated_ptr = NULL;
164150c80241SRebecca Mckeever 	phys_addr_t size;
164250c80241SRebecca Mckeever 	phys_addr_t min_addr;
164350c80241SRebecca Mckeever 	phys_addr_t max_addr;
164450c80241SRebecca Mckeever 
164550c80241SRebecca Mckeever 	PREFIX_PUSH();
164650c80241SRebecca Mckeever 	setup_numa_memblock(node_fractions);
164750c80241SRebecca Mckeever 
164850c80241SRebecca Mckeever 	size = SZ_512;
164950c80241SRebecca Mckeever 	min_addr = node2->base - SZ_256;
165050c80241SRebecca Mckeever 	max_addr = min_addr + size;
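	/*
	 * The window straddles the lower boundary of node2 and does not
	 * intersect the requested node at all, so the range is expected to
	 * be dropped in favor of the end of the requested node.
	 */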
165150c80241SRebecca Mckeever 
165261da0332SRebecca Mckeever 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
165350c80241SRebecca Mckeever 					       min_addr, max_addr, nid_req);
165450c80241SRebecca Mckeever 
165550c80241SRebecca Mckeever 	ASSERT_NE(allocated_ptr, NULL);
165650c80241SRebecca Mckeever 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
165750c80241SRebecca Mckeever 
165850c80241SRebecca Mckeever 	ASSERT_EQ(new_rgn->size, size);
165950c80241SRebecca Mckeever 	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
166050c80241SRebecca Mckeever 	ASSERT_LE(req_node->base, new_rgn->base);
166150c80241SRebecca Mckeever 
166250c80241SRebecca Mckeever 	ASSERT_EQ(memblock.reserved.cnt, 1);
166350c80241SRebecca Mckeever 	ASSERT_EQ(memblock.reserved.total_size, size);
166450c80241SRebecca Mckeever 
166550c80241SRebecca Mckeever 	test_pass_pop();
166650c80241SRebecca Mckeever 
166750c80241SRebecca Mckeever 	return 0;
166850c80241SRebecca Mckeever }
166950c80241SRebecca Mckeever 
167050c80241SRebecca Mckeever /*
167150c80241SRebecca Mckeever  * A test that tries to allocate memory within the min_addr and max_addr
167250c80241SRebecca Mckeever  * range when the requested node and the range do not overlap, and the
167350c80241SRebecca Mckeever  * requested node ends before min_addr. The range overlaps with multiple
167450c80241SRebecca Mckeever  * nodes along node boundaries:
167550c80241SRebecca Mckeever  *
167650c80241SRebecca Mckeever  *                          min_addr
167750c80241SRebecca Mckeever  *                          |                                 max_addr
167850c80241SRebecca Mckeever  *                          |                                 |
167950c80241SRebecca Mckeever  *                          v                                 v
168050c80241SRebecca Mckeever  *  |-----------+           +----------+----...----+----------+      |
168150c80241SRebecca Mckeever  *  | requested |           | min node |    ...    | max node |      |
168250c80241SRebecca Mckeever  *  +-----------+-----------+----------+----...----+----------+------+
168350c80241SRebecca Mckeever  *                          +                                 +
168450c80241SRebecca Mckeever  *  |                                                   +-----+      |
168550c80241SRebecca Mckeever  *  |                                                   | rgn |      |
168650c80241SRebecca Mckeever  *  +---------------------------------------------------+-----+------+
168750c80241SRebecca Mckeever  *
168850c80241SRebecca Mckeever  * Expect to allocate a memory region at the end of the final node in
168950c80241SRebecca Mckeever  * the range after falling back to NUMA_NO_NODE.
169050c80241SRebecca Mckeever  */
169161da0332SRebecca Mckeever static int alloc_nid_top_down_numa_no_overlap_low_check(void)
169250c80241SRebecca Mckeever {
169350c80241SRebecca Mckeever 	int nid_req = 0;
169450c80241SRebecca Mckeever 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
169550c80241SRebecca Mckeever 	struct memblock_region *min_node = &memblock.memory.regions[2];
169650c80241SRebecca Mckeever 	struct memblock_region *max_node = &memblock.memory.regions[5];
169750c80241SRebecca Mckeever 	void *allocated_ptr = NULL;
169850c80241SRebecca Mckeever 	phys_addr_t size = SZ_64;
169950c80241SRebecca Mckeever 	phys_addr_t max_addr;
170050c80241SRebecca Mckeever 	phys_addr_t min_addr;
170150c80241SRebecca Mckeever 
170250c80241SRebecca Mckeever 	PREFIX_PUSH();
170350c80241SRebecca Mckeever 	setup_numa_memblock(node_fractions);
170450c80241SRebecca Mckeever 
170550c80241SRebecca Mckeever 	min_addr = min_node->base;
170650c80241SRebecca Mckeever 	max_addr = region_end(max_node);
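	/*
	 * The range spans nodes 2 through 5 while node 0 is requested; since
	 * they do not overlap, expect the nid hint to be dropped and a
	 * top-down allocation at the end of node 5.
	 */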
170750c80241SRebecca Mckeever 
170861da0332SRebecca Mckeever 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
170950c80241SRebecca Mckeever 					       min_addr, max_addr, nid_req);
171050c80241SRebecca Mckeever 
171150c80241SRebecca Mckeever 	ASSERT_NE(allocated_ptr, NULL);
171250c80241SRebecca Mckeever 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
171350c80241SRebecca Mckeever 
171450c80241SRebecca Mckeever 	ASSERT_EQ(new_rgn->size, size);
171550c80241SRebecca Mckeever 	ASSERT_EQ(new_rgn->base, max_addr - size);
171650c80241SRebecca Mckeever 	ASSERT_LE(max_node->base, new_rgn->base);
171750c80241SRebecca Mckeever 
171850c80241SRebecca Mckeever 	ASSERT_EQ(memblock.reserved.cnt, 1);
171950c80241SRebecca Mckeever 	ASSERT_EQ(memblock.reserved.total_size, size);
172050c80241SRebecca Mckeever 
172150c80241SRebecca Mckeever 	test_pass_pop();
172250c80241SRebecca Mckeever 
172350c80241SRebecca Mckeever 	return 0;
172450c80241SRebecca Mckeever }
172550c80241SRebecca Mckeever 
172650c80241SRebecca Mckeever /*
172750c80241SRebecca Mckeever  * A test that tries to allocate memory within the min_addr and max_addr
172850c80241SRebecca Mckeever  * range when the requested node and the range do not overlap, and the
172950c80241SRebecca Mckeever  * requested node starts after max_addr. The range overlaps with multiple
173050c80241SRebecca Mckeever  * nodes along node boundaries:
173150c80241SRebecca Mckeever  *
173250c80241SRebecca Mckeever  *        min_addr
173350c80241SRebecca Mckeever  *        |                                 max_addr
173450c80241SRebecca Mckeever  *        |                                 |
173550c80241SRebecca Mckeever  *        v                                 v
173650c80241SRebecca Mckeever  *  |     +----------+----...----+----------+        +-----------+   |
173750c80241SRebecca Mckeever  *  |     | min node |    ...    | max node |        | requested |   |
173850c80241SRebecca Mckeever  *  +-----+----------+----...----+----------+--------+-----------+---+
173950c80241SRebecca Mckeever  *        +                                 +
174050c80241SRebecca Mckeever  *  |                                 +-----+                        |
174150c80241SRebecca Mckeever  *  |                                 | rgn |                        |
174250c80241SRebecca Mckeever  *  +---------------------------------+-----+------------------------+
174350c80241SRebecca Mckeever  *
174450c80241SRebecca Mckeever  * Expect to allocate a memory region at the end of the final node in
174550c80241SRebecca Mckeever  * the range after falling back to NUMA_NO_NODE.
174650c80241SRebecca Mckeever  */
174761da0332SRebecca Mckeever static int alloc_nid_top_down_numa_no_overlap_high_check(void)
174850c80241SRebecca Mckeever {
174950c80241SRebecca Mckeever 	int nid_req = 7;
175050c80241SRebecca Mckeever 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
175150c80241SRebecca Mckeever 	struct memblock_region *min_node = &memblock.memory.regions[2];
175250c80241SRebecca Mckeever 	struct memblock_region *max_node = &memblock.memory.regions[5];
175350c80241SRebecca Mckeever 	void *allocated_ptr = NULL;
175450c80241SRebecca Mckeever 	phys_addr_t size = SZ_64;
175550c80241SRebecca Mckeever 	phys_addr_t max_addr;
175650c80241SRebecca Mckeever 	phys_addr_t min_addr;
175750c80241SRebecca Mckeever 
175850c80241SRebecca Mckeever 	PREFIX_PUSH();
175950c80241SRebecca Mckeever 	setup_numa_memblock(node_fractions);
176050c80241SRebecca Mckeever 
176150c80241SRebecca Mckeever 	min_addr = min_node->base;
176250c80241SRebecca Mckeever 	max_addr = region_end(max_node);
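	/*
	 * Same range as in the previous test, but the requested node (7) now
	 * starts after max_addr; the expected placement is the same, at the
	 * end of node 5.
	 */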
176350c80241SRebecca Mckeever 
176461da0332SRebecca Mckeever 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
176550c80241SRebecca Mckeever 					       min_addr, max_addr, nid_req);
176650c80241SRebecca Mckeever 
176750c80241SRebecca Mckeever 	ASSERT_NE(allocated_ptr, NULL);
176850c80241SRebecca Mckeever 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
176950c80241SRebecca Mckeever 
177050c80241SRebecca Mckeever 	ASSERT_EQ(new_rgn->size, size);
177150c80241SRebecca Mckeever 	ASSERT_EQ(new_rgn->base, max_addr - size);
177250c80241SRebecca Mckeever 	ASSERT_LE(max_node->base, new_rgn->base);
177350c80241SRebecca Mckeever 
177450c80241SRebecca Mckeever 	ASSERT_EQ(memblock.reserved.cnt, 1);
177550c80241SRebecca Mckeever 	ASSERT_EQ(memblock.reserved.total_size, size);
177650c80241SRebecca Mckeever 
177750c80241SRebecca Mckeever 	test_pass_pop();
177850c80241SRebecca Mckeever 
177950c80241SRebecca Mckeever 	return 0;
178050c80241SRebecca Mckeever }
178150c80241SRebecca Mckeever 
17824b41046eSRebecca Mckeever /*
17834b41046eSRebecca Mckeever  * A test that tries to allocate a memory region in a specific NUMA node that
17844b41046eSRebecca Mckeever  * has enough memory to allocate a region of the requested size.
17854b41046eSRebecca Mckeever  * Expect to allocate an aligned region at the beginning of the requested node.
17864b41046eSRebecca Mckeever  */
178761da0332SRebecca Mckeever static int alloc_nid_bottom_up_numa_simple_check(void)
17884b41046eSRebecca Mckeever {
17894b41046eSRebecca Mckeever 	int nid_req = 3;
17904b41046eSRebecca Mckeever 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
17914b41046eSRebecca Mckeever 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
17924b41046eSRebecca Mckeever 	void *allocated_ptr = NULL;
17934b41046eSRebecca Mckeever 	phys_addr_t size;
17944b41046eSRebecca Mckeever 	phys_addr_t min_addr;
17954b41046eSRebecca Mckeever 	phys_addr_t max_addr;
17964b41046eSRebecca Mckeever 
17974b41046eSRebecca Mckeever 	PREFIX_PUSH();
17984b41046eSRebecca Mckeever 	setup_numa_memblock(node_fractions);
17994b41046eSRebecca Mckeever 
18004b41046eSRebecca Mckeever 	ASSERT_LE(SZ_4, req_node->size);
18014b41046eSRebecca Mckeever 	size = req_node->size / SZ_4;
18024b41046eSRebecca Mckeever 	min_addr = memblock_start_of_DRAM();
18034b41046eSRebecca Mckeever 	max_addr = memblock_end_of_DRAM();
18044b41046eSRebecca Mckeever 
180561da0332SRebecca Mckeever 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
18064b41046eSRebecca Mckeever 					       min_addr, max_addr, nid_req);
18074b41046eSRebecca Mckeever 
18084b41046eSRebecca Mckeever 	ASSERT_NE(allocated_ptr, NULL);
18094b41046eSRebecca Mckeever 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
18104b41046eSRebecca Mckeever 
18114b41046eSRebecca Mckeever 	ASSERT_EQ(new_rgn->size, size);
18124b41046eSRebecca Mckeever 	ASSERT_EQ(new_rgn->base, req_node->base);
18134b41046eSRebecca Mckeever 	ASSERT_LE(region_end(new_rgn), region_end(req_node));
18144b41046eSRebecca Mckeever 
18154b41046eSRebecca Mckeever 	ASSERT_EQ(memblock.reserved.cnt, 1);
18164b41046eSRebecca Mckeever 	ASSERT_EQ(memblock.reserved.total_size, size);
18174b41046eSRebecca Mckeever 
18184b41046eSRebecca Mckeever 	test_pass_pop();
18194b41046eSRebecca Mckeever 
18204b41046eSRebecca Mckeever 	return 0;
18214b41046eSRebecca Mckeever }
18224b41046eSRebecca Mckeever 
18234b41046eSRebecca Mckeever /*
18244b41046eSRebecca Mckeever  * A test that tries to allocate a memory region in a specific NUMA node that
18254b41046eSRebecca Mckeever  * does not have enough memory to allocate a region of the requested size:
18264b41046eSRebecca Mckeever  *
18274b41046eSRebecca Mckeever  *  |----------------------+-----+                |
18284b41046eSRebecca Mckeever  *  |       expected       | req |                |
18294b41046eSRebecca Mckeever  *  +----------------------+-----+----------------+
18304b41046eSRebecca Mckeever  *
18314b41046eSRebecca Mckeever  *  |---------+                                   |
18324b41046eSRebecca Mckeever  *  |   rgn   |                                   |
18334b41046eSRebecca Mckeever  *  +---------+-----------------------------------+
18344b41046eSRebecca Mckeever  *
18354b41046eSRebecca Mckeever  * Expect to allocate an aligned region at the beginning of the first node that
18364b41046eSRebecca Mckeever  * has enough memory (in this case, nid = 0) after falling back to NUMA_NO_NODE.
18374b41046eSRebecca Mckeever  */
183861da0332SRebecca Mckeever static int alloc_nid_bottom_up_numa_small_node_check(void)
18394b41046eSRebecca Mckeever {
18404b41046eSRebecca Mckeever 	int nid_req = 1;
18414b41046eSRebecca Mckeever 	int nid_exp = 0;
18424b41046eSRebecca Mckeever 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
18434b41046eSRebecca Mckeever 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
18444b41046eSRebecca Mckeever 	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
18454b41046eSRebecca Mckeever 	void *allocated_ptr = NULL;
18464b41046eSRebecca Mckeever 	phys_addr_t size;
18474b41046eSRebecca Mckeever 	phys_addr_t min_addr;
18484b41046eSRebecca Mckeever 	phys_addr_t max_addr;
18494b41046eSRebecca Mckeever 
18504b41046eSRebecca Mckeever 	PREFIX_PUSH();
18514b41046eSRebecca Mckeever 	setup_numa_memblock(node_fractions);
18524b41046eSRebecca Mckeever 
18534b41046eSRebecca Mckeever 	size = SZ_2 * req_node->size;
18544b41046eSRebecca Mckeever 	min_addr = memblock_start_of_DRAM();
18554b41046eSRebecca Mckeever 	max_addr = memblock_end_of_DRAM();
18564b41046eSRebecca Mckeever 
185761da0332SRebecca Mckeever 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
18584b41046eSRebecca Mckeever 					       min_addr, max_addr, nid_req);
18594b41046eSRebecca Mckeever 
18604b41046eSRebecca Mckeever 	ASSERT_NE(allocated_ptr, NULL);
18614b41046eSRebecca Mckeever 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
18624b41046eSRebecca Mckeever 
18634b41046eSRebecca Mckeever 	ASSERT_EQ(new_rgn->size, size);
18644b41046eSRebecca Mckeever 	ASSERT_EQ(new_rgn->base, exp_node->base);
18654b41046eSRebecca Mckeever 	ASSERT_LE(region_end(new_rgn), region_end(exp_node));
18664b41046eSRebecca Mckeever 
18674b41046eSRebecca Mckeever 	ASSERT_EQ(memblock.reserved.cnt, 1);
18684b41046eSRebecca Mckeever 	ASSERT_EQ(memblock.reserved.total_size, size);
18694b41046eSRebecca Mckeever 
18704b41046eSRebecca Mckeever 	test_pass_pop();
18714b41046eSRebecca Mckeever 
18724b41046eSRebecca Mckeever 	return 0;
18734b41046eSRebecca Mckeever }
18744b41046eSRebecca Mckeever 
18754b41046eSRebecca Mckeever /*
18764b41046eSRebecca Mckeever  * A test that tries to allocate a memory region in a specific NUMA node that
18774b41046eSRebecca Mckeever  * is fully reserved:
18784b41046eSRebecca Mckeever  *
18794b41046eSRebecca Mckeever  *  |----------------------+     +-----------+                    |
18804b41046eSRebecca Mckeever  *  |       expected       |     | requested |                    |
18814b41046eSRebecca Mckeever  *  +----------------------+-----+-----------+--------------------+
18824b41046eSRebecca Mckeever  *
18834b41046eSRebecca Mckeever  *  |-----------+                +-----------+                    |
18844b41046eSRebecca Mckeever  *  |    new    |                |  reserved |                    |
18854b41046eSRebecca Mckeever  *  +-----------+----------------+-----------+--------------------+
18864b41046eSRebecca Mckeever  *
18874b41046eSRebecca Mckeever  * Expect to allocate an aligned region at the beginning of the first node that
18884b41046eSRebecca Mckeever  * is large enough and has enough unreserved memory (in this case, nid = 0)
18894b41046eSRebecca Mckeever  * after falling back to NUMA_NO_NODE. The region count and total size get
18904b41046eSRebecca Mckeever  * updated.
18914b41046eSRebecca Mckeever  */
189261da0332SRebecca Mckeever static int alloc_nid_bottom_up_numa_node_reserved_check(void)
18934b41046eSRebecca Mckeever {
18944b41046eSRebecca Mckeever 	int nid_req = 2;
18954b41046eSRebecca Mckeever 	int nid_exp = 0;
18964b41046eSRebecca Mckeever 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
18974b41046eSRebecca Mckeever 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
18984b41046eSRebecca Mckeever 	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
18994b41046eSRebecca Mckeever 	void *allocated_ptr = NULL;
19004b41046eSRebecca Mckeever 	phys_addr_t size;
19014b41046eSRebecca Mckeever 	phys_addr_t min_addr;
19024b41046eSRebecca Mckeever 	phys_addr_t max_addr;
19034b41046eSRebecca Mckeever 
19044b41046eSRebecca Mckeever 	PREFIX_PUSH();
19054b41046eSRebecca Mckeever 	setup_numa_memblock(node_fractions);
19064b41046eSRebecca Mckeever 
19074b41046eSRebecca Mckeever 	size = req_node->size;
19084b41046eSRebecca Mckeever 	min_addr = memblock_start_of_DRAM();
19094b41046eSRebecca Mckeever 	max_addr = memblock_end_of_DRAM();
19104b41046eSRebecca Mckeever 
19114b41046eSRebecca Mckeever 	memblock_reserve(req_node->base, req_node->size);
191261da0332SRebecca Mckeever 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
19134b41046eSRebecca Mckeever 					       min_addr, max_addr, nid_req);
19144b41046eSRebecca Mckeever 
19154b41046eSRebecca Mckeever 	ASSERT_NE(allocated_ptr, NULL);
19164b41046eSRebecca Mckeever 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
19174b41046eSRebecca Mckeever 
19184b41046eSRebecca Mckeever 	ASSERT_EQ(new_rgn->size, size);
19194b41046eSRebecca Mckeever 	ASSERT_EQ(new_rgn->base, exp_node->base);
19204b41046eSRebecca Mckeever 	ASSERT_LE(region_end(new_rgn), region_end(exp_node));
19214b41046eSRebecca Mckeever 
19224b41046eSRebecca Mckeever 	ASSERT_EQ(memblock.reserved.cnt, 2);
19234b41046eSRebecca Mckeever 	ASSERT_EQ(memblock.reserved.total_size, size + req_node->size);
19244b41046eSRebecca Mckeever 
19254b41046eSRebecca Mckeever 	test_pass_pop();
19264b41046eSRebecca Mckeever 
19274b41046eSRebecca Mckeever 	return 0;
19284b41046eSRebecca Mckeever }
19294b41046eSRebecca Mckeever 
19304b41046eSRebecca Mckeever /*
19314b41046eSRebecca Mckeever  * A test that tries to allocate a memory region in a specific NUMA node that
19324b41046eSRebecca Mckeever  * is partially reserved but has enough memory for the allocated region:
19334b41046eSRebecca Mckeever  *
19344b41046eSRebecca Mckeever  *  |           +---------------------------------------+         |
19354b41046eSRebecca Mckeever  *  |           |               requested               |         |
19364b41046eSRebecca Mckeever  *  +-----------+---------------------------------------+---------+
19374b41046eSRebecca Mckeever  *
19384b41046eSRebecca Mckeever  *  |           +------------------+-----+                        |
19394b41046eSRebecca Mckeever  *  |           |     reserved     | new |                        |
19404b41046eSRebecca Mckeever  *  +-----------+------------------+-----+------------------------+
19414b41046eSRebecca Mckeever  *
19424b41046eSRebecca Mckeever  * Expect to allocate an aligned region in the requested node that merges with
19434b41046eSRebecca Mckeever  * the existing reserved region. The total size gets updated.
19444b41046eSRebecca Mckeever  */
194561da0332SRebecca Mckeever static int alloc_nid_bottom_up_numa_part_reserved_check(void)
19464b41046eSRebecca Mckeever {
19474b41046eSRebecca Mckeever 	int nid_req = 4;
19484b41046eSRebecca Mckeever 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
19494b41046eSRebecca Mckeever 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
19504b41046eSRebecca Mckeever 	void *allocated_ptr = NULL;
19514b41046eSRebecca Mckeever 	struct region r1;
19524b41046eSRebecca Mckeever 	phys_addr_t size;
19534b41046eSRebecca Mckeever 	phys_addr_t min_addr;
19544b41046eSRebecca Mckeever 	phys_addr_t max_addr;
19554b41046eSRebecca Mckeever 	phys_addr_t total_size;
19564b41046eSRebecca Mckeever 
19574b41046eSRebecca Mckeever 	PREFIX_PUSH();
19584b41046eSRebecca Mckeever 	setup_numa_memblock(node_fractions);
19594b41046eSRebecca Mckeever 
19604b41046eSRebecca Mckeever 	ASSERT_LE(SZ_8, req_node->size);
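	/*
	 * Reserve the lower half of the requested node, then request a
	 * quarter of the reserved size: the allocation is placed right
	 * after the reservation, so the two regions merge into one.
	 */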
19614b41046eSRebecca Mckeever 	r1.base = req_node->base;
19624b41046eSRebecca Mckeever 	r1.size = req_node->size / SZ_2;
19634b41046eSRebecca Mckeever 	size = r1.size / SZ_4;
19644b41046eSRebecca Mckeever 	min_addr = memblock_start_of_DRAM();
19654b41046eSRebecca Mckeever 	max_addr = memblock_end_of_DRAM();
19664b41046eSRebecca Mckeever 	total_size = size + r1.size;
19674b41046eSRebecca Mckeever 
19684b41046eSRebecca Mckeever 	memblock_reserve(r1.base, r1.size);
196961da0332SRebecca Mckeever 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
19704b41046eSRebecca Mckeever 					       min_addr, max_addr, nid_req);
19714b41046eSRebecca Mckeever 
19724b41046eSRebecca Mckeever 	ASSERT_NE(allocated_ptr, NULL);
19734b41046eSRebecca Mckeever 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
19744b41046eSRebecca Mckeever 
19754b41046eSRebecca Mckeever 	ASSERT_EQ(new_rgn->size, total_size);
19764b41046eSRebecca Mckeever 	ASSERT_EQ(new_rgn->base, req_node->base);
19774b41046eSRebecca Mckeever 	ASSERT_LE(region_end(new_rgn), region_end(req_node));
19784b41046eSRebecca Mckeever 
19794b41046eSRebecca Mckeever 	ASSERT_EQ(memblock.reserved.cnt, 1);
19804b41046eSRebecca Mckeever 	ASSERT_EQ(memblock.reserved.total_size, total_size);
19814b41046eSRebecca Mckeever 
19824b41046eSRebecca Mckeever 	test_pass_pop();
19834b41046eSRebecca Mckeever 
19844b41046eSRebecca Mckeever 	return 0;
19854b41046eSRebecca Mckeever }
19864b41046eSRebecca Mckeever 
19874b41046eSRebecca Mckeever /*
19884b41046eSRebecca Mckeever  * A test that tries to allocate a memory region in a specific NUMA node that
19894b41046eSRebecca Mckeever  * is partially reserved and does not have enough contiguous memory for the
19904b41046eSRebecca Mckeever  * allocated region:
19914b41046eSRebecca Mckeever  *
19924b41046eSRebecca Mckeever  *  |----------------------+       +-----------------------+         |
19934b41046eSRebecca Mckeever  *  |       expected       |       |       requested       |         |
19944b41046eSRebecca Mckeever  *  +----------------------+-------+-----------------------+---------+
19954b41046eSRebecca Mckeever  *
19964b41046eSRebecca Mckeever  *  |-----------+                        +----------+                |
19974b41046eSRebecca Mckeever  *  |    new    |                        | reserved |                |
19984b41046eSRebecca Mckeever  *  +-----------+------------------------+----------+----------------+
19994b41046eSRebecca Mckeever  *
20004b41046eSRebecca Mckeever  * Expect to allocate an aligned region at the beginning of the first
20014b41046eSRebecca Mckeever  * node that is large enough and has enough unreserved memory (in this case,
20024b41046eSRebecca Mckeever  * nid = 0) after falling back to NUMA_NO_NODE. The region count and total size
20034b41046eSRebecca Mckeever  * get updated.
20044b41046eSRebecca Mckeever  */
200561da0332SRebecca Mckeever static int alloc_nid_bottom_up_numa_part_reserved_fallback_check(void)
20064b41046eSRebecca Mckeever {
20074b41046eSRebecca Mckeever 	int nid_req = 4;
20084b41046eSRebecca Mckeever 	int nid_exp = 0;
20094b41046eSRebecca Mckeever 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
20104b41046eSRebecca Mckeever 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
20114b41046eSRebecca Mckeever 	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
20124b41046eSRebecca Mckeever 	void *allocated_ptr = NULL;
20134b41046eSRebecca Mckeever 	struct region r1;
20144b41046eSRebecca Mckeever 	phys_addr_t size;
20154b41046eSRebecca Mckeever 	phys_addr_t min_addr;
20164b41046eSRebecca Mckeever 	phys_addr_t max_addr;
20174b41046eSRebecca Mckeever 
20184b41046eSRebecca Mckeever 	PREFIX_PUSH();
20194b41046eSRebecca Mckeever 	setup_numa_memblock(node_fractions);
20204b41046eSRebecca Mckeever 
20214b41046eSRebecca Mckeever 	ASSERT_LE(SZ_4, req_node->size);
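	/*
	 * Reserve a region of half the node's size in the middle of the
	 * requested node: the free chunks on either side are each only
	 * half of the requested size, forcing a fallback to another node.
	 */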
20224b41046eSRebecca Mckeever 	size = req_node->size / SZ_2;
20234b41046eSRebecca Mckeever 	r1.base = req_node->base + (size / SZ_2);
20244b41046eSRebecca Mckeever 	r1.size = size;
20254b41046eSRebecca Mckeever 
20264b41046eSRebecca Mckeever 	min_addr = memblock_start_of_DRAM();
20274b41046eSRebecca Mckeever 	max_addr = memblock_end_of_DRAM();
20284b41046eSRebecca Mckeever 
20294b41046eSRebecca Mckeever 	memblock_reserve(r1.base, r1.size);
203061da0332SRebecca Mckeever 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
20314b41046eSRebecca Mckeever 					       min_addr, max_addr, nid_req);
20324b41046eSRebecca Mckeever 
20334b41046eSRebecca Mckeever 	ASSERT_NE(allocated_ptr, NULL);
20344b41046eSRebecca Mckeever 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
20354b41046eSRebecca Mckeever 
20364b41046eSRebecca Mckeever 	ASSERT_EQ(new_rgn->size, size);
20374b41046eSRebecca Mckeever 	ASSERT_EQ(new_rgn->base, exp_node->base);
20384b41046eSRebecca Mckeever 	ASSERT_LE(region_end(new_rgn), region_end(exp_node));
20394b41046eSRebecca Mckeever 
20404b41046eSRebecca Mckeever 	ASSERT_EQ(memblock.reserved.cnt, 2);
20414b41046eSRebecca Mckeever 	ASSERT_EQ(memblock.reserved.total_size, size + r1.size);
20424b41046eSRebecca Mckeever 
20434b41046eSRebecca Mckeever 	test_pass_pop();
20444b41046eSRebecca Mckeever 
20454b41046eSRebecca Mckeever 	return 0;
20464b41046eSRebecca Mckeever }
20474b41046eSRebecca Mckeever 
20484b41046eSRebecca Mckeever /*
20494b41046eSRebecca Mckeever  * A test that tries to allocate a memory region that spans over the min_addr
20504b41046eSRebecca Mckeever  * and max_addr range and overlaps with two different nodes, where the first
20514b41046eSRebecca Mckeever  * node is the requested node:
20524b41046eSRebecca Mckeever  *
20534b41046eSRebecca Mckeever  *                                min_addr
20544b41046eSRebecca Mckeever  *                                |           max_addr
20554b41046eSRebecca Mckeever  *                                |           |
20564b41046eSRebecca Mckeever  *                                v           v
20574b41046eSRebecca Mckeever  *  |           +-----------------------+-----------+              |
20584b41046eSRebecca Mckeever  *  |           |       requested       |   node3   |              |
20594b41046eSRebecca Mckeever  *  +-----------+-----------------------+-----------+--------------+
20604b41046eSRebecca Mckeever  *                                +           +
20614b41046eSRebecca Mckeever  *  |           +-----------+                                      |
20624b41046eSRebecca Mckeever  *  |           |    rgn    |                                      |
20634b41046eSRebecca Mckeever  *  +-----------+-----------+--------------------------------------+
20644b41046eSRebecca Mckeever  *
20654b41046eSRebecca Mckeever  * Expect to drop the lower limit and allocate a memory region at the beginning
20664b41046eSRebecca Mckeever  * of the requested node.
20674b41046eSRebecca Mckeever  */
206861da0332SRebecca Mckeever static int alloc_nid_bottom_up_numa_split_range_low_check(void)
20694b41046eSRebecca Mckeever {
20704b41046eSRebecca Mckeever 	int nid_req = 2;
20714b41046eSRebecca Mckeever 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
20724b41046eSRebecca Mckeever 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
20734b41046eSRebecca Mckeever 	void *allocated_ptr = NULL;
20744b41046eSRebecca Mckeever 	phys_addr_t size = SZ_512;
20754b41046eSRebecca Mckeever 	phys_addr_t min_addr;
20764b41046eSRebecca Mckeever 	phys_addr_t max_addr;
20774b41046eSRebecca Mckeever 	phys_addr_t req_node_end;
20784b41046eSRebecca Mckeever 
20794b41046eSRebecca Mckeever 	PREFIX_PUSH();
20804b41046eSRebecca Mckeever 	setup_numa_memblock(node_fractions);
20814b41046eSRebecca Mckeever 
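	/*
	 * Set up a range whose lower 256 bytes are the tail of the
	 * requested node and whose upper 256 bytes spill into node3, so
	 * the allocator is expected to drop min_addr and allocate at the
	 * beginning of the requested node.
	 */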
20824b41046eSRebecca Mckeever 	req_node_end = region_end(req_node);
20834b41046eSRebecca Mckeever 	min_addr = req_node_end - SZ_256;
20844b41046eSRebecca Mckeever 	max_addr = min_addr + size;
20854b41046eSRebecca Mckeever 
208661da0332SRebecca Mckeever 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
20874b41046eSRebecca Mckeever 					       min_addr, max_addr, nid_req);
20884b41046eSRebecca Mckeever 
20894b41046eSRebecca Mckeever 	ASSERT_NE(allocated_ptr, NULL);
20904b41046eSRebecca Mckeever 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
20914b41046eSRebecca Mckeever 
20924b41046eSRebecca Mckeever 	ASSERT_EQ(new_rgn->size, size);
20934b41046eSRebecca Mckeever 	ASSERT_EQ(new_rgn->base, req_node->base);
20944b41046eSRebecca Mckeever 	ASSERT_LE(region_end(new_rgn), req_node_end);
20954b41046eSRebecca Mckeever 
20964b41046eSRebecca Mckeever 	ASSERT_EQ(memblock.reserved.cnt, 1);
20974b41046eSRebecca Mckeever 	ASSERT_EQ(memblock.reserved.total_size, size);
20984b41046eSRebecca Mckeever 
20994b41046eSRebecca Mckeever 	test_pass_pop();
21004b41046eSRebecca Mckeever 
21014b41046eSRebecca Mckeever 	return 0;
21024b41046eSRebecca Mckeever }
21034b41046eSRebecca Mckeever 
21044b41046eSRebecca Mckeever /*
21054b41046eSRebecca Mckeever  * A test that tries to allocate a memory region that spans over the min_addr
21064b41046eSRebecca Mckeever  * and max_addr range and overlaps with two different nodes, where the second
21074b41046eSRebecca Mckeever  * node is the requested node:
21084b41046eSRebecca Mckeever  *
21094b41046eSRebecca Mckeever  *                                                min_addr
21104b41046eSRebecca Mckeever  *                                                |         max_addr
21114b41046eSRebecca Mckeever  *                                                |         |
21124b41046eSRebecca Mckeever  *                                                v         v
21134b41046eSRebecca Mckeever  *  |------------------+        +----------------------+---------+      |
21144b41046eSRebecca Mckeever  *  |     expected     |        |       previous       |requested|      |
21154b41046eSRebecca Mckeever  *  +------------------+--------+----------------------+---------+------+
21164b41046eSRebecca Mckeever  *                                                +         +
21174b41046eSRebecca Mckeever  *  |---------+                                                         |
21184b41046eSRebecca Mckeever  *  |   rgn   |                                                         |
21194b41046eSRebecca Mckeever  *  +---------+---------------------------------------------------------+
21204b41046eSRebecca Mckeever  *
21214b41046eSRebecca Mckeever  * Expect to drop the lower limit and allocate a memory region at the beginning
21224b41046eSRebecca Mckeever  * of the first node that has enough memory.
21234b41046eSRebecca Mckeever  */
212461da0332SRebecca Mckeever static int alloc_nid_bottom_up_numa_split_range_high_check(void)
21254b41046eSRebecca Mckeever {
21264b41046eSRebecca Mckeever 	int nid_req = 3;
21274b41046eSRebecca Mckeever 	int nid_exp = 0;
21284b41046eSRebecca Mckeever 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
21294b41046eSRebecca Mckeever 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
21304b41046eSRebecca Mckeever 	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
21314b41046eSRebecca Mckeever 	void *allocated_ptr = NULL;
21324b41046eSRebecca Mckeever 	phys_addr_t size = SZ_512;
21334b41046eSRebecca Mckeever 	phys_addr_t min_addr;
21344b41046eSRebecca Mckeever 	phys_addr_t max_addr;
21354b41046eSRebecca Mckeever 	phys_addr_t exp_node_end;
21364b41046eSRebecca Mckeever 
21374b41046eSRebecca Mckeever 	PREFIX_PUSH();
21384b41046eSRebecca Mckeever 	setup_numa_memblock(node_fractions);
21394b41046eSRebecca Mckeever 
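	/*
	 * Start the range 256 bytes before the requested node so that it
	 * covers the tail of the previous node: the lower limit is
	 * expected to be dropped and the allocation to land at the
	 * beginning of the first node with enough memory.
	 */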
21404b41046eSRebecca Mckeever 	exp_node_end = region_end(exp_node);
21414b41046eSRebecca Mckeever 	min_addr = req_node->base - SZ_256;
21424b41046eSRebecca Mckeever 	max_addr = min_addr + size;
21434b41046eSRebecca Mckeever 
214461da0332SRebecca Mckeever 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
21454b41046eSRebecca Mckeever 					       min_addr, max_addr, nid_req);
21464b41046eSRebecca Mckeever 
21474b41046eSRebecca Mckeever 	ASSERT_NE(allocated_ptr, NULL);
21484b41046eSRebecca Mckeever 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
21494b41046eSRebecca Mckeever 
21504b41046eSRebecca Mckeever 	ASSERT_EQ(new_rgn->size, size);
21514b41046eSRebecca Mckeever 	ASSERT_EQ(new_rgn->base, exp_node->base);
21524b41046eSRebecca Mckeever 	ASSERT_LE(region_end(new_rgn), exp_node_end);
21534b41046eSRebecca Mckeever 
21544b41046eSRebecca Mckeever 	ASSERT_EQ(memblock.reserved.cnt, 1);
21554b41046eSRebecca Mckeever 	ASSERT_EQ(memblock.reserved.total_size, size);
21564b41046eSRebecca Mckeever 
21574b41046eSRebecca Mckeever 	test_pass_pop();
21584b41046eSRebecca Mckeever 
21594b41046eSRebecca Mckeever 	return 0;
21604b41046eSRebecca Mckeever }
21614b41046eSRebecca Mckeever 
21624b41046eSRebecca Mckeever /*
21634b41046eSRebecca Mckeever  * A test that tries to allocate a memory region that spans over the min_addr
21644b41046eSRebecca Mckeever  * and max_addr range and overlaps with two different nodes, where the requested
21654b41046eSRebecca Mckeever  * node ends before min_addr:
21664b41046eSRebecca Mckeever  *
21674b41046eSRebecca Mckeever  *                                          min_addr
21684b41046eSRebecca Mckeever  *                                         |         max_addr
21694b41046eSRebecca Mckeever  *                                         |         |
21704b41046eSRebecca Mckeever  *                                         v         v
21714b41046eSRebecca Mckeever  *  |    +---------------+        +-------------+---------+         |
21724b41046eSRebecca Mckeever  *  |    |   requested   |        |    node1    |  node2  |         |
21734b41046eSRebecca Mckeever  *  +----+---------------+--------+-------------+---------+---------+
21744b41046eSRebecca Mckeever  *                                         +         +
21754b41046eSRebecca Mckeever  *  |    +---------+                                                |
21764b41046eSRebecca Mckeever  *  |    |   rgn   |                                                |
21774b41046eSRebecca Mckeever  *  +----+---------+------------------------------------------------+
21784b41046eSRebecca Mckeever  *
21794b41046eSRebecca Mckeever  * Expect to drop the lower limit and allocate a memory region that starts at
21804b41046eSRebecca Mckeever  * the beginning of the requested node.
21814b41046eSRebecca Mckeever  */
218261da0332SRebecca Mckeever static int alloc_nid_bottom_up_numa_no_overlap_split_check(void)
21834b41046eSRebecca Mckeever {
21844b41046eSRebecca Mckeever 	int nid_req = 2;
21854b41046eSRebecca Mckeever 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
21864b41046eSRebecca Mckeever 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
21874b41046eSRebecca Mckeever 	struct memblock_region *node2 = &memblock.memory.regions[6];
21884b41046eSRebecca Mckeever 	void *allocated_ptr = NULL;
21894b41046eSRebecca Mckeever 	phys_addr_t size;
21904b41046eSRebecca Mckeever 	phys_addr_t min_addr;
21914b41046eSRebecca Mckeever 	phys_addr_t max_addr;
21924b41046eSRebecca Mckeever 
21934b41046eSRebecca Mckeever 	PREFIX_PUSH();
21944b41046eSRebecca Mckeever 	setup_numa_memblock(node_fractions);
21954b41046eSRebecca Mckeever 
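	/*
	 * The range starts 256 bytes before node2 and ends 256 bytes into
	 * it, while the requested node lies entirely below min_addr, so
	 * the lower limit is expected to be dropped.
	 */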
21964b41046eSRebecca Mckeever 	size = SZ_512;
21974b41046eSRebecca Mckeever 	min_addr = node2->base - SZ_256;
21984b41046eSRebecca Mckeever 	max_addr = min_addr + size;
21994b41046eSRebecca Mckeever 
220061da0332SRebecca Mckeever 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
22014b41046eSRebecca Mckeever 					       min_addr, max_addr, nid_req);
22024b41046eSRebecca Mckeever 
22034b41046eSRebecca Mckeever 	ASSERT_NE(allocated_ptr, NULL);
22044b41046eSRebecca Mckeever 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
22054b41046eSRebecca Mckeever 
22064b41046eSRebecca Mckeever 	ASSERT_EQ(new_rgn->size, size);
22074b41046eSRebecca Mckeever 	ASSERT_EQ(new_rgn->base, req_node->base);
22084b41046eSRebecca Mckeever 	ASSERT_LE(region_end(new_rgn), region_end(req_node));
22094b41046eSRebecca Mckeever 
22104b41046eSRebecca Mckeever 	ASSERT_EQ(memblock.reserved.cnt, 1);
22114b41046eSRebecca Mckeever 	ASSERT_EQ(memblock.reserved.total_size, size);
22124b41046eSRebecca Mckeever 
22134b41046eSRebecca Mckeever 	test_pass_pop();
22144b41046eSRebecca Mckeever 
22154b41046eSRebecca Mckeever 	return 0;
22164b41046eSRebecca Mckeever }
22174b41046eSRebecca Mckeever 
22184b41046eSRebecca Mckeever /*
22194b41046eSRebecca Mckeever  * A test that tries to allocate memory within min_addr and max_addr range when
22204b41046eSRebecca Mckeever  * the requested node and the range do not overlap, and the requested node ends
22214b41046eSRebecca Mckeever  * before min_addr. The range overlaps with multiple nodes along node
22224b41046eSRebecca Mckeever  * boundaries:
22234b41046eSRebecca Mckeever  *
22244b41046eSRebecca Mckeever  *                          min_addr
22254b41046eSRebecca Mckeever  *                          |                                 max_addr
22264b41046eSRebecca Mckeever  *                          |                                 |
22274b41046eSRebecca Mckeever  *                          v                                 v
22284b41046eSRebecca Mckeever  *  |-----------+           +----------+----...----+----------+      |
22294b41046eSRebecca Mckeever  *  | requested |           | min node |    ...    | max node |      |
22304b41046eSRebecca Mckeever  *  +-----------+-----------+----------+----...----+----------+------+
22314b41046eSRebecca Mckeever  *                          +                                 +
22324b41046eSRebecca Mckeever  *  |                       +-----+                                  |
22334b41046eSRebecca Mckeever  *  |                       | rgn |                                  |
22344b41046eSRebecca Mckeever  *  +-----------------------+-----+----------------------------------+
22354b41046eSRebecca Mckeever  *
22364b41046eSRebecca Mckeever  * Expect to allocate a memory region at the beginning of the first node
22374b41046eSRebecca Mckeever  * in the range after falling back to NUMA_NO_NODE.
22384b41046eSRebecca Mckeever  */
223961da0332SRebecca Mckeever static int alloc_nid_bottom_up_numa_no_overlap_low_check(void)
22404b41046eSRebecca Mckeever {
22414b41046eSRebecca Mckeever 	int nid_req = 0;
22424b41046eSRebecca Mckeever 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
22434b41046eSRebecca Mckeever 	struct memblock_region *min_node = &memblock.memory.regions[2];
22444b41046eSRebecca Mckeever 	struct memblock_region *max_node = &memblock.memory.regions[5];
22454b41046eSRebecca Mckeever 	void *allocated_ptr = NULL;
22464b41046eSRebecca Mckeever 	phys_addr_t size = SZ_64;
22474b41046eSRebecca Mckeever 	phys_addr_t max_addr;
22484b41046eSRebecca Mckeever 	phys_addr_t min_addr;
22494b41046eSRebecca Mckeever 
22504b41046eSRebecca Mckeever 	PREFIX_PUSH();
22514b41046eSRebecca Mckeever 	setup_numa_memblock(node_fractions);
22524b41046eSRebecca Mckeever 
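	/* The range spans nodes 2 through 5; the requested node 0 lies below it. */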
22534b41046eSRebecca Mckeever 	min_addr = min_node->base;
22544b41046eSRebecca Mckeever 	max_addr = region_end(max_node);
22554b41046eSRebecca Mckeever 
225661da0332SRebecca Mckeever 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
22574b41046eSRebecca Mckeever 					       min_addr, max_addr, nid_req);
22584b41046eSRebecca Mckeever 
22594b41046eSRebecca Mckeever 	ASSERT_NE(allocated_ptr, NULL);
22604b41046eSRebecca Mckeever 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
22614b41046eSRebecca Mckeever 
22624b41046eSRebecca Mckeever 	ASSERT_EQ(new_rgn->size, size);
22634b41046eSRebecca Mckeever 	ASSERT_EQ(new_rgn->base, min_addr);
22644b41046eSRebecca Mckeever 	ASSERT_LE(region_end(new_rgn), region_end(min_node));
22654b41046eSRebecca Mckeever 
22664b41046eSRebecca Mckeever 	ASSERT_EQ(memblock.reserved.cnt, 1);
22674b41046eSRebecca Mckeever 	ASSERT_EQ(memblock.reserved.total_size, size);
22684b41046eSRebecca Mckeever 
22694b41046eSRebecca Mckeever 	test_pass_pop();
22704b41046eSRebecca Mckeever 
22714b41046eSRebecca Mckeever 	return 0;
22724b41046eSRebecca Mckeever }
22734b41046eSRebecca Mckeever 
22744b41046eSRebecca Mckeever /*
22754b41046eSRebecca Mckeever  * A test that tries to allocate memory within min_addr and max_addr range when
22764b41046eSRebecca Mckeever  * the requested node and the range do not overlap, and the requested node starts
22774b41046eSRebecca Mckeever  * after max_addr. The range overlaps with multiple nodes along node
22784b41046eSRebecca Mckeever  * boundaries:
22794b41046eSRebecca Mckeever  *
22804b41046eSRebecca Mckeever  *        min_addr
22814b41046eSRebecca Mckeever  *        |                                 max_addr
22824b41046eSRebecca Mckeever  *        |                                 |
22834b41046eSRebecca Mckeever  *        v                                 v
22844b41046eSRebecca Mckeever  *  |     +----------+----...----+----------+         +---------+   |
22854b41046eSRebecca Mckeever  *  |     | min node |    ...    | max node |         |requested|   |
22864b41046eSRebecca Mckeever  *  +-----+----------+----...----+----------+---------+---------+---+
22874b41046eSRebecca Mckeever  *        +                                 +
22884b41046eSRebecca Mckeever  *  |     +-----+                                                   |
22894b41046eSRebecca Mckeever  *  |     | rgn |                                                   |
22904b41046eSRebecca Mckeever  *  +-----+-----+---------------------------------------------------+
22914b41046eSRebecca Mckeever  *
22924b41046eSRebecca Mckeever  * Expect to allocate a memory region at the beginning of the first node
22934b41046eSRebecca Mckeever  * in the range after falling back to NUMA_NO_NODE.
22944b41046eSRebecca Mckeever  */
229561da0332SRebecca Mckeever static int alloc_nid_bottom_up_numa_no_overlap_high_check(void)
22964b41046eSRebecca Mckeever {
22974b41046eSRebecca Mckeever 	int nid_req = 7;
22984b41046eSRebecca Mckeever 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
22994b41046eSRebecca Mckeever 	struct memblock_region *min_node = &memblock.memory.regions[2];
23004b41046eSRebecca Mckeever 	struct memblock_region *max_node = &memblock.memory.regions[5];
23014b41046eSRebecca Mckeever 	void *allocated_ptr = NULL;
23024b41046eSRebecca Mckeever 	phys_addr_t size = SZ_64;
23034b41046eSRebecca Mckeever 	phys_addr_t max_addr;
23044b41046eSRebecca Mckeever 	phys_addr_t min_addr;
23054b41046eSRebecca Mckeever 
23064b41046eSRebecca Mckeever 	PREFIX_PUSH();
23074b41046eSRebecca Mckeever 	setup_numa_memblock(node_fractions);
23084b41046eSRebecca Mckeever 
23094b41046eSRebecca Mckeever 	min_addr = min_node->base;
23104b41046eSRebecca Mckeever 	max_addr = region_end(max_node);
23114b41046eSRebecca Mckeever 
231261da0332SRebecca Mckeever 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
23134b41046eSRebecca Mckeever 					       min_addr, max_addr, nid_req);
23144b41046eSRebecca Mckeever 
23154b41046eSRebecca Mckeever 	ASSERT_NE(allocated_ptr, NULL);
23164b41046eSRebecca Mckeever 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
23174b41046eSRebecca Mckeever 
23184b41046eSRebecca Mckeever 	ASSERT_EQ(new_rgn->size, size);
23194b41046eSRebecca Mckeever 	ASSERT_EQ(new_rgn->base, min_addr);
23204b41046eSRebecca Mckeever 	ASSERT_LE(region_end(new_rgn), region_end(min_node));
23214b41046eSRebecca Mckeever 
23224b41046eSRebecca Mckeever 	ASSERT_EQ(memblock.reserved.cnt, 1);
23234b41046eSRebecca Mckeever 	ASSERT_EQ(memblock.reserved.total_size, size);
23244b41046eSRebecca Mckeever 
23254b41046eSRebecca Mckeever 	test_pass_pop();
23264b41046eSRebecca Mckeever 
23274b41046eSRebecca Mckeever 	return 0;
23284b41046eSRebecca Mckeever }
23294b41046eSRebecca Mckeever 
23303e4519b7SRebecca Mckeever /*
23313e4519b7SRebecca Mckeever  * A test that tries to allocate a memory region in a specific NUMA node that
23323e4519b7SRebecca Mckeever  * does not have enough memory to allocate a region of the requested size.
23333e4519b7SRebecca Mckeever  * Additionally, none of the nodes have enough memory to allocate the region:
23343e4519b7SRebecca Mckeever  *
23353e4519b7SRebecca Mckeever  * +-----------------------------------+
23363e4519b7SRebecca Mckeever  * |                new                |
23373e4519b7SRebecca Mckeever  * +-----------------------------------+
23383e4519b7SRebecca Mckeever  *     |-------+-------+-------+-------+-------+-------+-------+-------|
23393e4519b7SRebecca Mckeever  *     | node0 | node1 | node2 | node3 | node4 | node5 | node6 | node7 |
23403e4519b7SRebecca Mckeever  *     +-------+-------+-------+-------+-------+-------+-------+-------+
23413e4519b7SRebecca Mckeever  *
23423e4519b7SRebecca Mckeever  * Expect no allocation to happen.
23433e4519b7SRebecca Mckeever  */
234461da0332SRebecca Mckeever static int alloc_nid_numa_large_region_generic_check(void)
23453e4519b7SRebecca Mckeever {
23463e4519b7SRebecca Mckeever 	int nid_req = 3;
23473e4519b7SRebecca Mckeever 	void *allocated_ptr = NULL;
23483e4519b7SRebecca Mckeever 	phys_addr_t size = MEM_SIZE / SZ_2;
23493e4519b7SRebecca Mckeever 	phys_addr_t min_addr;
23503e4519b7SRebecca Mckeever 	phys_addr_t max_addr;
23513e4519b7SRebecca Mckeever 
23523e4519b7SRebecca Mckeever 	PREFIX_PUSH();
23533e4519b7SRebecca Mckeever 	setup_numa_memblock(node_fractions);
23543e4519b7SRebecca Mckeever 
23553e4519b7SRebecca Mckeever 	min_addr = memblock_start_of_DRAM();
23563e4519b7SRebecca Mckeever 	max_addr = memblock_end_of_DRAM();
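	/*
	 * Request half of MEM_SIZE: the largest node holds only a quarter
	 * of MEM_SIZE (see node_fractions), so no single node can satisfy
	 * the allocation and it is expected to fail.
	 */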
23573e4519b7SRebecca Mckeever 
235861da0332SRebecca Mckeever 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
23593e4519b7SRebecca Mckeever 					       min_addr, max_addr, nid_req);
23603e4519b7SRebecca Mckeever 	ASSERT_EQ(allocated_ptr, NULL);
23613e4519b7SRebecca Mckeever 
23623e4519b7SRebecca Mckeever 	test_pass_pop();
23633e4519b7SRebecca Mckeever 
23643e4519b7SRebecca Mckeever 	return 0;
23653e4519b7SRebecca Mckeever }
23663e4519b7SRebecca Mckeever 
23673e4519b7SRebecca Mckeever /*
23683e4519b7SRebecca Mckeever  * A test that tries to allocate memory within min_addr and max_addr range when
23693e4519b7SRebecca Mckeever  * there are two reserved regions at the borders. The requested node starts at
23703e4519b7SRebecca Mckeever  * min_addr, ends at max_addr, and is the same size as the region to be
23713e4519b7SRebecca Mckeever  * allocated:
23723e4519b7SRebecca Mckeever  *
23733e4519b7SRebecca Mckeever  *                     min_addr
23743e4519b7SRebecca Mckeever  *                     |                       max_addr
23753e4519b7SRebecca Mckeever  *                     |                       |
23763e4519b7SRebecca Mckeever  *                     v                       v
23773e4519b7SRebecca Mckeever  *  |      +-----------+-----------------------+-----------------------|
23783e4519b7SRebecca Mckeever  *  |      |   node5   |       requested       |         node7         |
23793e4519b7SRebecca Mckeever  *  +------+-----------+-----------------------+-----------------------+
23803e4519b7SRebecca Mckeever  *                     +                       +
23813e4519b7SRebecca Mckeever  *  |             +----+-----------------------+----+                  |
23823e4519b7SRebecca Mckeever  *  |             | r2 |          new          | r1 |                  |
23833e4519b7SRebecca Mckeever  *  +-------------+----+-----------------------+----+------------------+
23843e4519b7SRebecca Mckeever  *
23853e4519b7SRebecca Mckeever  * Expect to merge all of the regions into one. The region counter and total
23863e4519b7SRebecca Mckeever  * size fields get updated.
23873e4519b7SRebecca Mckeever  */
238861da0332SRebecca Mckeever static int alloc_nid_numa_reserved_full_merge_generic_check(void)
23893e4519b7SRebecca Mckeever {
23903e4519b7SRebecca Mckeever 	int nid_req = 6;
23913e4519b7SRebecca Mckeever 	int nid_next = nid_req + 1;
23923e4519b7SRebecca Mckeever 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
23933e4519b7SRebecca Mckeever 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
23943e4519b7SRebecca Mckeever 	struct memblock_region *next_node = &memblock.memory.regions[nid_next];
23953e4519b7SRebecca Mckeever 	void *allocated_ptr = NULL;
23963e4519b7SRebecca Mckeever 	struct region r1, r2;
23973e4519b7SRebecca Mckeever 	phys_addr_t size = req_node->size;
23983e4519b7SRebecca Mckeever 	phys_addr_t total_size;
23993e4519b7SRebecca Mckeever 	phys_addr_t max_addr;
24003e4519b7SRebecca Mckeever 	phys_addr_t min_addr;
24013e4519b7SRebecca Mckeever 
24023e4519b7SRebecca Mckeever 	PREFIX_PUSH();
24033e4519b7SRebecca Mckeever 	setup_numa_memblock(node_fractions);
24043e4519b7SRebecca Mckeever 
24053e4519b7SRebecca Mckeever 	r1.base = next_node->base;
24063e4519b7SRebecca Mckeever 	r1.size = SZ_128;
24073e4519b7SRebecca Mckeever 
24083e4519b7SRebecca Mckeever 	r2.size = SZ_128;
24093e4519b7SRebecca Mckeever 	r2.base = r1.base - (size + r2.size);
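	/*
	 * r2 is placed so that exactly `size` bytes remain free between
	 * the end of r2 and the start of r1; the new region fills that
	 * gap and merges with both reservations.
	 */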
24103e4519b7SRebecca Mckeever 
24113e4519b7SRebecca Mckeever 	total_size = r1.size + r2.size + size;
24123e4519b7SRebecca Mckeever 	min_addr = r2.base + r2.size;
24133e4519b7SRebecca Mckeever 	max_addr = r1.base;
24143e4519b7SRebecca Mckeever 
24153e4519b7SRebecca Mckeever 	memblock_reserve(r1.base, r1.size);
24163e4519b7SRebecca Mckeever 	memblock_reserve(r2.base, r2.size);
24173e4519b7SRebecca Mckeever 
241861da0332SRebecca Mckeever 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
24193e4519b7SRebecca Mckeever 					       min_addr, max_addr, nid_req);
24203e4519b7SRebecca Mckeever 
24213e4519b7SRebecca Mckeever 	ASSERT_NE(allocated_ptr, NULL);
24223e4519b7SRebecca Mckeever 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
24233e4519b7SRebecca Mckeever 
24243e4519b7SRebecca Mckeever 	ASSERT_EQ(new_rgn->size, total_size);
24253e4519b7SRebecca Mckeever 	ASSERT_EQ(new_rgn->base, r2.base);
24263e4519b7SRebecca Mckeever 
24273e4519b7SRebecca Mckeever 	ASSERT_LE(new_rgn->base, req_node->base);
24283e4519b7SRebecca Mckeever 	ASSERT_LE(region_end(req_node), region_end(new_rgn));
24293e4519b7SRebecca Mckeever 
24303e4519b7SRebecca Mckeever 	ASSERT_EQ(memblock.reserved.cnt, 1);
24313e4519b7SRebecca Mckeever 	ASSERT_EQ(memblock.reserved.total_size, total_size);
24323e4519b7SRebecca Mckeever 
24333e4519b7SRebecca Mckeever 	test_pass_pop();
24343e4519b7SRebecca Mckeever 
24353e4519b7SRebecca Mckeever 	return 0;
24363e4519b7SRebecca Mckeever }
24373e4519b7SRebecca Mckeever 
24383e4519b7SRebecca Mckeever /*
24393e4519b7SRebecca Mckeever  * A test that tries to allocate memory within min_addr and max_addr range,
24403e4519b7SRebecca Mckeever  * where the total range can fit the region, but it is split between two nodes
24413e4519b7SRebecca Mckeever  * and everything else is reserved. Additionally, nid is set to NUMA_NO_NODE
24423e4519b7SRebecca Mckeever  * instead of requesting a specific node:
24433e4519b7SRebecca Mckeever  *
24443e4519b7SRebecca Mckeever  *                         +-----------+
24453e4519b7SRebecca Mckeever  *                         |    new    |
24463e4519b7SRebecca Mckeever  *                         +-----------+
24473e4519b7SRebecca Mckeever  *  |      +---------------------+-----------|
24483e4519b7SRebecca Mckeever  *  |      |      prev node      | next node |
24493e4519b7SRebecca Mckeever  *  +------+---------------------+-----------+
24503e4519b7SRebecca Mckeever  *                         +           +
24513e4519b7SRebecca Mckeever  *  |----------------------+           +-----|
24523e4519b7SRebecca Mckeever  *  |          r1          |           |  r2 |
24533e4519b7SRebecca Mckeever  *  +----------------------+-----------+-----+
24543e4519b7SRebecca Mckeever  *                         ^           ^
24553e4519b7SRebecca Mckeever  *                         |           |
24563e4519b7SRebecca Mckeever  *                         |           max_addr
24573e4519b7SRebecca Mckeever  *                         |
24583e4519b7SRebecca Mckeever  *                         min_addr
24593e4519b7SRebecca Mckeever  *
24603e4519b7SRebecca Mckeever  * Expect no allocation to happen.
24613e4519b7SRebecca Mckeever  */
246261da0332SRebecca Mckeever static int alloc_nid_numa_split_all_reserved_generic_check(void)
24633e4519b7SRebecca Mckeever {
24643e4519b7SRebecca Mckeever 	void *allocated_ptr = NULL;
24653e4519b7SRebecca Mckeever 	struct memblock_region *next_node = &memblock.memory.regions[7];
24663e4519b7SRebecca Mckeever 	struct region r1, r2;
24673e4519b7SRebecca Mckeever 	phys_addr_t size = SZ_256;
24683e4519b7SRebecca Mckeever 	phys_addr_t max_addr;
24693e4519b7SRebecca Mckeever 	phys_addr_t min_addr;
24703e4519b7SRebecca Mckeever 
24713e4519b7SRebecca Mckeever 	PREFIX_PUSH();
24723e4519b7SRebecca Mckeever 	setup_numa_memblock(node_fractions);
24733e4519b7SRebecca Mckeever 
24743e4519b7SRebecca Mckeever 	r2.base = next_node->base + SZ_128;
24753e4519b7SRebecca Mckeever 	r2.size = memblock_end_of_DRAM() - r2.base;
24763e4519b7SRebecca Mckeever 
24773e4519b7SRebecca Mckeever 	r1.size = MEM_SIZE - (r2.size + size);
24783e4519b7SRebecca Mckeever 	r1.base = memblock_start_of_DRAM();
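	/*
	 * r1 and r2 leave exactly `size` bytes free, but the gap straddles
	 * the boundary between the last two nodes, so a single region of
	 * `size` bytes cannot be placed there.
	 */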
24793e4519b7SRebecca Mckeever 
24803e4519b7SRebecca Mckeever 	min_addr = r1.base + r1.size;
24813e4519b7SRebecca Mckeever 	max_addr = r2.base;
24823e4519b7SRebecca Mckeever 
24833e4519b7SRebecca Mckeever 	memblock_reserve(r1.base, r1.size);
24843e4519b7SRebecca Mckeever 	memblock_reserve(r2.base, r2.size);
24853e4519b7SRebecca Mckeever 
248661da0332SRebecca Mckeever 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
24873e4519b7SRebecca Mckeever 					       min_addr, max_addr,
24883e4519b7SRebecca Mckeever 					       NUMA_NO_NODE);
24893e4519b7SRebecca Mckeever 
24903e4519b7SRebecca Mckeever 	ASSERT_EQ(allocated_ptr, NULL);
24913e4519b7SRebecca Mckeever 
24923e4519b7SRebecca Mckeever 	test_pass_pop();
24933e4519b7SRebecca Mckeever 
24943e4519b7SRebecca Mckeever 	return 0;
24953e4519b7SRebecca Mckeever }
24963e4519b7SRebecca Mckeever 
2497*b842f4f5SClaudio Migliorelli /*
2498*b842f4f5SClaudio Migliorelli  * A simple test that tries to allocate a memory region through
2499*b842f4f5SClaudio Migliorelli  * memblock_alloc_node() on a NUMA node with id `nid`. Expect the new region to
2500*b842f4f5SClaudio Migliorelli  * have the correct NUMA node set.
2501*b842f4f5SClaudio Migliorelli  */
2502*b842f4f5SClaudio Migliorelli static int alloc_node_on_correct_nid(void)
2503*b842f4f5SClaudio Migliorelli {
2504*b842f4f5SClaudio Migliorelli 	int nid_req = 2;
2505*b842f4f5SClaudio Migliorelli 	void *allocated_ptr = NULL;
2506*b842f4f5SClaudio Migliorelli #ifdef CONFIG_NUMA
2507*b842f4f5SClaudio Migliorelli 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
2508*b842f4f5SClaudio Migliorelli #endif
2509*b842f4f5SClaudio Migliorelli 	phys_addr_t size = SZ_512;
2510*b842f4f5SClaudio Migliorelli 
2511*b842f4f5SClaudio Migliorelli 	PREFIX_PUSH();
2512*b842f4f5SClaudio Migliorelli 	setup_numa_memblock(node_fractions);
2513*b842f4f5SClaudio Migliorelli 
2514*b842f4f5SClaudio Migliorelli 	allocated_ptr = memblock_alloc_node(size, SMP_CACHE_BYTES, nid_req);
2515*b842f4f5SClaudio Migliorelli 
2516*b842f4f5SClaudio Migliorelli 	ASSERT_NE(allocated_ptr, NULL);
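	/* struct memblock_region only carries a nid field when CONFIG_NUMA is set. */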
2517*b842f4f5SClaudio Migliorelli #ifdef CONFIG_NUMA
2518*b842f4f5SClaudio Migliorelli 	ASSERT_EQ(nid_req, req_node->nid);
2519*b842f4f5SClaudio Migliorelli #endif
2520*b842f4f5SClaudio Migliorelli 
2521*b842f4f5SClaudio Migliorelli 	test_pass_pop();
2522*b842f4f5SClaudio Migliorelli 
2523*b842f4f5SClaudio Migliorelli 	return 0;
2524*b842f4f5SClaudio Migliorelli }
2525*b842f4f5SClaudio Migliorelli 
252650c80241SRebecca Mckeever /* Test case wrappers for NUMA tests */
252761da0332SRebecca Mckeever static int alloc_nid_numa_simple_check(void)
252850c80241SRebecca Mckeever {
252950c80241SRebecca Mckeever 	test_print("\tRunning %s...\n", __func__);
253050c80241SRebecca Mckeever 	memblock_set_bottom_up(false);
253161da0332SRebecca Mckeever 	alloc_nid_top_down_numa_simple_check();
25324b41046eSRebecca Mckeever 	memblock_set_bottom_up(true);
253361da0332SRebecca Mckeever 	alloc_nid_bottom_up_numa_simple_check();
253450c80241SRebecca Mckeever 
253550c80241SRebecca Mckeever 	return 0;
253650c80241SRebecca Mckeever }
253750c80241SRebecca Mckeever 
253861da0332SRebecca Mckeever static int alloc_nid_numa_small_node_check(void)
253950c80241SRebecca Mckeever {
254050c80241SRebecca Mckeever 	test_print("\tRunning %s...\n", __func__);
254150c80241SRebecca Mckeever 	memblock_set_bottom_up(false);
254261da0332SRebecca Mckeever 	alloc_nid_top_down_numa_small_node_check();
25434b41046eSRebecca Mckeever 	memblock_set_bottom_up(true);
254461da0332SRebecca Mckeever 	alloc_nid_bottom_up_numa_small_node_check();
254550c80241SRebecca Mckeever 
254650c80241SRebecca Mckeever 	return 0;
254750c80241SRebecca Mckeever }
254850c80241SRebecca Mckeever 
254961da0332SRebecca Mckeever static int alloc_nid_numa_node_reserved_check(void)
255050c80241SRebecca Mckeever {
255150c80241SRebecca Mckeever 	test_print("\tRunning %s...\n", __func__);
255250c80241SRebecca Mckeever 	memblock_set_bottom_up(false);
255361da0332SRebecca Mckeever 	alloc_nid_top_down_numa_node_reserved_check();
25544b41046eSRebecca Mckeever 	memblock_set_bottom_up(true);
255561da0332SRebecca Mckeever 	alloc_nid_bottom_up_numa_node_reserved_check();
255650c80241SRebecca Mckeever 
255750c80241SRebecca Mckeever 	return 0;
255850c80241SRebecca Mckeever }
255950c80241SRebecca Mckeever 
256061da0332SRebecca Mckeever static int alloc_nid_numa_part_reserved_check(void)
256150c80241SRebecca Mckeever {
256250c80241SRebecca Mckeever 	test_print("\tRunning %s...\n", __func__);
256350c80241SRebecca Mckeever 	memblock_set_bottom_up(false);
256461da0332SRebecca Mckeever 	alloc_nid_top_down_numa_part_reserved_check();
25654b41046eSRebecca Mckeever 	memblock_set_bottom_up(true);
256661da0332SRebecca Mckeever 	alloc_nid_bottom_up_numa_part_reserved_check();
256750c80241SRebecca Mckeever 
256850c80241SRebecca Mckeever 	return 0;
256950c80241SRebecca Mckeever }
257050c80241SRebecca Mckeever 
257161da0332SRebecca Mckeever static int alloc_nid_numa_part_reserved_fallback_check(void)
257250c80241SRebecca Mckeever {
257350c80241SRebecca Mckeever 	test_print("\tRunning %s...\n", __func__);
257450c80241SRebecca Mckeever 	memblock_set_bottom_up(false);
257561da0332SRebecca Mckeever 	alloc_nid_top_down_numa_part_reserved_fallback_check();
25764b41046eSRebecca Mckeever 	memblock_set_bottom_up(true);
257761da0332SRebecca Mckeever 	alloc_nid_bottom_up_numa_part_reserved_fallback_check();
257850c80241SRebecca Mckeever 
257950c80241SRebecca Mckeever 	return 0;
258050c80241SRebecca Mckeever }
258150c80241SRebecca Mckeever 
258261da0332SRebecca Mckeever static int alloc_nid_numa_split_range_low_check(void)
258350c80241SRebecca Mckeever {
258450c80241SRebecca Mckeever 	test_print("\tRunning %s...\n", __func__);
258550c80241SRebecca Mckeever 	memblock_set_bottom_up(false);
258661da0332SRebecca Mckeever 	alloc_nid_top_down_numa_split_range_low_check();
25874b41046eSRebecca Mckeever 	memblock_set_bottom_up(true);
258861da0332SRebecca Mckeever 	alloc_nid_bottom_up_numa_split_range_low_check();
258950c80241SRebecca Mckeever 
259050c80241SRebecca Mckeever 	return 0;
259150c80241SRebecca Mckeever }
259250c80241SRebecca Mckeever 
259361da0332SRebecca Mckeever static int alloc_nid_numa_split_range_high_check(void)
259450c80241SRebecca Mckeever {
259550c80241SRebecca Mckeever 	test_print("\tRunning %s...\n", __func__);
259650c80241SRebecca Mckeever 	memblock_set_bottom_up(false);
259761da0332SRebecca Mckeever 	alloc_nid_top_down_numa_split_range_high_check();
25984b41046eSRebecca Mckeever 	memblock_set_bottom_up(true);
259961da0332SRebecca Mckeever 	alloc_nid_bottom_up_numa_split_range_high_check();
260050c80241SRebecca Mckeever 
260150c80241SRebecca Mckeever 	return 0;
260250c80241SRebecca Mckeever }
260350c80241SRebecca Mckeever 
260461da0332SRebecca Mckeever static int alloc_nid_numa_no_overlap_split_check(void)
260550c80241SRebecca Mckeever {
260650c80241SRebecca Mckeever 	test_print("\tRunning %s...\n", __func__);
260750c80241SRebecca Mckeever 	memblock_set_bottom_up(false);
260861da0332SRebecca Mckeever 	alloc_nid_top_down_numa_no_overlap_split_check();
26094b41046eSRebecca Mckeever 	memblock_set_bottom_up(true);
261061da0332SRebecca Mckeever 	alloc_nid_bottom_up_numa_no_overlap_split_check();
261150c80241SRebecca Mckeever 
261250c80241SRebecca Mckeever 	return 0;
261350c80241SRebecca Mckeever }
261450c80241SRebecca Mckeever 
261561da0332SRebecca Mckeever static int alloc_nid_numa_no_overlap_low_check(void)
261650c80241SRebecca Mckeever {
261750c80241SRebecca Mckeever 	test_print("\tRunning %s...\n", __func__);
261850c80241SRebecca Mckeever 	memblock_set_bottom_up(false);
261961da0332SRebecca Mckeever 	alloc_nid_top_down_numa_no_overlap_low_check();
26204b41046eSRebecca Mckeever 	memblock_set_bottom_up(true);
262161da0332SRebecca Mckeever 	alloc_nid_bottom_up_numa_no_overlap_low_check();
262250c80241SRebecca Mckeever 
262350c80241SRebecca Mckeever 	return 0;
262450c80241SRebecca Mckeever }
262550c80241SRebecca Mckeever 
262661da0332SRebecca Mckeever static int alloc_nid_numa_no_overlap_high_check(void)
262750c80241SRebecca Mckeever {
262850c80241SRebecca Mckeever 	test_print("\tRunning %s...\n", __func__);
262950c80241SRebecca Mckeever 	memblock_set_bottom_up(false);
263061da0332SRebecca Mckeever 	alloc_nid_top_down_numa_no_overlap_high_check();
26314b41046eSRebecca Mckeever 	memblock_set_bottom_up(true);
263261da0332SRebecca Mckeever 	alloc_nid_bottom_up_numa_no_overlap_high_check();
263350c80241SRebecca Mckeever 
263450c80241SRebecca Mckeever 	return 0;
263550c80241SRebecca Mckeever }
263650c80241SRebecca Mckeever 
263761da0332SRebecca Mckeever static int alloc_nid_numa_large_region_check(void)
26383e4519b7SRebecca Mckeever {
26393e4519b7SRebecca Mckeever 	test_print("\tRunning %s...\n", __func__);
264061da0332SRebecca Mckeever 	run_top_down(alloc_nid_numa_large_region_generic_check);
264161da0332SRebecca Mckeever 	run_bottom_up(alloc_nid_numa_large_region_generic_check);
26423e4519b7SRebecca Mckeever 
26433e4519b7SRebecca Mckeever 	return 0;
26443e4519b7SRebecca Mckeever }
26453e4519b7SRebecca Mckeever 
264661da0332SRebecca Mckeever static int alloc_nid_numa_reserved_full_merge_check(void)
26473e4519b7SRebecca Mckeever {
26483e4519b7SRebecca Mckeever 	test_print("\tRunning %s...\n", __func__);
264961da0332SRebecca Mckeever 	run_top_down(alloc_nid_numa_reserved_full_merge_generic_check);
265061da0332SRebecca Mckeever 	run_bottom_up(alloc_nid_numa_reserved_full_merge_generic_check);
26513e4519b7SRebecca Mckeever 
26523e4519b7SRebecca Mckeever 	return 0;
26533e4519b7SRebecca Mckeever }
26543e4519b7SRebecca Mckeever 
265561da0332SRebecca Mckeever static int alloc_nid_numa_split_all_reserved_check(void)
26563e4519b7SRebecca Mckeever {
26573e4519b7SRebecca Mckeever 	test_print("\tRunning %s...\n", __func__);
265861da0332SRebecca Mckeever 	run_top_down(alloc_nid_numa_split_all_reserved_generic_check);
265961da0332SRebecca Mckeever 	run_bottom_up(alloc_nid_numa_split_all_reserved_generic_check);
26603e4519b7SRebecca Mckeever 
26613e4519b7SRebecca Mckeever 	return 0;
26623e4519b7SRebecca Mckeever }
26633e4519b7SRebecca Mckeever 
2664*b842f4f5SClaudio Migliorelli static int alloc_node_numa_on_correct_nid(void)
2665*b842f4f5SClaudio Migliorelli {
2666*b842f4f5SClaudio Migliorelli 	test_print("\tRunning %s...\n", __func__);
2667*b842f4f5SClaudio Migliorelli 	run_top_down(alloc_node_on_correct_nid);
2668*b842f4f5SClaudio Migliorelli 	run_bottom_up(alloc_node_on_correct_nid);
2669*b842f4f5SClaudio Migliorelli 
2670*b842f4f5SClaudio Migliorelli 	return 0;
2671*b842f4f5SClaudio Migliorelli }
2672*b842f4f5SClaudio Migliorelli 
267350c80241SRebecca Mckeever int __memblock_alloc_nid_numa_checks(void)
267450c80241SRebecca Mckeever {
267550c80241SRebecca Mckeever 	test_print("Running %s NUMA tests...\n",
267661da0332SRebecca Mckeever 		   get_memblock_alloc_nid_name(alloc_nid_test_flags));
267750c80241SRebecca Mckeever 
267861da0332SRebecca Mckeever 	alloc_nid_numa_simple_check();
267961da0332SRebecca Mckeever 	alloc_nid_numa_small_node_check();
268061da0332SRebecca Mckeever 	alloc_nid_numa_node_reserved_check();
268161da0332SRebecca Mckeever 	alloc_nid_numa_part_reserved_check();
268261da0332SRebecca Mckeever 	alloc_nid_numa_part_reserved_fallback_check();
268361da0332SRebecca Mckeever 	alloc_nid_numa_split_range_low_check();
268461da0332SRebecca Mckeever 	alloc_nid_numa_split_range_high_check();
268550c80241SRebecca Mckeever 
268661da0332SRebecca Mckeever 	alloc_nid_numa_no_overlap_split_check();
268761da0332SRebecca Mckeever 	alloc_nid_numa_no_overlap_low_check();
268861da0332SRebecca Mckeever 	alloc_nid_numa_no_overlap_high_check();
268961da0332SRebecca Mckeever 	alloc_nid_numa_large_region_check();
269061da0332SRebecca Mckeever 	alloc_nid_numa_reserved_full_merge_check();
269161da0332SRebecca Mckeever 	alloc_nid_numa_split_all_reserved_check();
269250c80241SRebecca Mckeever 
2693*b842f4f5SClaudio Migliorelli 	alloc_node_numa_on_correct_nid();
2694*b842f4f5SClaudio Migliorelli 
269550c80241SRebecca Mckeever 	return 0;
269650c80241SRebecca Mckeever }
269750c80241SRebecca Mckeever 
269850c80241SRebecca Mckeever static int memblock_alloc_nid_checks_internal(int flags)
269950c80241SRebecca Mckeever {
270050c80241SRebecca Mckeever 	alloc_nid_test_flags = flags;
270150c80241SRebecca Mckeever 
270250c80241SRebecca Mckeever 	prefix_reset();
270361da0332SRebecca Mckeever 	prefix_push(get_memblock_alloc_nid_name(flags));
270450c80241SRebecca Mckeever 
270550c80241SRebecca Mckeever 	reset_memblock_attributes();
270650c80241SRebecca Mckeever 	dummy_physical_memory_init();
270750c80241SRebecca Mckeever 
270850c80241SRebecca Mckeever 	memblock_alloc_nid_range_checks();
270950c80241SRebecca Mckeever 	memblock_alloc_nid_numa_checks();
271050c80241SRebecca Mckeever 
27118f98435dSKarolina Drobnik 	dummy_physical_memory_cleanup();
27128f98435dSKarolina Drobnik 
271376586c00SRebecca Mckeever 	prefix_pop();
271476586c00SRebecca Mckeever 
27158f98435dSKarolina Drobnik 	return 0;
27168f98435dSKarolina Drobnik }
2717ae544fd6SRebecca Mckeever 
2718ae544fd6SRebecca Mckeever int memblock_alloc_nid_checks(void)
2719ae544fd6SRebecca Mckeever {
2720ae544fd6SRebecca Mckeever 	memblock_alloc_nid_checks_internal(TEST_F_NONE);
2721ae544fd6SRebecca Mckeever 	memblock_alloc_nid_checks_internal(TEST_F_RAW);
2722ae544fd6SRebecca Mckeever 
2723ae544fd6SRebecca Mckeever 	return 0;
2724ae544fd6SRebecca Mckeever }
272561da0332SRebecca Mckeever 
272661da0332SRebecca Mckeever int memblock_alloc_exact_nid_range_checks(void)
272761da0332SRebecca Mckeever {
272861da0332SRebecca Mckeever 	alloc_nid_test_flags = (TEST_F_RAW | TEST_F_EXACT);
272961da0332SRebecca Mckeever 
273061da0332SRebecca Mckeever 	memblock_alloc_nid_range_checks();
273161da0332SRebecca Mckeever 
273261da0332SRebecca Mckeever 	return 0;
273361da0332SRebecca Mckeever }