1 // SPDX-License-Identifier: GPL-2.0-or-later
2 #include "alloc_nid_api.h"
3 
4 /*
5  * A simple test that tries to allocate a memory region within min_addr and
6  * max_addr range:
7  *
8  *        +                   +
9  *   |    +       +-----------+      |
10  *   |    |       |    rgn    |      |
11  *   +----+-------+-----------+------+
12  *        ^                   ^
13  *        |                   |
14  *        min_addr           max_addr
15  *
16  * Expect to allocate a cleared region that ends at max_addr.
17  */
static int alloc_try_nid_top_down_simple_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;

	PREFIX_PUSH();

	phys_addr_t size = SZ_128;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	setup_memblock();

	/* A comfortable range well inside DRAM, much larger than size. */
	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
	max_addr = min_addr + SZ_512;

	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, NUMA_NO_NODE);
	b = (char *)allocated_ptr;
	rgn_end = rgn->base + rgn->size;

	ASSERT_NE(allocated_ptr, NULL);
	/* The allocator is expected to return zeroed memory. */
	ASSERT_EQ(*b, 0);

	/* Top-down: the region must sit flush against max_addr. */
	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, max_addr - size);
	ASSERT_EQ(rgn_end, max_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
55 
56 /*
57  * A simple test that tries to allocate a memory region within min_addr and
58  * max_addr range, where the end address is misaligned:
59  *
60  *         +       +            +
61  *  |      +       +---------+  +    |
62  *  |      |       |   rgn   |  |    |
63  *  +------+-------+---------+--+----+
64  *         ^       ^            ^
65  *         |       |            |
 *       min_addr  |            max_addr
67  *                 |
68  *                 Aligned address
69  *                 boundary
70  *
71  * Expect to allocate a cleared, aligned region that ends before max_addr.
72  */
static int alloc_try_nid_top_down_end_misaligned_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;

	PREFIX_PUSH();

	phys_addr_t size = SZ_128;
	phys_addr_t misalign = SZ_2;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	setup_memblock();

	/* Knock max_addr off the SMP_CACHE_BYTES alignment boundary. */
	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
	max_addr = min_addr + SZ_512 + misalign;

	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, NUMA_NO_NODE);
	b = (char *)allocated_ptr;
	rgn_end = rgn->base + rgn->size;

	ASSERT_NE(allocated_ptr, NULL);
	/* The allocator is expected to return zeroed memory. */
	ASSERT_EQ(*b, 0);

	/*
	 * The region gets pushed down to the previous aligned boundary, so
	 * it ends misalign bytes short of max_addr.
	 */
	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, max_addr - size - misalign);
	ASSERT_LT(rgn_end, max_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
111 
112 /*
113  * A simple test that tries to allocate a memory region, which spans over the
114  * min_addr and max_addr range:
115  *
116  *         +               +
117  *  |      +---------------+       |
118  *  |      |      rgn      |       |
119  *  +------+---------------+-------+
120  *         ^               ^
121  *         |               |
122  *         min_addr        max_addr
123  *
124  * Expect to allocate a cleared region that starts at min_addr and ends at
125  * max_addr, given that min_addr is aligned.
126  */
static int alloc_try_nid_exact_address_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;

	PREFIX_PUSH();

	phys_addr_t size = SZ_1K;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	setup_memblock();

	/* The range is exactly as big as the requested size. */
	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES;
	max_addr = min_addr + size;

	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, NUMA_NO_NODE);
	b = (char *)allocated_ptr;
	rgn_end = rgn->base + rgn->size;

	ASSERT_NE(allocated_ptr, NULL);
	/* The allocator is expected to return zeroed memory. */
	ASSERT_EQ(*b, 0);

	/* With no slack, the region must span [min_addr, max_addr) exactly. */
	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, min_addr);
	ASSERT_EQ(rgn_end, max_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
164 
165 /*
166  * A test that tries to allocate a memory region, which can't fit into
167  * min_addr and max_addr range:
168  *
169  *           +          +     +
170  *  |        +----------+-----+    |
171  *  |        |   rgn    +     |    |
172  *  +--------+----------+-----+----+
173  *           ^          ^     ^
174  *           |          |     |
175  *           Aligned    |    max_addr
176  *           address    |
 *           boundary   min_addr
178  *
179  * Expect to drop the lower limit and allocate a cleared memory region which
180  * ends at max_addr (if the address is aligned).
181  */
static int alloc_try_nid_top_down_narrow_range_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;

	PREFIX_PUSH();

	phys_addr_t size = SZ_256;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	setup_memblock();

	/* The range is narrower than size, so min_addr must be dropped. */
	min_addr = memblock_start_of_DRAM() + SZ_512;
	max_addr = min_addr + SMP_CACHE_BYTES;

	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, NUMA_NO_NODE);
	b = (char *)allocated_ptr;

	ASSERT_NE(allocated_ptr, NULL);
	/* The allocator is expected to return zeroed memory. */
	ASSERT_EQ(*b, 0);

	/* The lower limit is dropped; top-down still ends at max_addr. */
	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, max_addr - size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
216 
217 /*
218  * A test that tries to allocate a memory region, which can't fit into
219  * min_addr and max_addr range, with the latter being too close to the beginning
220  * of the available memory:
221  *
222  *   +-------------+
223  *   |     new     |
224  *   +-------------+
225  *         +       +
226  *         |       +              |
227  *         |       |              |
228  *         +-------+--------------+
229  *         ^       ^
230  *         |       |
231  *         |       max_addr
232  *         |
233  *         min_addr
234  *
235  * Expect no allocation to happen.
236  */
static int alloc_try_nid_low_max_generic_check(void)
{
	void *allocated_ptr = NULL;

	PREFIX_PUSH();

	phys_addr_t size = SZ_1K;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	setup_memblock();

	/*
	 * max_addr is too close to the start of memory to fit size bytes
	 * anywhere below it.
	 */
	min_addr = memblock_start_of_DRAM();
	max_addr = min_addr + SMP_CACHE_BYTES;

	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, NUMA_NO_NODE);

	/* The allocation must fail. */
	ASSERT_EQ(allocated_ptr, NULL);

	test_pass_pop();

	return 0;
}
261 
262 /*
 * A test that tries to allocate a memory region within min_addr and max_addr range,
264  * with min_addr being so close that it's next to an allocated region:
265  *
266  *          +                        +
267  *  |       +--------+---------------|
268  *  |       |   r1   |      rgn      |
269  *  +-------+--------+---------------+
270  *          ^                        ^
271  *          |                        |
272  *          min_addr                 max_addr
273  *
274  * Expect a merge of both regions. Only the region size gets updated.
275  */
static int alloc_try_nid_min_reserved_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;

	PREFIX_PUSH();

	phys_addr_t r1_size = SZ_128;
	phys_addr_t r2_size = SZ_64;
	phys_addr_t total_size = r1_size + r2_size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t reserved_base;

	setup_memblock();

	/* r1 is reserved right below min_addr, adjacent to the new region. */
	max_addr = memblock_end_of_DRAM();
	min_addr = max_addr - r2_size;
	reserved_base = min_addr - r1_size;

	memblock_reserve(reserved_base, r1_size);

	allocated_ptr = memblock_alloc_try_nid(r2_size, SMP_CACHE_BYTES,
					       min_addr, max_addr, NUMA_NO_NODE);
	b = (char *)allocated_ptr;

	ASSERT_NE(allocated_ptr, NULL);
	/* The allocator is expected to return zeroed memory. */
	ASSERT_EQ(*b, 0);

	/* The new region merges with r1: same base, combined size. */
	ASSERT_EQ(rgn->size, total_size);
	ASSERT_EQ(rgn->base, reserved_base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}
316 
317 /*
318  * A test that tries to allocate a memory region within min_addr and max_addr,
319  * with max_addr being so close that it's next to an allocated region:
320  *
321  *             +             +
322  *  |          +-------------+--------|
323  *  |          |     rgn     |   r1   |
324  *  +----------+-------------+--------+
325  *             ^             ^
326  *             |             |
327  *             min_addr      max_addr
328  *
329  * Expect a merge of regions. Only the region size gets updated.
330  */
static int alloc_try_nid_max_reserved_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;

	PREFIX_PUSH();

	phys_addr_t r1_size = SZ_64;
	phys_addr_t r2_size = SZ_128;
	phys_addr_t total_size = r1_size + r2_size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	setup_memblock();

	/* r1 is reserved right at max_addr, adjacent to the new region. */
	max_addr = memblock_end_of_DRAM() - r1_size;
	min_addr = max_addr - r2_size;

	memblock_reserve(max_addr, r1_size);

	allocated_ptr = memblock_alloc_try_nid(r2_size, SMP_CACHE_BYTES,
					       min_addr, max_addr, NUMA_NO_NODE);
	b = (char *)allocated_ptr;

	ASSERT_NE(allocated_ptr, NULL);
	/* The allocator is expected to return zeroed memory. */
	ASSERT_EQ(*b, 0);

	/* The new region merges with r1: base at min_addr, combined size. */
	ASSERT_EQ(rgn->size, total_size);
	ASSERT_EQ(rgn->base, min_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}
369 
370 /*
 * A test that tries to allocate memory within min_addr and max_addr range, when
372  * there are two reserved regions at the borders, with a gap big enough to fit
373  * a new region:
374  *
375  *                +           +
376  *  |    +--------+   +-------+------+  |
377  *  |    |   r2   |   |  rgn  |  r1  |  |
378  *  +----+--------+---+-------+------+--+
379  *                ^           ^
380  *                |           |
381  *                min_addr    max_addr
382  *
383  * Expect to merge the new region with r1. The second region does not get
384  * updated. The total size field gets updated.
385  */
386 
static int alloc_try_nid_top_down_reserved_with_space_check(void)
{
	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;
	struct region r1, r2;

	PREFIX_PUSH();

	phys_addr_t r3_size = SZ_64;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	setup_memblock();

	/* r1 sits near the top of DRAM; r2 is placed below it, leaving a
	 * gap of r3_size + gap_size between them.
	 */
	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r3_size + gap_size + r2.size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr, NUMA_NO_NODE);
	b = (char *)allocated_ptr;

	ASSERT_NE(allocated_ptr, NULL);
	/* The allocator is expected to return zeroed memory. */
	ASSERT_EQ(*b, 0);

	/* Top-down: the new region abuts r1 and merges with it. */
	ASSERT_EQ(rgn1->size, r1.size + r3_size);
	ASSERT_EQ(rgn1->base, max_addr - r3_size);

	/* r2 is untouched. */
	ASSERT_EQ(rgn2->size, r2.size);
	ASSERT_EQ(rgn2->base, r2.base);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}
438 
439 /*
 * A test that tries to allocate memory within min_addr and max_addr range, when
441  * there are two reserved regions at the borders, with a gap of a size equal to
442  * the size of the new region:
443  *
444  *                 +        +
445  *  |     +--------+--------+--------+     |
446  *  |     |   r2   |   r3   |   r1   |     |
447  *  +-----+--------+--------+--------+-----+
448  *                 ^        ^
449  *                 |        |
450  *                 min_addr max_addr
451  *
452  * Expect to merge all of the regions into one. The region counter and total
453  * size fields get updated.
454  */
static int alloc_try_nid_reserved_full_merge_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;
	struct region r1, r2;

	PREFIX_PUSH();

	phys_addr_t r3_size = SZ_64;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	setup_memblock();

	/* The gap between r2 and r1 is exactly r3_size bytes wide. */
	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r3_size + r2.size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr, NUMA_NO_NODE);
	b = (char *)allocated_ptr;

	ASSERT_NE(allocated_ptr, NULL);
	/* The allocator is expected to return zeroed memory. */
	ASSERT_EQ(*b, 0);

	/* r2, the new region and r1 collapse into a single region. */
	ASSERT_EQ(rgn->size, total_size);
	ASSERT_EQ(rgn->base, r2.base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}
501 
502 /*
 * A test that tries to allocate memory within min_addr and max_addr range, when
504  * there are two reserved regions at the borders, with a gap that can't fit
505  * a new region:
506  *
507  *                       +    +
508  *  |  +----------+------+    +------+   |
509  *  |  |    r3    |  r2  |    |  r1  |   |
510  *  +--+----------+------+----+------+---+
511  *                       ^    ^
512  *                       |    |
513  *                       |    max_addr
514  *                       |
515  *                       min_addr
516  *
517  * Expect to merge the new region with r2. The second region does not get
518  * updated. The total size counter gets updated.
519  */
static int alloc_try_nid_top_down_reserved_no_space_check(void)
{
	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;
	struct region r1, r2;

	PREFIX_PUSH();

	phys_addr_t r3_size = SZ_256;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	setup_memblock();

	/* The gap between r2 and r1 (gap_size) is too small for r3_size. */
	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r2.size + gap_size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr, NUMA_NO_NODE);
	b = (char *)allocated_ptr;

	ASSERT_NE(allocated_ptr, NULL);
	/* The allocator is expected to return zeroed memory. */
	ASSERT_EQ(*b, 0);

	/* r1 is untouched. */
	ASSERT_EQ(rgn1->size, r1.size);
	ASSERT_EQ(rgn1->base, r1.base);

	/* The new region lands below r2 and merges with it. */
	ASSERT_EQ(rgn2->size, r2.size + r3_size);
	ASSERT_EQ(rgn2->base, r2.base - r3_size);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}
571 
572 /*
 * A test that tries to allocate memory within min_addr and max_addr range, but
574  * it's too narrow and everything else is reserved:
575  *
576  *            +-----------+
577  *            |    new    |
578  *            +-----------+
579  *                 +      +
580  *  |--------------+      +----------|
581  *  |      r2      |      |    r1    |
582  *  +--------------+------+----------+
583  *                 ^      ^
584  *                 |      |
585  *                 |      max_addr
586  *                 |
587  *                 min_addr
588  *
589  * Expect no allocation to happen.
590  */
591 
static int alloc_try_nid_reserved_all_generic_check(void)
{
	void *allocated_ptr = NULL;
	struct region r1, r2;

	PREFIX_PUSH();

	phys_addr_t r3_size = SZ_256;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	setup_memblock();

	/*
	 * r1 and r2 cover all of memory except a gap_size hole, which is
	 * too small for r3_size.
	 */
	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES;
	r1.size = SMP_CACHE_BYTES;

	r2.size = MEM_SIZE - (r1.size + gap_size);
	r2.base = memblock_start_of_DRAM();

	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr, NUMA_NO_NODE);

	/* The allocation must fail - no free range is big enough. */
	ASSERT_EQ(allocated_ptr, NULL);

	test_pass_pop();

	return 0;
}
627 
628 /*
629  * A test that tries to allocate a memory region, where max_addr is
630  * bigger than the end address of the available memory. Expect to allocate
631  * a cleared region that ends before the end of the memory.
632  */
static int alloc_try_nid_top_down_cap_max_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;

	PREFIX_PUSH();

	phys_addr_t size = SZ_256;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	setup_memblock();

	/* max_addr deliberately points past the end of DRAM. */
	min_addr = memblock_end_of_DRAM() - SZ_1K;
	max_addr = memblock_end_of_DRAM() + SZ_256;

	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, NUMA_NO_NODE);
	b = (char *)allocated_ptr;

	ASSERT_NE(allocated_ptr, NULL);
	/* The allocator is expected to return zeroed memory. */
	ASSERT_EQ(*b, 0);

	/* The upper limit is capped to the end of available memory. */
	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
667 
668 /*
669  * A test that tries to allocate a memory region, where min_addr is
670  * smaller than the start address of the available memory. Expect to allocate
671  * a cleared region that ends before the end of the memory.
672  */
static int alloc_try_nid_top_down_cap_min_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;

	PREFIX_PUSH();

	phys_addr_t size = SZ_1K;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	setup_memblock();

	/* min_addr deliberately points below the start of DRAM. */
	min_addr = memblock_start_of_DRAM() - SZ_256;
	max_addr = memblock_end_of_DRAM();

	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, NUMA_NO_NODE);
	b = (char *)allocated_ptr;

	ASSERT_NE(allocated_ptr, NULL);
	/* The allocator is expected to return zeroed memory. */
	ASSERT_EQ(*b, 0);

	/* Top-down: the region still ends at the end of available memory. */
	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
707 
708 /*
709  * A simple test that tries to allocate a memory region within min_addr and
710  * max_addr range:
711  *
712  *        +                       +
713  *   |    +-----------+           |      |
714  *   |    |    rgn    |           |      |
715  *   +----+-----------+-----------+------+
716  *        ^                       ^
717  *        |                       |
718  *        min_addr                max_addr
719  *
720  * Expect to allocate a cleared region that ends before max_addr.
721  */
static int alloc_try_nid_bottom_up_simple_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;

	PREFIX_PUSH();

	phys_addr_t size = SZ_128;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	setup_memblock();

	/* A comfortable range well inside DRAM, much larger than size. */
	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
	max_addr = min_addr + SZ_512;

	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	b = (char *)allocated_ptr;
	rgn_end = rgn->base + rgn->size;

	ASSERT_NE(allocated_ptr, NULL);
	/* The allocator is expected to return zeroed memory. */
	ASSERT_EQ(*b, 0);

	/* Bottom-up: the region must start at min_addr and end below max_addr. */
	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, min_addr);
	ASSERT_LT(rgn_end, max_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
760 
761 /*
762  * A simple test that tries to allocate a memory region within min_addr and
763  * max_addr range, where the start address is misaligned:
764  *
765  *        +                     +
766  *  |     +   +-----------+     +     |
767  *  |     |   |    rgn    |     |     |
768  *  +-----+---+-----------+-----+-----+
769  *        ^   ^----.            ^
770  *        |        |            |
 *     min_addr    |            max_addr
772  *                 |
773  *                 Aligned address
774  *                 boundary
775  *
776  * Expect to allocate a cleared, aligned region that ends before max_addr.
777  */
static int alloc_try_nid_bottom_up_start_misaligned_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;

	PREFIX_PUSH();

	phys_addr_t size = SZ_128;
	phys_addr_t misalign = SZ_2;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	setup_memblock();

	/* Knock min_addr off the SMP_CACHE_BYTES alignment boundary. */
	min_addr = memblock_start_of_DRAM() + misalign;
	max_addr = min_addr + SZ_512;

	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	b = (char *)allocated_ptr;
	rgn_end = rgn->base + rgn->size;

	ASSERT_NE(allocated_ptr, NULL);
	/* The allocator is expected to return zeroed memory. */
	ASSERT_EQ(*b, 0);

	/*
	 * The region gets rounded up to the next aligned boundary, i.e.
	 * SMP_CACHE_BYTES - misalign bytes above min_addr.
	 */
	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, min_addr + (SMP_CACHE_BYTES - misalign));
	ASSERT_LT(rgn_end, max_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
817 
818 /*
819  * A test that tries to allocate a memory region, which can't fit into min_addr
820  * and max_addr range:
821  *
822  *                      +    +
823  *  |---------+         +    +      |
824  *  |   rgn   |         |    |      |
825  *  +---------+---------+----+------+
826  *                      ^    ^
827  *                      |    |
828  *                      |    max_addr
829  *                      |
 *                      min_addr
831  *
832  * Expect to drop the lower limit and allocate a cleared memory region which
833  * starts at the beginning of the available memory.
834  */
static int alloc_try_nid_bottom_up_narrow_range_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;

	PREFIX_PUSH();

	phys_addr_t size = SZ_256;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	setup_memblock();

	/* The range is narrower than size, so min_addr must be dropped. */
	min_addr = memblock_start_of_DRAM() + SZ_512;
	max_addr = min_addr + SMP_CACHE_BYTES;

	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	b = (char *)allocated_ptr;

	ASSERT_NE(allocated_ptr, NULL);
	/* The allocator is expected to return zeroed memory. */
	ASSERT_EQ(*b, 0);

	/* The lower limit is dropped; bottom-up starts at the DRAM base. */
	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, memblock_start_of_DRAM());

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
870 
871 /*
 * A test that tries to allocate memory within min_addr and max_addr range, when
873  * there are two reserved regions at the borders, with a gap big enough to fit
874  * a new region:
875  *
876  *                +           +
877  *  |    +--------+-------+   +------+  |
878  *  |    |   r2   |  rgn  |   |  r1  |  |
879  *  +----+--------+-------+---+------+--+
880  *                ^           ^
881  *                |           |
882  *                min_addr    max_addr
883  *
884  * Expect to merge the new region with r2. The second region does not get
885  * updated. The total size field gets updated.
886  */
887 
static int alloc_try_nid_bottom_up_reserved_with_space_check(void)
{
	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;
	struct region r1, r2;

	PREFIX_PUSH();

	phys_addr_t r3_size = SZ_64;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	setup_memblock();

	/* r1 sits near the top of DRAM; r2 is placed below it, leaving a
	 * gap of r3_size + gap_size between them.
	 */
	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r3_size + gap_size + r2.size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	b = (char *)allocated_ptr;

	ASSERT_NE(allocated_ptr, NULL);
	/* The allocator is expected to return zeroed memory. */
	ASSERT_EQ(*b, 0);

	/* r1 is untouched. */
	ASSERT_EQ(rgn1->size, r1.size);
	ASSERT_EQ(rgn1->base, max_addr);

	/* Bottom-up: the new region abuts r2 and merges with it. */
	ASSERT_EQ(rgn2->size, r2.size + r3_size);
	ASSERT_EQ(rgn2->base, r2.base);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}
940 
941 /*
942  * A test that tries to allocate memory within min_addr and max_add range, when
943  * there are two reserved regions at the borders, with a gap of a size equal to
944  * the size of the new region:
945  *
946  *                         +   +
947  *  |----------+    +------+   +----+  |
948  *  |    r3    |    |  r2  |   | r1 |  |
949  *  +----------+----+------+---+----+--+
950  *                         ^   ^
951  *                         |   |
952  *                         |  max_addr
953  *                         |
954  *                         min_addr
955  *
956  * Expect to drop the lower limit and allocate memory at the beginning of the
957  * available memory. The region counter and total size fields get updated.
958  * Other regions are not modified.
959  */
960 
static int alloc_try_nid_bottom_up_reserved_no_space_check(void)
{
	struct memblock_region *rgn1 = &memblock.reserved.regions[2];
	struct memblock_region *rgn2 = &memblock.reserved.regions[1];
	struct memblock_region *rgn3 = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;
	struct region r1, r2;

	PREFIX_PUSH();

	phys_addr_t r3_size = SZ_256;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	setup_memblock();

	/* The gap between r2 and r1 (gap_size) is too small for r3_size. */
	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r2.size + gap_size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	b = (char *)allocated_ptr;

	ASSERT_NE(allocated_ptr, NULL);
	/* The allocator is expected to return zeroed memory. */
	ASSERT_EQ(*b, 0);

	/* The lower limit is dropped; a third region appears at DRAM base. */
	ASSERT_EQ(rgn3->size, r3_size);
	ASSERT_EQ(rgn3->base, memblock_start_of_DRAM());

	/* r2 is untouched. */
	ASSERT_EQ(rgn2->size, r2.size);
	ASSERT_EQ(rgn2->base, r2.base);

	/* r1 is untouched. */
	ASSERT_EQ(rgn1->size, r1.size);
	ASSERT_EQ(rgn1->base, r1.base);

	ASSERT_EQ(memblock.reserved.cnt, 3);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}
1017 
1018 /*
1019  * A test that tries to allocate a memory region, where max_addr is
1020  * bigger than the end address of the available memory. Expect to allocate
 * a cleared region that starts at min_addr.
1022  */
static int alloc_try_nid_bottom_up_cap_max_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;

	PREFIX_PUSH();

	phys_addr_t size = SZ_256;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	setup_memblock();

	/* max_addr deliberately points past the end of DRAM. */
	min_addr = memblock_start_of_DRAM() + SZ_1K;
	max_addr = memblock_end_of_DRAM() + SZ_256;

	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	b = (char *)allocated_ptr;

	ASSERT_NE(allocated_ptr, NULL);
	/* The allocator is expected to return zeroed memory. */
	ASSERT_EQ(*b, 0);

	/* Bottom-up: the region still starts at min_addr. */
	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, min_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
1058 
1059 /*
1060  * A test that tries to allocate a memory region, where min_addr is
1061  * smaller than the start address of the available memory. Expect to allocate
1062  * a cleared region at the beginning of the available memory.
1063  */
static int alloc_try_nid_bottom_up_cap_min_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	char *b;

	PREFIX_PUSH();

	phys_addr_t size = SZ_1K;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	setup_memblock();

	/* min_addr is at the very start of the available memory. */
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM() - SZ_256;

	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	b = (char *)allocated_ptr;

	ASSERT_NE(allocated_ptr, NULL);
	/* The allocator is expected to return zeroed memory. */
	ASSERT_EQ(*b, 0);

	/* Bottom-up: the region is placed at the beginning of memory. */
	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, memblock_start_of_DRAM());

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
1099 
1100 /* Test case wrappers */
/* Run the simple-range check in both allocation directions. */
static int alloc_try_nid_simple_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_simple_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_bottom_up_simple_check();

	return 0;
}
1111 
1112 static int alloc_try_nid_misaligned_check(void)
1113 {
1114 	test_print("\tRunning %s...\n", __func__);
1115 	memblock_set_bottom_up(false);
1116 	alloc_try_nid_top_down_end_misaligned_check();
1117 	memblock_set_bottom_up(true);
1118 	alloc_try_nid_bottom_up_start_misaligned_check();
1119 
1120 	return 0;
1121 }
1122 
1123 static int alloc_try_nid_narrow_range_check(void)
1124 {
1125 	test_print("\tRunning %s...\n", __func__);
1126 	memblock_set_bottom_up(false);
1127 	alloc_try_nid_top_down_narrow_range_check();
1128 	memblock_set_bottom_up(true);
1129 	alloc_try_nid_bottom_up_narrow_range_check();
1130 
1131 	return 0;
1132 }
1133 
1134 static int alloc_try_nid_reserved_with_space_check(void)
1135 {
1136 	test_print("\tRunning %s...\n", __func__);
1137 	memblock_set_bottom_up(false);
1138 	alloc_try_nid_top_down_reserved_with_space_check();
1139 	memblock_set_bottom_up(true);
1140 	alloc_try_nid_bottom_up_reserved_with_space_check();
1141 
1142 	return 0;
1143 }
1144 
1145 static int alloc_try_nid_reserved_no_space_check(void)
1146 {
1147 	test_print("\tRunning %s...\n", __func__);
1148 	memblock_set_bottom_up(false);
1149 	alloc_try_nid_top_down_reserved_no_space_check();
1150 	memblock_set_bottom_up(true);
1151 	alloc_try_nid_bottom_up_reserved_no_space_check();
1152 
1153 	return 0;
1154 }
1155 
1156 static int alloc_try_nid_cap_max_check(void)
1157 {
1158 	test_print("\tRunning %s...\n", __func__);
1159 	memblock_set_bottom_up(false);
1160 	alloc_try_nid_top_down_cap_max_check();
1161 	memblock_set_bottom_up(true);
1162 	alloc_try_nid_bottom_up_cap_max_check();
1163 
1164 	return 0;
1165 }
1166 
1167 static int alloc_try_nid_cap_min_check(void)
1168 {
1169 	test_print("\tRunning %s...\n", __func__);
1170 	memblock_set_bottom_up(false);
1171 	alloc_try_nid_top_down_cap_min_check();
1172 	memblock_set_bottom_up(true);
1173 	alloc_try_nid_bottom_up_cap_min_check();
1174 
1175 	return 0;
1176 }
1177 
static int alloc_try_nid_min_reserved_check(void)
{
	test_print("\tRunning %s...\n", __func__);

	/* The generic check is direction-agnostic; run it both ways. */
	for (int bottom_up = 0; bottom_up < 2; bottom_up++) {
		memblock_set_bottom_up(bottom_up);
		alloc_try_nid_min_reserved_generic_check();
	}

	return 0;
}
1188 
static int alloc_try_nid_max_reserved_check(void)
{
	test_print("\tRunning %s...\n", __func__);

	/* The generic check is direction-agnostic; run it both ways. */
	for (int bottom_up = 0; bottom_up < 2; bottom_up++) {
		memblock_set_bottom_up(bottom_up);
		alloc_try_nid_max_reserved_generic_check();
	}

	return 0;
}
1199 
static int alloc_try_nid_exact_address_check(void)
{
	test_print("\tRunning %s...\n", __func__);

	/* The generic check is direction-agnostic; run it both ways. */
	for (int bottom_up = 0; bottom_up < 2; bottom_up++) {
		memblock_set_bottom_up(bottom_up);
		alloc_try_nid_exact_address_generic_check();
	}

	return 0;
}
1210 
static int alloc_try_nid_reserved_full_merge_check(void)
{
	test_print("\tRunning %s...\n", __func__);

	/* The generic check is direction-agnostic; run it both ways. */
	for (int bottom_up = 0; bottom_up < 2; bottom_up++) {
		memblock_set_bottom_up(bottom_up);
		alloc_try_nid_reserved_full_merge_generic_check();
	}

	return 0;
}
1221 
static int alloc_try_nid_reserved_all_check(void)
{
	test_print("\tRunning %s...\n", __func__);

	/* The generic check is direction-agnostic; run it both ways. */
	for (int bottom_up = 0; bottom_up < 2; bottom_up++) {
		memblock_set_bottom_up(bottom_up);
		alloc_try_nid_reserved_all_generic_check();
	}

	return 0;
}
1232 
static int alloc_try_nid_low_max_check(void)
{
	test_print("\tRunning %s...\n", __func__);

	/* The generic check is direction-agnostic; run it both ways. */
	for (int bottom_up = 0; bottom_up < 2; bottom_up++) {
		memblock_set_bottom_up(bottom_up);
		alloc_try_nid_low_max_generic_check();
	}

	return 0;
}
1243 
1244 int memblock_alloc_nid_checks(void)
1245 {
1246 	const char *func_testing = "memblock_alloc_try_nid";
1247 
1248 	prefix_reset();
1249 	prefix_push(func_testing);
1250 	test_print("Running %s tests...\n", func_testing);
1251 
1252 	reset_memblock_attributes();
1253 	dummy_physical_memory_init();
1254 
1255 	alloc_try_nid_simple_check();
1256 	alloc_try_nid_misaligned_check();
1257 	alloc_try_nid_narrow_range_check();
1258 	alloc_try_nid_reserved_with_space_check();
1259 	alloc_try_nid_reserved_no_space_check();
1260 	alloc_try_nid_cap_max_check();
1261 	alloc_try_nid_cap_min_check();
1262 
1263 	alloc_try_nid_min_reserved_check();
1264 	alloc_try_nid_max_reserved_check();
1265 	alloc_try_nid_exact_address_check();
1266 	alloc_try_nid_reserved_full_merge_check();
1267 	alloc_try_nid_reserved_all_check();
1268 	alloc_try_nid_low_max_check();
1269 
1270 	dummy_physical_memory_cleanup();
1271 
1272 	prefix_pop();
1273 
1274 	return 0;
1275 }
1276