// SPDX-License-Identifier: GPL-2.0-or-later
#include "alloc_api.h"
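
/*
 * Exercise memblock_alloc() against a range of reserved-region layouts.
 * setup_memblock() registers MEM_SIZE bytes of available memory for each
 * test, and the wrappers at the end of the file run every scenario with
 * both top-down and bottom-up allocation directions.
 */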

/*
 * A simple test that tries to allocate a small memory region.
 * Expect to allocate an aligned region near the end of the available memory.
 */
static int alloc_top_down_simple_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;

	phys_addr_t size = SZ_2;
	phys_addr_t expected_start;

	setup_memblock();

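	/*
	 * The request is only SZ_2 bytes, but top-down allocation rounds the
	 * base down to the SMP_CACHE_BYTES alignment, so the region is
	 * expected to start SMP_CACHE_BYTES below the end of memory.
	 */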
	expected_start = memblock_end_of_DRAM() - SMP_CACHE_BYTES;

	allocated_ptr = memblock_alloc(size, SMP_CACHE_BYTES);

	assert(allocated_ptr);
	assert(rgn->size == size);
	assert(rgn->base == expected_start);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == size);

	return 0;
}

/*
 * A test that tries to allocate memory next to a reserved region that starts
 * at a misaligned address. Expect to create two separate entries, with the new
 * entry aligned to the provided alignment:
 *
 *              +
 * |            +--------+         +--------|
 * |            |  rgn2  |         |  rgn1  |
 * +------------+--------+---------+--------+
 *              ^
 *              |
 *              Aligned address boundary
 *
 * The allocation direction is top-down and region arrays are sorted from lower
 * to higher addresses, so the new region will be the first entry in the
 * memblock.reserved array. The previously reserved region does not get
 * modified. Region counter and total size get updated.
 */
static int alloc_top_down_disjoint_check(void)
{
	/* After allocation, this will point to the "old" region */
	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
	struct region r1;
	void *allocated_ptr = NULL;

	phys_addr_t r2_size = SZ_16;
	/* Use custom alignment */
	phys_addr_t alignment = SMP_CACHE_BYTES * 2;
	phys_addr_t total_size;
	phys_addr_t expected_start;

	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SZ_2;
	r1.size = SZ_2;

	total_size = r1.size + r2_size;
	expected_start = memblock_end_of_DRAM() - alignment;

	memblock_reserve(r1.base, r1.size);

	allocated_ptr = memblock_alloc(r2_size, alignment);

	assert(allocated_ptr);
	assert(rgn1->size == r1.size);
	assert(rgn1->base == r1.base);

	assert(rgn2->size == r2_size);
	assert(rgn2->base == expected_start);

	assert(memblock.reserved.cnt == 2);
	assert(memblock.reserved.total_size == total_size);

	return 0;
}

/*
 * A test that tries to allocate memory when there is enough space at the end
 * of the previously reserved block (i.e. first fit):
 *
 *  |              +--------+--------------|
 *  |              |   r1   |      r2      |
 *  +--------------+--------+--------------+
 *
 * Expect a merge of both regions. Only the region size gets updated.
 */
static int alloc_top_down_before_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;

	/*
	 * The first region ends at the aligned address to test region merging
	 */
	phys_addr_t r1_size = SMP_CACHE_BYTES;
	phys_addr_t r2_size = SZ_512;
	phys_addr_t total_size = r1_size + r2_size;

	setup_memblock();

	memblock_reserve(memblock_end_of_DRAM() - total_size, r1_size);

	allocated_ptr = memblock_alloc(r2_size, SMP_CACHE_BYTES);

	assert(allocated_ptr);
	assert(rgn->size == total_size);
	assert(rgn->base == memblock_end_of_DRAM() - total_size);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == total_size);

	return 0;
}

/*
 * A test that tries to allocate memory when there is not enough space at the
 * end of the previously reserved block (i.e. second fit):
 *
 *  |            +-----------+------+     |
 *  |            |     r2    |  r1  |     |
 *  +------------+-----------+------+-----+
 *
 * Expect a merge of both regions. Both the base address and size of the region
 * get updated.
 */
static int alloc_top_down_after_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	struct region r1;
	void *allocated_ptr = NULL;

	phys_addr_t r2_size = SZ_512;
	phys_addr_t total_size;

	setup_memblock();

	/*
	 * The first region starts at the aligned address to test region merging
	 */
	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES;
	r1.size = SZ_8;

	total_size = r1.size + r2_size;

	memblock_reserve(r1.base, r1.size);

	allocated_ptr = memblock_alloc(r2_size, SMP_CACHE_BYTES);

	assert(allocated_ptr);
	assert(rgn->size == total_size);
	assert(rgn->base == r1.base - r2_size);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == total_size);

	return 0;
}

/*
 * A test that tries to allocate memory when there are two reserved regions with
 * a gap too small to fit the new region:
 *
 *  |       +--------+----------+   +------|
 *  |       |   r3   |    r2    |   |  r1  |
 *  +-------+--------+----------+---+------+
 *
 * Expect to allocate a region before the one that starts at the lower address,
 * and merge them into one. The region counter and total size fields get
 * updated.
 */
static int alloc_top_down_second_fit_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	struct region r1, r2;
	void *allocated_ptr = NULL;

	phys_addr_t r3_size = SZ_1K;
	phys_addr_t total_size;

	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SZ_512;
	r1.size = SZ_512;

	r2.base = r1.base - SZ_512;
	r2.size = SZ_256;

	total_size = r1.size + r2.size + r3_size;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = memblock_alloc(r3_size, SMP_CACHE_BYTES);

	assert(allocated_ptr);
	assert(rgn->size == r2.size + r3_size);
	assert(rgn->base == r2.base - r3_size);

	assert(memblock.reserved.cnt == 2);
	assert(memblock.reserved.total_size == total_size);

	return 0;
}

/*
 * A test that tries to allocate memory when there are two reserved regions with
 * a gap big enough to accommodate the new region:
 *
 *  |     +--------+--------+--------+     |
 *  |     |   r2   |   r3   |   r1   |     |
 *  +-----+--------+--------+--------+-----+
 *
 * Expect to merge all of them, creating one big entry in the memblock.reserved
 * array. The region counter and total size fields get updated.
 */
static int alloc_in_between_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	struct region r1, r2;
	void *allocated_ptr = NULL;

	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t r3_size = SZ_64;
	/*
	 * Calculate regions size so there's just enough space for the new entry
	 */
	phys_addr_t rgn_size = (MEM_SIZE - (2 * gap_size + r3_size)) / 2;
	phys_addr_t total_size;

	setup_memblock();

	r1.size = rgn_size;
	r1.base = memblock_end_of_DRAM() - (gap_size + rgn_size);

	r2.size = rgn_size;
	r2.base = memblock_start_of_DRAM() + gap_size;

	total_size = r1.size + r2.size + r3_size;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = memblock_alloc(r3_size, SMP_CACHE_BYTES);

	assert(allocated_ptr);
	assert(rgn->size == total_size);
	assert(rgn->base == r1.base - r2.size - r3_size);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == total_size);

	return 0;
}

/*
 * A test that tries to allocate memory when the memory is filled with reserved
 * regions with memory gaps too small to fit the new region:
 *
 * +-------+
 * |  new  |
 * +--+----+
 *    |    +-----+    +-----+    +-----+    |
 *    |    | res |    | res |    | res |    |
 *    +----+-----+----+-----+----+-----+----+
 *
 * Expect no allocation to happen.
 */
static int alloc_small_gaps_generic_check(void)
{
	void *allocated_ptr = NULL;

	phys_addr_t region_size = SZ_1K;
	phys_addr_t gap_size = SZ_256;
	phys_addr_t region_end;

	setup_memblock();

	region_end = memblock_start_of_DRAM();

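	/* Tile the whole memory with reserved blocks separated by small gaps */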
	while (region_end < memblock_end_of_DRAM()) {
		memblock_reserve(region_end + gap_size, region_size);
		region_end += gap_size + region_size;
	}

	allocated_ptr = memblock_alloc(region_size, SMP_CACHE_BYTES);

	assert(!allocated_ptr);

	return 0;
}

/*
 * A test that tries to allocate memory when all memory is reserved.
 * Expect no allocation to happen.
 */
static int alloc_all_reserved_generic_check(void)
{
	void *allocated_ptr = NULL;

	setup_memblock();

	/* Simulate full memory */
	memblock_reserve(memblock_start_of_DRAM(), MEM_SIZE);

	allocated_ptr = memblock_alloc(SZ_256, SMP_CACHE_BYTES);

	assert(!allocated_ptr);

	return 0;
}

/*
 * A test that tries to allocate memory when the memory is almost full,
 * with not enough space left for the new region:
 *
 *                                +-------+
 *                                |  new  |
 *                                +-------+
 *  |-----------------------------+   |
 *  |          reserved           |   |
 *  +-----------------------------+---+
 *
 * Expect no allocation to happen.
 */
static int alloc_no_space_generic_check(void)
{
	void *allocated_ptr = NULL;

	phys_addr_t available_size = SZ_256;
	phys_addr_t reserved_size = MEM_SIZE - available_size;

	setup_memblock();

	/* Simulate almost-full memory */
	memblock_reserve(memblock_start_of_DRAM(), reserved_size);

	allocated_ptr = memblock_alloc(SZ_1K, SMP_CACHE_BYTES);

	assert(!allocated_ptr);

	return 0;
}

/*
 * A test that tries to allocate memory when the memory is almost full,
 * but there is just enough space left:
 *
 *  |---------------------------+---------|
 *  |          reserved         |   new   |
 *  +---------------------------+---------+
 *
 * Expect to allocate memory and merge all the regions. The total size field
 * gets updated.
 */
static int alloc_limited_space_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;

	phys_addr_t available_size = SZ_256;
	phys_addr_t reserved_size = MEM_SIZE - available_size;

	setup_memblock();

	/* Simulate almost-full memory */
	memblock_reserve(memblock_start_of_DRAM(), reserved_size);

	allocated_ptr = memblock_alloc(available_size, SMP_CACHE_BYTES);

	assert(allocated_ptr);
	assert(rgn->size == MEM_SIZE);
	assert(rgn->base == memblock_start_of_DRAM());

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == MEM_SIZE);

	return 0;
}

/*
 * A test that tries to allocate memory when there is no available memory
 * registered (i.e. memblock.memory has only a dummy entry).
 * Expect no allocation to happen.
 */
static int alloc_no_memory_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;

	reset_memblock_regions();

	allocated_ptr = memblock_alloc(SZ_1K, SMP_CACHE_BYTES);

	assert(!allocated_ptr);
	assert(rgn->size == 0);
	assert(rgn->base == 0);
	assert(memblock.reserved.total_size == 0);

	return 0;
}

/*
 * A simple test that tries to allocate a small memory region.
 * Expect to allocate an aligned region at the beginning of the available
 * memory.
 */
static int alloc_bottom_up_simple_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;

	setup_memblock();

	allocated_ptr = memblock_alloc(SZ_2, SMP_CACHE_BYTES);

	assert(allocated_ptr);
	assert(rgn->size == SZ_2);
	assert(rgn->base == memblock_start_of_DRAM());

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == SZ_2);

	return 0;
}

/*
 * A test that tries to allocate memory next to a reserved region that starts
 * at a misaligned address. Expect to create two separate entries, with the new
 * entry aligned to the provided alignment:
 *
 *                      +
 *  |    +----------+   +----------+     |
 *  |    |   rgn1   |   |   rgn2   |     |
 *  +----+----------+---+----------+-----+
 *                      ^
 *                      |
 *                      Aligned address boundary
 *
 * The allocation direction is bottom-up, so the new region will be the second
 * entry in the memblock.reserved array. The previously reserved region does
 * not get modified. Region counter and total size get updated.
 */
static int alloc_bottom_up_disjoint_check(void)
{
	struct memblock_region *rgn1 = &memblock.reserved.regions[0];
	struct memblock_region *rgn2 = &memblock.reserved.regions[1];
	struct region r1;
	void *allocated_ptr = NULL;

	phys_addr_t r2_size = SZ_16;
	/* Use custom alignment */
	phys_addr_t alignment = SMP_CACHE_BYTES * 2;
	phys_addr_t total_size;
	phys_addr_t expected_start;

	setup_memblock();

	r1.base = memblock_start_of_DRAM() + SZ_2;
	r1.size = SZ_2;

	total_size = r1.size + r2_size;
	expected_start = memblock_start_of_DRAM() + alignment;

	memblock_reserve(r1.base, r1.size);

	allocated_ptr = memblock_alloc(r2_size, alignment);

	assert(allocated_ptr);

	assert(rgn1->size == r1.size);
	assert(rgn1->base == r1.base);

	assert(rgn2->size == r2_size);
	assert(rgn2->base == expected_start);

	assert(memblock.reserved.cnt == 2);
	assert(memblock.reserved.total_size == total_size);

	return 0;
}

/*
 * A test that tries to allocate memory when there is enough space at
 * the beginning of the previously reserved block (i.e. first fit):
 *
 *  |------------------+--------+         |
 *  |        r1        |   r2   |         |
 *  +------------------+--------+---------+
 *
 * Expect a merge of both regions. Only the region size gets updated.
 */
static int alloc_bottom_up_before_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;

	phys_addr_t r1_size = SZ_512;
	phys_addr_t r2_size = SZ_128;
	phys_addr_t total_size = r1_size + r2_size;

	setup_memblock();

	memblock_reserve(memblock_start_of_DRAM() + r1_size, r2_size);

	allocated_ptr = memblock_alloc(r1_size, SMP_CACHE_BYTES);

	assert(allocated_ptr);
	assert(rgn->size == total_size);
	assert(rgn->base == memblock_start_of_DRAM());

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == total_size);

	return 0;
}

/*
 * A test that tries to allocate memory when there is not enough space at
 * the beginning of the previously reserved block (i.e. second fit):
 *
 *  |    +--------+--------------+         |
 *  |    |   r1   |      r2      |         |
 *  +----+--------+--------------+---------+
 *
 * Expect a merge of both regions. Only the region size gets updated.
 */
static int alloc_bottom_up_after_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	struct region r1;
	void *allocated_ptr = NULL;

	phys_addr_t r2_size = SZ_512;
	phys_addr_t total_size;

	setup_memblock();

	/*
	 * The first region starts at the aligned address to test region merging
	 */
	r1.base = memblock_start_of_DRAM() + SMP_CACHE_BYTES;
	r1.size = SZ_64;

	total_size = r1.size + r2_size;

	memblock_reserve(r1.base, r1.size);

	allocated_ptr = memblock_alloc(r2_size, SMP_CACHE_BYTES);

	assert(allocated_ptr);
	assert(rgn->size == total_size);
	assert(rgn->base == r1.base);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == total_size);

	return 0;
}

/*
 * A test that tries to allocate memory when there are two reserved regions, the
 * first one starting at the beginning of the available memory, with a gap too
 * small to fit the new region:
 *
 *  |------------+     +--------+--------+  |
 *  |     r1     |     |   r2   |   r3   |  |
 *  +------------+-----+--------+--------+--+
 *
 * Expect to allocate after the second region, which starts at the higher
 * address, and merge them into one. The region counter and total size fields
 * get updated.
 */
static int alloc_bottom_up_second_fit_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[1];
	struct region r1, r2;
	void *allocated_ptr = NULL;

	phys_addr_t r3_size = SZ_1K;
	phys_addr_t total_size;

	setup_memblock();

	r1.base = memblock_start_of_DRAM();
	r1.size = SZ_512;

	r2.base = r1.base + r1.size + SZ_512;
	r2.size = SZ_256;

	total_size = r1.size + r2.size + r3_size;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = memblock_alloc(r3_size, SMP_CACHE_BYTES);

	assert(allocated_ptr);
	assert(rgn->size == r2.size + r3_size);
	assert(rgn->base == r2.base);

	assert(memblock.reserved.cnt == 2);
	assert(memblock.reserved.total_size == total_size);

	return 0;
}

/* Test case wrappers */
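
/*
 * Each wrapper below runs its check(s) twice: once with the default top-down
 * allocation direction and once with bottom-up allocation, toggled via
 * memblock_set_bottom_up().
 */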
static int alloc_simple_check(void)
{
	memblock_set_bottom_up(false);
	alloc_top_down_simple_check();
	memblock_set_bottom_up(true);
	alloc_bottom_up_simple_check();

	return 0;
}

static int alloc_disjoint_check(void)
{
	memblock_set_bottom_up(false);
	alloc_top_down_disjoint_check();
	memblock_set_bottom_up(true);
	alloc_bottom_up_disjoint_check();

	return 0;
}

static int alloc_before_check(void)
{
	memblock_set_bottom_up(false);
	alloc_top_down_before_check();
	memblock_set_bottom_up(true);
	alloc_bottom_up_before_check();

	return 0;
}

static int alloc_after_check(void)
{
	memblock_set_bottom_up(false);
	alloc_top_down_after_check();
	memblock_set_bottom_up(true);
	alloc_bottom_up_after_check();

	return 0;
}

static int alloc_in_between_check(void)
{
	memblock_set_bottom_up(false);
	alloc_in_between_generic_check();
	memblock_set_bottom_up(true);
	alloc_in_between_generic_check();

	return 0;
}

static int alloc_second_fit_check(void)
{
	memblock_set_bottom_up(false);
	alloc_top_down_second_fit_check();
	memblock_set_bottom_up(true);
	alloc_bottom_up_second_fit_check();

	return 0;
}

static int alloc_small_gaps_check(void)
{
	memblock_set_bottom_up(false);
	alloc_small_gaps_generic_check();
	memblock_set_bottom_up(true);
	alloc_small_gaps_generic_check();

	return 0;
}

static int alloc_all_reserved_check(void)
{
	memblock_set_bottom_up(false);
	alloc_all_reserved_generic_check();
	memblock_set_bottom_up(true);
	alloc_all_reserved_generic_check();

	return 0;
}

static int alloc_no_space_check(void)
{
	memblock_set_bottom_up(false);
	alloc_no_space_generic_check();
	memblock_set_bottom_up(true);
	alloc_no_space_generic_check();

	return 0;
}

static int alloc_limited_space_check(void)
{
	memblock_set_bottom_up(false);
	alloc_limited_space_generic_check();
	memblock_set_bottom_up(true);
	alloc_limited_space_generic_check();

	return 0;
}

static int alloc_no_memory_check(void)
{
	memblock_set_bottom_up(false);
	alloc_no_memory_generic_check();
	memblock_set_bottom_up(true);
	alloc_no_memory_generic_check();

	return 0;
}

int memblock_alloc_checks(void)
{
	reset_memblock_attributes();
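	/*
	 * Set up the dummy physical memory block that backs the allocations
	 * made by the checks below.
	 */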
	dummy_physical_memory_init();

	alloc_simple_check();
	alloc_disjoint_check();
	alloc_before_check();
	alloc_after_check();
	alloc_second_fit_check();
	alloc_small_gaps_check();
	alloc_in_between_check();
	alloc_all_reserved_check();
	alloc_no_space_check();
	alloc_limited_space_check();
	alloc_no_memory_check();

	dummy_physical_memory_cleanup();

	return 0;
}