// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2018 Simon Goldschmidt
 */

#include <common.h>
#include <lmb.h>
#include <dm/test.h>
#include <test/ut.h>

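/*
 * Check that the LMB state matches expectations: a single RAM region plus
 * num_reserved reserved regions with the given base addresses and sizes.
 */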
static int check_lmb(struct unit_test_state *uts, struct lmb *lmb,
		     phys_addr_t ram_base, phys_size_t ram_size,
		     unsigned long num_reserved,
		     phys_addr_t base1, phys_size_t size1,
		     phys_addr_t base2, phys_size_t size2,
		     phys_addr_t base3, phys_size_t size3)
{
	ut_asserteq(lmb->memory.cnt, 1);
	ut_asserteq(lmb->memory.region[0].base, ram_base);
	ut_asserteq(lmb->memory.region[0].size, ram_size);

	ut_asserteq(lmb->reserved.cnt, num_reserved);
	if (num_reserved > 0) {
		ut_asserteq(lmb->reserved.region[0].base, base1);
		ut_asserteq(lmb->reserved.region[0].size, size1);
	}
	if (num_reserved > 1) {
		ut_asserteq(lmb->reserved.region[1].base, base2);
		ut_asserteq(lmb->reserved.region[1].size, size2);
	}
	if (num_reserved > 2) {
		ut_asserteq(lmb->reserved.region[2].base, base3);
		ut_asserteq(lmb->reserved.region[2].size, size3);
	}
	return 0;
}

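/* Assert that the LMB state matches; see check_lmb() for the parameters */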
#define ASSERT_LMB(lmb, ram_base, ram_size, num_reserved, base1, size1, \
		   base2, size2, base3, size3) \
		   ut_assert(!check_lmb(uts, lmb, ram_base, ram_size, \
			     num_reserved, base1, size1, base2, size2, base3, \
			     size3))

/*
 * Test helper function that reserves 64 KiB somewhere in the simulated RAM and
 * then does some alloc + free tests.
 */
static int test_multi_alloc(struct unit_test_state *uts,
			    const phys_addr_t ram, const phys_size_t ram_size,
			    const phys_addr_t alloc_64k_addr)
{
	const phys_addr_t ram_end = ram + ram_size;
	const phys_addr_t alloc_64k_end = alloc_64k_addr + 0x10000;

	struct lmb lmb;
	long ret;
	phys_addr_t a, a2, b, b2, c, d;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);
	ut_assert(alloc_64k_end > alloc_64k_addr);
	/* check input addresses + size */
	ut_assert(alloc_64k_addr >= ram + 8);
	ut_assert(alloc_64k_end <= ram_end - 8);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* reserve 64 KiB somewhere */
	ret = lmb_reserve(&lmb, alloc_64k_addr, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	/* allocate somewhere, should be at the end of RAM */
	a = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(a, ram_end - 4);
	ASSERT_LMB(&lmb, ram, ram_size, 2, alloc_64k_addr, 0x10000,
		   ram_end - 4, 4, 0, 0);
	/* allocate below the end of the reserved region -> lands below it */
	b = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
	ut_asserteq(b, alloc_64k_addr - 4);
	ASSERT_LMB(&lmb, ram, ram_size, 2,
		   alloc_64k_addr - 4, 0x10000 + 4, ram_end - 4, 4, 0, 0);

	/* 2nd time */
	c = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(c, ram_end - 8);
	ASSERT_LMB(&lmb, ram, ram_size, 2,
		   alloc_64k_addr - 4, 0x10000 + 4, ram_end - 8, 8, 0, 0);
	d = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
	ut_asserteq(d, alloc_64k_addr - 8);
	ASSERT_LMB(&lmb, ram, ram_size, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0);

	ret = lmb_free(&lmb, a, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
	/* allocate again to ensure we get the same address */
	a2 = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(a, a2);
	ASSERT_LMB(&lmb, ram, ram_size, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0);
	ret = lmb_free(&lmb, a2, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);

	ret = lmb_free(&lmb, b, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 3,
		   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000,
		   ram_end - 8, 4);
	/* allocate again to ensure we get the same address */
	b2 = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
	ut_asserteq(b, b2);
	ASSERT_LMB(&lmb, ram, ram_size, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
	ret = lmb_free(&lmb, b2, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 3,
		   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000,
		   ram_end - 8, 4);

	ret = lmb_free(&lmb, c, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 2,
		   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	return 0;
}

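/*
 * Run test_multi_alloc() on 512 MiB of simulated RAM, with the 64 KiB
 * reservation placed in the middle of that RAM.
 */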
static int test_multi_alloc_512mb(struct unit_test_state *uts,
				  const phys_addr_t ram)
{
	return test_multi_alloc(uts, ram, 0x20000000, ram + 0x10000000);
}

/* Create a memory region with one reserved region and allocate */
static int lib_test_lmb_simple(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1 GiB */
	ret = test_multi_alloc_512mb(uts, 0x40000000);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 1.5 GiB */
	return test_multi_alloc_512mb(uts, 0xE0000000);
}

DM_TEST(lib_test_lmb_simple, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
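/*
 * The DM_TEST() entries in this file register the tests with the driver model
 * test framework, so on the sandbox build they are expected to run together
 * with the other driver model unit tests (e.g. via the 'ut dm' command).
 */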

/* Simulate 512 MiB RAM, allocate some blocks that fit/don't fit */
static int test_bigblock(struct unit_test_state *uts, const phys_addr_t ram)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_size_t big_block_size = 0x10000000;
	const phys_addr_t ram_end = ram + ram_size;
	const phys_addr_t alloc_64k_addr = ram + 0x10000000;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* reserve 64 KiB in the middle of RAM */
	ret = lmb_reserve(&lmb, alloc_64k_addr, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	/* allocate a big block, should be below reserved */
	a = lmb_alloc(&lmb, big_block_size, 1);
	ut_asserteq(a, ram);
	ASSERT_LMB(&lmb, ram, ram_size, 1, a,
		   big_block_size + 0x10000, 0, 0, 0, 0);
	/* allocate 2nd big block */
	/* This should fail, printing an error */
	b = lmb_alloc(&lmb, big_block_size, 1);
	ut_asserteq(b, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, a,
		   big_block_size + 0x10000, 0, 0, 0, 0);

	ret = lmb_free(&lmb, a, big_block_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	/* try to allocate a block that is too big */
	/* This should fail, printing an error */
	a = lmb_alloc(&lmb, ram_size, 1);
	ut_asserteq(a, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	return 0;
}

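/* Run the big-block tests at two different RAM base addresses */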
static int lib_test_lmb_big(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1 GiB */
	ret = test_bigblock(uts, 0x40000000);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 1.5 GiB */
	return test_bigblock(uts, 0xE0000000);
}

DM_TEST(lib_test_lmb_big, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);

/* Simulate 512 MiB RAM, allocate a block without previous reservation */
static int test_noreserved(struct unit_test_state *uts, const phys_addr_t ram,
			   const phys_addr_t alloc_size, const ulong align)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_addr_t ram_end = ram + ram_size;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b;
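	/*
	 * The requested size rounded up to the alignment; used below to
	 * compute the expected allocation addresses.
	 */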
	const phys_addr_t alloc_size_aligned = (alloc_size + align - 1) &
		~(align - 1);

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	/* allocate a block */
	a = lmb_alloc(&lmb, alloc_size, align);
	ut_assert(a != 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
		   alloc_size, 0, 0, 0, 0);
	/* allocate another block */
	b = lmb_alloc(&lmb, alloc_size, align);
	ut_assert(b != 0);
	if (alloc_size == alloc_size_aligned) {
		ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size -
			   (alloc_size_aligned * 2), alloc_size * 2, 0, 0, 0,
			   0);
	} else {
		ASSERT_LMB(&lmb, ram, ram_size, 2, ram + ram_size -
			   (alloc_size_aligned * 2), alloc_size, ram + ram_size
			   - alloc_size_aligned, alloc_size, 0, 0);
	}
	/* and free them */
	ret = lmb_free(&lmb, b, alloc_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
		   alloc_size, 0, 0, 0, 0);
	ret = lmb_free(&lmb, a, alloc_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	/* allocate a block with base */
	b = lmb_alloc_base(&lmb, alloc_size, align, ram_end);
	ut_assert(a == b);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
		   alloc_size, 0, 0, 0, 0);
	/* and free it */
	ret = lmb_free(&lmb, b, alloc_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	return 0;
}

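/* Run the no-reservation tests with a 4 byte allocation and alignment of 1 */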
static int lib_test_lmb_noreserved(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1 GiB */
	ret = test_noreserved(uts, 0x40000000, 4, 1);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 1.5 GiB */
	return test_noreserved(uts, 0xE0000000, 4, 1);
}

DM_TEST(lib_test_lmb_noreserved, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);

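/* Run the no-reservation tests with a 5 byte allocation and 8 byte alignment */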
static int lib_test_lmb_unaligned_size(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1 GiB */
	ret = test_noreserved(uts, 0x40000000, 5, 8);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 1.5 GiB */
	return test_noreserved(uts, 0xE0000000, 5, 8);
}

DM_TEST(lib_test_lmb_unaligned_size, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);

/*
 * Simulate a RAM that starts at 0 and allocate down to address 0, which must
 * fail as '0' means failure for the lmb_alloc functions.
 */
static int lib_test_lmb_at_0(struct unit_test_state *uts)
{
	const phys_addr_t ram = 0;
	const phys_size_t ram_size = 0x20000000;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b;

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* allocate nearly everything */
	a = lmb_alloc(&lmb, ram_size - 4, 1);
	ut_asserteq(a, ram + 4);
	ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
		   0, 0, 0, 0);
	/* allocate the rest */
	/* This should fail as the allocated address would be 0 */
	b = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(b, 0);
	/* check that this was an error by checking lmb */
	ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
		   0, 0, 0, 0);
	/* check that this was an error by freeing b */
	ret = lmb_free(&lmb, b, 4);
	ut_asserteq(ret, -1);
	ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
		   0, 0, 0, 0);

	ret = lmb_free(&lmb, a, ram_size - 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	return 0;
}

DM_TEST(lib_test_lmb_at_0, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);

/* Check that calling lmb_reserve with overlapping regions fails. */
static int lib_test_lmb_overlapping_reserve(struct unit_test_state *uts)
{
	const phys_addr_t ram = 0x40000000;
	const phys_size_t ram_size = 0x20000000;
	struct lmb lmb;
	long ret;

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	ret = lmb_reserve(&lmb, 0x40010000, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);
	/* reserving an overlapping region should fail */
	ret = lmb_reserve(&lmb, 0x40011000, 0x10000);
	ut_asserteq(ret, -1);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);
	/* reserve a 3rd region */
	ret = lmb_reserve(&lmb, 0x40030000, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40010000, 0x10000,
		   0x40030000, 0x10000, 0, 0);
	/* reserve a 2nd region in the gap; all three merge into one */
	ret = lmb_reserve(&lmb, 0x40020000, 0x10000);
	ut_assert(ret >= 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x30000,
		   0, 0, 0, 0);

	return 0;
}

DM_TEST(lib_test_lmb_overlapping_reserve,
	DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);

/*
 * Simulate 512 MiB RAM, reserve 3 blocks, allocate addresses in between.
 * Expect addresses outside the memory range to fail.
 */
static int test_alloc_addr(struct unit_test_state *uts, const phys_addr_t ram)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_addr_t ram_end = ram + ram_size;
	const phys_size_t alloc_addr_a = ram + 0x8000000;
	const phys_size_t alloc_addr_b = ram + 0x8000000 * 2;
	const phys_size_t alloc_addr_c = ram + 0x8000000 * 3;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b, c, d, e;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* reserve 3 blocks */
	ret = lmb_reserve(&lmb, alloc_addr_a, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_b, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_c, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 3, alloc_addr_a, 0x10000,
		   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);

	/* allocate blocks */
	a = lmb_alloc_addr(&lmb, ram, alloc_addr_a - ram);
	ut_asserteq(a, ram);
	ASSERT_LMB(&lmb, ram, ram_size, 3, ram, 0x8010000,
		   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);
	b = lmb_alloc_addr(&lmb, alloc_addr_a + 0x10000,
			   alloc_addr_b - alloc_addr_a - 0x10000);
	ut_asserteq(b, alloc_addr_a + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x10010000,
		   alloc_addr_c, 0x10000, 0, 0);
	c = lmb_alloc_addr(&lmb, alloc_addr_b + 0x10000,
			   alloc_addr_c - alloc_addr_b - 0x10000);
	ut_asserteq(c, alloc_addr_b + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);
	d = lmb_alloc_addr(&lmb, alloc_addr_c + 0x10000,
			   ram_end - alloc_addr_c - 0x10000);
	ut_asserteq(d, alloc_addr_c + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, ram_size,
		   0, 0, 0, 0);

	/* allocating anything else should fail */
	e = lmb_alloc(&lmb, 1, 1);
	ut_asserteq(e, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, ram_size,
		   0, 0, 0, 0);

	ret = lmb_free(&lmb, d, ram_end - alloc_addr_c - 0x10000);
	ut_asserteq(ret, 0);

	/* allocate at 3 points in free range */

	d = lmb_alloc_addr(&lmb, ram_end - 4, 4);
	ut_asserteq(d, ram_end - 4);
	ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x18010000,
		   d, 4, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);

	d = lmb_alloc_addr(&lmb, ram_end - 128, 4);
	ut_asserteq(d, ram_end - 128);
	ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x18010000,
		   d, 4, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);

	d = lmb_alloc_addr(&lmb, alloc_addr_c + 0x10000, 4);
	ut_asserteq(d, alloc_addr_c + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010004,
		   0, 0, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);

	/* allocate at the bottom */
	ret = lmb_free(&lmb, a, alloc_addr_a - ram);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + 0x8000000, 0x10010000,
		   0, 0, 0, 0);
	d = lmb_alloc_addr(&lmb, ram, 4);
	ut_asserteq(d, ram);
	ASSERT_LMB(&lmb, ram, ram_size, 2, d, 4,
		   ram + 0x8000000, 0x10010000, 0, 0);

	/* check that allocating outside memory fails */
	if (ram_end != 0) {
		ret = lmb_alloc_addr(&lmb, ram_end, 1);
		ut_asserteq(ret, 0);
	}
	if (ram != 0) {
		ret = lmb_alloc_addr(&lmb, ram - 1, 1);
		ut_asserteq(ret, 0);
	}

	return 0;
}

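/* Run the lmb_alloc_addr() tests at two different RAM base addresses */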
static int lib_test_lmb_alloc_addr(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1 GiB */
	ret = test_alloc_addr(uts, 0x40000000);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 1.5 GiB */
	return test_alloc_addr(uts, 0xE0000000);
}

DM_TEST(lib_test_lmb_alloc_addr, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);

/* Simulate 512 MiB RAM, reserve 3 blocks, check addresses in between */
static int test_get_unreserved_size(struct unit_test_state *uts,
				    const phys_addr_t ram)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_addr_t ram_end = ram + ram_size;
	const phys_size_t alloc_addr_a = ram + 0x8000000;
	const phys_size_t alloc_addr_b = ram + 0x8000000 * 2;
	const phys_size_t alloc_addr_c = ram + 0x8000000 * 3;
	struct lmb lmb;
	long ret;
	phys_size_t s;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* reserve 3 blocks */
	ret = lmb_reserve(&lmb, alloc_addr_a, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_b, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_c, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 3, alloc_addr_a, 0x10000,
		   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);

	/* check addresses in between blocks */
	s = lmb_get_unreserved_size(&lmb, ram);
	ut_asserteq(s, alloc_addr_a - ram);
	s = lmb_get_unreserved_size(&lmb, ram + 0x10000);
	ut_asserteq(s, alloc_addr_a - ram - 0x10000);
	s = lmb_get_unreserved_size(&lmb, alloc_addr_a - 4);
	ut_asserteq(s, 4);

	s = lmb_get_unreserved_size(&lmb, alloc_addr_a + 0x10000);
	ut_asserteq(s, alloc_addr_b - alloc_addr_a - 0x10000);
	s = lmb_get_unreserved_size(&lmb, alloc_addr_a + 0x20000);
	ut_asserteq(s, alloc_addr_b - alloc_addr_a - 0x20000);
	s = lmb_get_unreserved_size(&lmb, alloc_addr_b - 4);
	ut_asserteq(s, 4);

	s = lmb_get_unreserved_size(&lmb, alloc_addr_c + 0x10000);
	ut_asserteq(s, ram_end - alloc_addr_c - 0x10000);
	s = lmb_get_unreserved_size(&lmb, alloc_addr_c + 0x20000);
	ut_asserteq(s, ram_end - alloc_addr_c - 0x20000);
	s = lmb_get_unreserved_size(&lmb, ram_end - 4);
	ut_asserteq(s, 4);

	return 0;
}

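/* Run the unreserved-size tests at two different RAM base addresses */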
static int lib_test_lmb_get_unreserved_size(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1 GiB */
	ret = test_get_unreserved_size(uts, 0x40000000);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 1.5 GiB */
	return test_get_unreserved_size(uts, 0xE0000000);
}

DM_TEST(lib_test_lmb_get_unreserved_size,
	DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
602