xref: /openbmc/u-boot/lib/lmb.c (revision c6af2e7d)
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

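/*
 * Illustrative call sequence (a sketch only; the real callers live
 * elsewhere in U-Boot, and ram_base/ram_size/base/size/align below are
 * placeholders, not symbols defined in this file):
 *
 *	struct lmb lmb;
 *
 *	lmb_init(&lmb);				- start with empty tables
 *	lmb_add(&lmb, ram_base, ram_size);	- register available RAM
 *	arch_lmb_reserve(&lmb);			- weak arch hook (no-op below)
 *	board_lmb_reserve(&lmb);		- weak board hook (no-op below)
 *	lmb_reserve(&lmb, base, size);		- protect a region in use
 *	addr = lmb_alloc(&lmb, size, align);	- top-down aligned allocation
 */
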
#include <common.h>
#include <lmb.h>

#define LMB_ALLOC_ANYWHERE	0

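/*
 * Dump both region tables via debug().  This compiles to an empty
 * function unless DEBUG is defined.
 */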
void lmb_dump_all(struct lmb *lmb)
{
#ifdef DEBUG
	unsigned long i;

	debug("lmb_dump_all:\n");
	debug("    memory.cnt		   = 0x%lx\n", lmb->memory.cnt);
	debug("    memory.size		   = 0x%llx\n",
	      (unsigned long long)lmb->memory.size);
	for (i = 0; i < lmb->memory.cnt; i++) {
		debug("    memory.reg[0x%lx].base   = 0x%llx\n", i,
			(long long unsigned)lmb->memory.region[i].base);
		debug("		   .size   = 0x%llx\n",
			(long long unsigned)lmb->memory.region[i].size);
	}

	debug("\n    reserved.cnt	   = 0x%lx\n",
		lmb->reserved.cnt);
	debug("    reserved.size	   = 0x%llx\n",
		(long long unsigned)lmb->reserved.size);
	for (i = 0; i < lmb->reserved.cnt; i++) {
		debug("    reserved.reg[0x%lx].base = 0x%llx\n", i,
			(long long unsigned)lmb->reserved.region[i].base);
		debug("		     .size = 0x%llx\n",
			(long long unsigned)lmb->reserved.region[i].size);
	}
#endif /* DEBUG */
}

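/*
 * Two ranges [base1, base1 + size1) and [base2, base2 + size2) overlap
 * if and only if each one starts before the other one ends.
 */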
static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
		phys_addr_t base2, phys_size_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

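/*
 * Return 1 if region 2 starts exactly where region 1 ends, -1 if region 1
 * starts exactly where region 2 ends, and 0 if the two are not adjacent.
 */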
static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
		phys_addr_t base2, phys_size_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

static long lmb_regions_adjacent(struct lmb_region *rgn,
		unsigned long r1, unsigned long r2)
{
	phys_addr_t base1 = rgn->region[r1].base;
	phys_size_t size1 = rgn->region[r1].size;
	phys_addr_t base2 = rgn->region[r2].base;
	phys_size_t size2 = rgn->region[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

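/* Remove entry r by sliding all following entries down one slot. */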
static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
	unsigned long i;

	for (i = r; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i + 1].base;
		rgn->region[i].size = rgn->region[i + 1].size;
	}
	rgn->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct lmb_region *rgn,
		unsigned long r1, unsigned long r2)
{
	rgn->region[r1].size += rgn->region[r2].size;
	lmb_remove_region(rgn, r2);
}

void lmb_init(struct lmb *lmb)
{
	/*
	 * Create a dummy zero-size LMB which will get coalesced away later.
	 * This simplifies the lmb_add() code below.
	 */
	lmb->memory.region[0].base = 0;
	lmb->memory.region[0].size = 0;
	lmb->memory.cnt = 1;
	lmb->memory.size = 0;

	/* Ditto for the reserved table. */
	lmb->reserved.region[0].base = 0;
	lmb->reserved.region[0].size = 0;
	lmb->reserved.cnt = 1;
	lmb->reserved.size = 0;
}

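/*
 * Add (base, size) to a region table.  The new range is merged into an
 * existing entry when it duplicates or directly abuts one; otherwise it is
 * inserted so the table stays sorted by base address.  Returns a value
 * >= 0 on success and -1 when the table already holds MAX_LMB_REGIONS
 * entries.
 */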
/* This routine is called with relocation disabled. */
static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base,
			   phys_size_t size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		return 0;
	}

	/* First try to coalesce this LMB with an existing one. */
	for (i = 0; i < rgn->cnt; i++) {
		phys_addr_t rgnbase = rgn->region[i].base;
		phys_size_t rgnsize = rgn->region[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			/* New range sits immediately below entry i */
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			/* New range sits immediately above entry i */
			rgn->region[i].size += size;
			coalesced++;
			break;
		}
	}

	/* A merge may have made entry i adjacent to the following entry. */
	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
		lmb_coalesce_regions(rgn, i, i + 1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;
	if (rgn->cnt >= MAX_LMB_REGIONS)
		return -1;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i + 1].base = rgn->region[i].base;
			rgn->region[i + 1].size = rgn->region[i].size;
		} else {
			rgn->region[i + 1].base = base;
			rgn->region[i + 1].size = size;
			break;
		}
	}

	if (base < rgn->region[0].base) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
	}

	rgn->cnt++;

	return 0;
}

/* This routine may be called with relocation disabled. */
long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *_rgn = &(lmb->memory);

	return lmb_add_region(_rgn, base, size);
}

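/*
 * Remove (base, size) from the reserved table.  Depending on how the range
 * lines up with the entry that contains it, the entry is deleted, trimmed
 * at one end, or split in two.  Returns -1 if no single reserved entry
 * covers the whole range.
 */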
long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *rgn = &(lmb->reserved);
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the reserved region containing (base, size) */
	for (i = 0; i < rgn->cnt; i++) {
		rgnbegin = rgn->region[i].base;
		rgnend = rgnbegin + rgn->region[i].size;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == rgn->cnt)
		return -1;

	/* Check to see if we are removing the entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		lmb_remove_region(rgn, i);
		return 0;
	}

	/* Check to see if the region matches at the front */
	if (rgnbegin == base) {
		rgn->region[i].base = end;
		rgn->region[i].size -= size;
		return 0;
	}

	/* Check to see if the region matches at the end */
	if (rgnend == end) {
		rgn->region[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add a new region for the area after
	 * the hole.
	 */
	rgn->region[i].size = base - rgn->region[i].base;
	return lmb_add_region(rgn, end, rgnend - end);
}

long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *_rgn = &(lmb->reserved);

	return lmb_add_region(_rgn, base, size);
}

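/*
 * Return the index of the first entry that overlaps (base, size),
 * or -1 if none does.
 */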
long lmb_overlaps_region(struct lmb_region *rgn, phys_addr_t base,
				phys_size_t size)
{
	unsigned long i;

	for (i = 0; i < rgn->cnt; i++) {
		phys_addr_t rgnbase = rgn->region[i].base;
		phys_size_t rgnsize = rgn->region[i].size;

		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < rgn->cnt) ? i : -1;
}

phys_addr_t lmb_alloc(struct lmb *lmb, phys_size_t size, ulong align)
{
	return lmb_alloc_base(lmb, size, align, LMB_ALLOC_ANYWHERE);
}

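/*
 * Like __lmb_alloc_base(), but prints an error message when the
 * allocation fails.
 */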
phys_addr_t lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align,
			   phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __lmb_alloc_base(lmb, size, align, max_addr);

	if (alloc == 0)
		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
		       (ulong)size, (ulong)max_addr);

	return alloc;
}

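/*
 * Alignment helpers.  The mask arithmetic below assumes the alignment is
 * a power of two.
 */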
static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
{
	return addr & ~(size - 1);
}

static phys_addr_t lmb_align_up(phys_addr_t addr, ulong size)
{
	return (addr + (size - 1)) & ~(size - 1);
}

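/*
 * Allocate a block of 'size' bytes, walking the memory regions from the
 * highest one downwards.  Within each region the candidate base starts at
 * the top (capped at max_addr unless it is LMB_ALLOC_ANYWHERE) and is
 * moved below any reserved entry it collides with.  The chosen range is
 * recorded in the reserved table; 0 is returned when nothing fits.
 */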
phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align,
			     phys_addr_t max_addr)
{
	long i, j;
	phys_addr_t base = 0;
	phys_addr_t res_base;

	for (i = lmb->memory.cnt - 1; i >= 0; i--) {
		phys_addr_t lmbbase = lmb->memory.region[i].base;
		phys_size_t lmbsize = lmb->memory.region[i].size;

		if (lmbsize < size)
			continue;
		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = min(lmbbase + lmbsize, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while (base && lmbbase <= base) {
			j = lmb_overlaps_region(&lmb->reserved, base, size);
			if (j < 0) {
				/* This area isn't reserved, take it */
				if (lmb_add_region(&lmb->reserved, base,
						   lmb_align_up(size, align)) < 0)
					return 0;
				return base;
			}
			/* Collided with reserved region j: retry just below it */
			res_base = lmb->reserved.region[j].base;
			if (res_base < size)
				break;
			base = lmb_align_down(res_base - size, align);
		}
	}
	return 0;
}

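/* Return 1 if addr lies inside any reserved entry, 0 otherwise. */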
int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr)
{
	int i;

	for (i = 0; i < lmb->reserved.cnt; i++) {
		phys_addr_t upper = lmb->reserved.region[i].base +
			lmb->reserved.region[i].size - 1;
		if ((addr >= lmb->reserved.region[i].base) && (addr <= upper))
			return 1;
	}
	return 0;
}

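/*
 * Default, empty implementations of the reservation hooks.  Boards and
 * architectures override board_lmb_reserve()/arch_lmb_reserve() to reserve
 * platform-specific regions before allocations are made.
 */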
void __board_lmb_reserve(struct lmb *lmb)
{
	/* please define platform specific board_lmb_reserve() */
}
void board_lmb_reserve(struct lmb *lmb) __attribute__((weak, alias("__board_lmb_reserve")));

void __arch_lmb_reserve(struct lmb *lmb)
{
	/* please define platform specific arch_lmb_reserve() */
}
void arch_lmb_reserve(struct lmb *lmb) __attribute__((weak, alias("__arch_lmb_reserve")));
349