xref: /openbmc/linux/arch/microblaze/mm/init.c (revision f35e839a)
/*
 * Copyright (C) 2007-2008 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h> /* mem_init */
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/export.h>

#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/* Used for both MMU and noMMU kernels because of the generic PCI code */
int mem_init_done;

#ifndef CONFIG_MMU
unsigned int __page_offset;
EXPORT_SYMBOL(__page_offset);

#else
static int init_bootmem_done;
#endif /* CONFIG_MMU */

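/*
 * First address past the kernel image (_end); setup_memory() places
 * the bootmem bitmap just above it.
 */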
char *klimit = _end;

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.
 */
unsigned long memory_start;
EXPORT_SYMBOL(memory_start);
unsigned long memory_size;
EXPORT_SYMBOL(memory_size);
unsigned long lowmem_size;

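/*
 * Shared kmap state: the kernel pte covering the kmap fixmap slots and
 * the page protection used for highmem mappings.
 */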
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);

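/* Return the kernel pte that maps the given kernel virtual address */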
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr),
			vaddr), vaddr);
}

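/*
 * Set up the pkmap page table and remember the pte/protection used
 * for temporary kernel mappings of highmem pages.
 */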
static void __init highmem_init(void)
{
	pr_debug("%x\n", (u32)PKMAP_BASE);
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
}

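/*
 * Hand every non-reserved highmem page over to the page allocator.
 * Returns the number of pages freed so mem_init() can correct its
 * count of reserved pages.
 */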
static unsigned long highmem_setup(void)
{
	unsigned long pfn;
	unsigned long reservedpages = 0;

	for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) {
		struct page *page = pfn_to_page(pfn);

		/* FIXME not sure about */
		if (memblock_is_reserved(pfn << PAGE_SHIFT))
			continue;
		free_highmem_page(page);
		reservedpages++;
	}
	pr_info("High memory: %luk\n",
					totalhigh_pages << (PAGE_SHIFT-10));

	return reservedpages;
}
#endif /* CONFIG_HIGHMEM */

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
static void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
#ifdef CONFIG_MMU
	int idx;

	/* Set up the fixmaps */
	for (idx = 0; idx < __end_of_fixed_addresses; idx++)
		clear_fixmap(idx);
#endif

	/* Clear every zone */
	memset(zones_size, 0, sizeof(zones_size));

#ifdef CONFIG_HIGHMEM
	highmem_init();

	zones_size[ZONE_DMA] = max_low_pfn;
	zones_size[ZONE_HIGHMEM] = max_pfn;
#else
	zones_size[ZONE_DMA] = max_pfn;
#endif

	/* We don't have holes in the memory map */
	free_area_init_nodes(zones_size);
}

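/*
 * Pick up the memory layout from memblock, set up the bootmem
 * allocator over low memory, mirror the memblock reservations into
 * it and finish by calling paging_init().
 */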
void __init setup_memory(void)
{
	unsigned long map_size;
	struct memblock_region *reg;

#ifndef CONFIG_MMU
	u32 kernel_align_start, kernel_align_size;

	/* Find the main memory region that contains the kernel */
	for_each_memblock(memory, reg) {
		memory_start = (u32)reg->base;
		lowmem_size = reg->size;
		if ((memory_start <= (u32)_text) &&
			((u32)_text <= (memory_start + lowmem_size - 1))) {
			memory_size = lowmem_size;
			PAGE_OFFSET = memory_start;
			pr_info("%s: Main mem: 0x%x, size 0x%08x\n",
				__func__, (u32) memory_start,
					(u32) memory_size);
			break;
		}
	}

	if (!memory_start || !memory_size) {
		panic("%s: Missing memory setting 0x%08x, size=0x%08x\n",
			__func__, (u32) memory_start, (u32) memory_size);
	}

	/* Reserve the region occupied by the kernel */
	kernel_align_start = PAGE_DOWN((u32)_text);
	/* The ALIGN could be dropped because _end in vmlinux.lds.S is already aligned */
	kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start;
	pr_info("%s: kernel addr:0x%08x-0x%08x size=0x%08x\n",
		__func__, kernel_align_start, kernel_align_start
			+ kernel_align_size, kernel_align_size);
	memblock_reserve(kernel_align_start, kernel_align_size);
#endif
	/*
	 * Kernel:
	 * start: base phys address of kernel - page aligned
	 * end: end phys address of kernel - page aligned
	 *
	 * min_low_pfn - the first page (mm/bootmem.c - node_boot_start)
	 * max_low_pfn - the first pfn beyond low memory
	 * max_mapnr - the first unused page (mm/bootmem.c - node_low_pfn)
	 * num_physpages - number of all pages
	 */

	/* memory starts at the kernel end (aligned) and runs to higher addresses */
	min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */
	/* RAM is assumed contiguous */
	num_physpages = max_mapnr = memory_size >> PAGE_SHIFT;
	max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT;
	max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT;

	pr_info("%s: max_mapnr: %#lx\n", __func__, max_mapnr);
	pr_info("%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
	pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
	pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);

	/*
	 * Place the bootmem bitmap right after the kernel image
	 * (TOPHYS(klimit), rounded up to a page) and reserve it so it
	 * is not handed out again.
	 */
	map_size = init_bootmem_node(NODE_DATA(0),
		PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn);
	memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);

	/* Add active regions with valid PFNs */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;

		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		memblock_set_node(start_pfn << PAGE_SHIFT,
					(end_pfn - start_pfn) << PAGE_SHIFT, 0);
	}

	/* the free bootmem region is the whole of main memory */
	free_bootmem_with_active_regions(0, max_low_pfn);

	/* reserve the blocks memblock has already allocated */
	for_each_memblock(reserved, reg) {
		unsigned long top = reg->base + reg->size - 1;

		pr_debug("reserved - 0x%08x-0x%08x, %lx, %lx\n",
			 (u32) reg->base, (u32) reg->size, top,
						memory_start + lowmem_size - 1);

		if (top <= (memory_start + lowmem_size - 1)) {
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
		} else if (reg->base < (memory_start + lowmem_size - 1)) {
			unsigned long trunc_size = memory_start + lowmem_size -
								reg->base;
			reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
		}
	}

	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);

#ifdef CONFIG_MMU
	init_bootmem_done = 1;
#endif
	paging_init();
}

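/*
 * Return the initrd image and the kernel's __init sections to the
 * page allocator once they are no longer needed.
 */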
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area(start, end, 0, "initrd");
}
#endif

void free_initmem(void)
{
	free_initmem_default(0);
}

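/*
 * Release the boot memory to the buddy allocator, count the pages
 * that remain reserved and print the memory banner and the kernel
 * virtual memory layout.
 */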
void __init mem_init(void)
{
	pg_data_t *pgdat;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

	high_memory = (void *)__va(memory_start + lowmem_size - 1);

	/* this will put all memory onto the freelists */
	totalram_pages += free_all_bootmem();

	for_each_online_pgdat(pgdat) {
		unsigned long i;
		struct page *page;

		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}

#ifdef CONFIG_HIGHMEM
	reservedpages -= highmem_setup();
#endif

	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
	datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

	pr_info("Memory: %luk/%luk available (%luk kernel code, ",
		nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10);
	pr_cont("%luk reserved, %luk data, %luk bss, %luk init)\n",
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		bsssize >> 10,
		initsize >> 10);

#ifdef CONFIG_MMU
	pr_info("Kernel virtual memory layout:\n");
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
		ioremap_bot, ioremap_base);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		(unsigned long)VMALLOC_START, VMALLOC_END);
#endif
	mem_init_done = 1;
}

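/* Report whether a pfn lies within system RAM */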
#ifndef CONFIG_MMU
int page_is_ram(unsigned long pfn)
{
	return __range_ok(pfn, 0);
}
#else
int page_is_ram(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

/*
 * Check for command-line options that affect what MMU_init will do.
 */
static void mm_cmdline_setup(void)
{
	unsigned long maxmem = 0;
	char *p = cmd_line;

	/* Look for mem= option on command line */
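	/* e.g. "mem=256M" limits the kernel to the first 256MB of RAM */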
	p = strstr(cmd_line, "mem=");
	if (p) {
		p += 4;
		maxmem = memparse(p, &p);
		if (maxmem && memory_size > maxmem) {
			memory_size = maxmem;
			memblock.memory.regions[0].size = memory_size;
		}
	}
}

/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 */
static void __init mmu_init_hw(void)
{
	/*
	 * The Zone Protection Register (ZPR) defines how protection will
	 * be applied to every page which is a member of a given zone. At
	 * present, we utilize only two of the zones.
	 * The zone index bits (of ZSEL) in the PTE are used for software
	 * indicators, except the LSB.  For user access, zone 1 is used,
	 * for kernel access, zone 0 is used.  We set all but zone 1
	 * to zero, allowing only kernel access as indicated in the PTE.
	 * For zone 1, we set a 01 binary (a value of 10 will not work)
	 * to allow user access as indicated in the PTE.  This also allows
	 * kernel access as indicated in the PTE.
	 */
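	/*
	 * 0x10000000 leaves zone 0 as 00 (kernel access only) and sets
	 * zone 1 to 01 (user access allowed where the PTE permits it).
	 */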
	__asm__ __volatile__ ("ori r11, r0, 0x10000000;" \
			"mts rzpr, r11;"
			: : : "r11");
}

/*
 * MMU_init sets up the basic memory mappings for the kernel,
 * including both RAM and possibly some I/O regions,
 * and sets up the page tables and the MMU hardware ready to go.
 */

/* called from head.S */
asmlinkage void __init mmu_init(void)
{
	unsigned int kstart, ksize;

	if (!memblock.reserved.cnt) {
		pr_emerg("Error memory count\n");
		machine_restart(NULL);
	}

	if ((u32) memblock.memory.regions[0].size < 0x400000) {
		pr_emerg("Memory must be greater than 4MB\n");
		machine_restart(NULL);
	}

	if ((u32) memblock.memory.regions[0].size < kernel_tlb) {
		pr_emerg("Kernel size is greater than memory node\n");
		machine_restart(NULL);
	}

	/* Find main memory where the kernel is */
	memory_start = (u32) memblock.memory.regions[0].base;
	lowmem_size = memory_size = (u32) memblock.memory.regions[0].size;

	if (lowmem_size > CONFIG_LOWMEM_SIZE) {
		lowmem_size = CONFIG_LOWMEM_SIZE;
#ifndef CONFIG_HIGHMEM
		memory_size = lowmem_size;
#endif
	}

	mm_cmdline_setup(); /* FIXME parse args from command line - not used */

	/*
	 * Map out the kernel text/data/bss from the available physical
	 * memory.
	 */
	kstart = __pa(CONFIG_KERNEL_START); /* kernel start */
	/* kernel size */
	ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START));
	memblock_reserve(kstart, ksize);

#if defined(CONFIG_BLK_DEV_INITRD)
	/* Remove the init RAM disk from the available memory. */
	if (initrd_start) {
		unsigned long size;
		size = initrd_end - initrd_start;
		memblock_reserve(virt_to_phys(initrd_start), size);
	}
#endif /* CONFIG_BLK_DEV_INITRD */

	/* Initialize the MMU hardware */
	mmu_init_hw();

	/* Map in all of RAM starting at CONFIG_KERNEL_START */
	mapin_ram();

	/* Extend the vmalloc and ioremap area as far as possible */
#ifdef CONFIG_HIGHMEM
	ioremap_base = ioremap_bot = PKMAP_BASE;
#else
	ioremap_base = ioremap_bot = FIXADDR_START;
#endif

	/* Initialize the context management stuff */
	mmu_context_init();

	/* Shortly after this, the entire linear mapping will be available */
	/* This also ensures that the unflattened device tree is allocated
	 * within the 768MB lowmem limit */
	memblock_set_current_limit(memory_start + lowmem_size - 1);
}

/* This is only called until mem_init is done. */
void __init *early_get_page(void)
{
	void *p;
	if (init_bootmem_done) {
		p = alloc_bootmem_pages(PAGE_SIZE);
	} else {
		/*
		 * The limit is memory_start + kernel_tlb because that is
		 * all the memory mapped by head.S at this point.
		 */
		p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
					memory_start + kernel_tlb));
	}
	return p;
}

#endif /* CONFIG_MMU */

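/*
 * Allocation helpers that work both before and after mem_init():
 * they fall back to the bootmem allocator until the slab allocator
 * is available.
 */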
void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask)
{
	if (mem_init_done)
		return kmalloc(size, mask);
	else
		return alloc_bootmem(size);
}

void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
{
	void *p;

	if (mem_init_done)
		p = kzalloc(size, mask);
	else {
		p = alloc_bootmem(size);
		if (p)
			memset(p, 0, size);
	}
	return p;
}