xref: /openbmc/linux/arch/powerpc/mm/init_32.c (revision fd589a8f)
1 /*
2  *  PowerPC version
3  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4  *
5  *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
6  *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
7  *    Copyright (C) 1996 Paul Mackerras
8  *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
9  *
10  *  Derived from "arch/i386/mm/init.c"
11  *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
12  *
13  *  This program is free software; you can redistribute it and/or
14  *  modify it under the terms of the GNU General Public License
15  *  as published by the Free Software Foundation; either version
16  *  2 of the License, or (at your option) any later version.
17  *
18  */
19 
20 #include <linux/module.h>
21 #include <linux/sched.h>
22 #include <linux/kernel.h>
23 #include <linux/errno.h>
24 #include <linux/string.h>
25 #include <linux/types.h>
26 #include <linux/mm.h>
27 #include <linux/stddef.h>
28 #include <linux/init.h>
29 #include <linux/bootmem.h>
30 #include <linux/highmem.h>
31 #include <linux/initrd.h>
32 #include <linux/pagemap.h>
33 #include <linux/lmb.h>
34 
35 #include <asm/pgalloc.h>
36 #include <asm/prom.h>
37 #include <asm/io.h>
38 #include <asm/pgtable.h>
39 #include <asm/mmu.h>
40 #include <asm/smp.h>
41 #include <asm/machdep.h>
42 #include <asm/btext.h>
43 #include <asm/tlb.h>
44 #include <asm/sections.h>
45 #include <asm/system.h>
46 
47 #include "mmu_decl.h"
48 
#if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
/* The amount of lowmem must be within 0xF0000000 - KERNELBASE. */
#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - PAGE_OFFSET))
/* The Kconfig symbols are KERNEL_START / LOWMEM_SIZE; the old message
 * referred to a non-existent CONFIG_START_KERNEL. */
#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_KERNEL_START"
#endif
#endif
#define MAX_LOW_MEM	CONFIG_LOWMEM_SIZE
56 
57 phys_addr_t total_memory;
58 phys_addr_t total_lowmem;
59 
60 phys_addr_t memstart_addr = (phys_addr_t)~0ull;
61 EXPORT_SYMBOL(memstart_addr);
62 phys_addr_t kernstart_addr;
63 EXPORT_SYMBOL(kernstart_addr);
64 phys_addr_t lowmem_end_addr;
65 
66 int boot_mapsize;
67 #ifdef CONFIG_PPC_PMAC
68 unsigned long agp_special_page;
69 EXPORT_SYMBOL(agp_special_page);
70 #endif
71 
72 void MMU_init(void);
73 
74 /* XXX should be in current.h  -- paulus */
75 extern struct task_struct *current_set[NR_CPUS];
76 
77 /*
78  * this tells the system to map all of ram with the segregs
79  * (i.e. page tables) instead of the bats.
80  * -- Cort
81  */
82 int __map_without_bats;
83 int __map_without_ltlbs;
84 
85 /* max amount of low RAM to map in */
86 unsigned long __max_low_memory = MAX_LOW_MEM;
87 
88 /*
89  * address of the limit of what is accessible with initial MMU setup -
90  * 256MB usually, but only 16MB on 601.
91  */
92 phys_addr_t __initial_memory_limit_addr = (phys_addr_t)0x10000000;
93 
94 /*
95  * Check for command-line options that affect what MMU_init will do.
96  */
97 void MMU_setup(void)
98 {
99 	/* Check for nobats option (used in mapin_ram). */
100 	if (strstr(cmd_line, "nobats")) {
101 		__map_without_bats = 1;
102 	}
103 
104 	if (strstr(cmd_line, "noltlbs")) {
105 		__map_without_ltlbs = 1;
106 	}
107 #ifdef CONFIG_DEBUG_PAGEALLOC
108 	__map_without_bats = 1;
109 	__map_without_ltlbs = 1;
110 #endif
111 }
112 
/*
 * MMU_init sets up the basic memory mappings for the kernel,
 * including both RAM and possibly some I/O regions,
 * and sets up the page tables and the MMU hardware ready to go.
 */
void __init MMU_init(void)
{
	if (ppc_md.progress)
		ppc_md.progress("MMU:enter", 0x111);

	/* 601 can only access 16MB at the moment */
	if (PVR_VER(mfspr(SPRN_PVR)) == 1)
		__initial_memory_limit_addr = 0x01000000;
	/* 8xx can only access 8MB at the moment */
	if (PVR_VER(mfspr(SPRN_PVR)) == 0x50)
		__initial_memory_limit_addr = 0x00800000;

	/* parse args from command line */
	MMU_setup();

	/* This 32-bit setup only copes with a single contiguous region:
	 * drop any additional LMB regions and recompute the totals. */
	if (lmb.memory.cnt > 1) {
		lmb.memory.cnt = 1;
		lmb_analyze();
		printk(KERN_WARNING "Only using first contiguous memory region");
	}

	total_lowmem = total_memory = lmb_end_of_DRAM() - memstart_addr;
	lowmem_end_addr = memstart_addr + total_lowmem;

#ifdef CONFIG_FSL_BOOKE
	/* Freescale Book-E parts expect lowmem to be mapped by fixed TLB
	 * entries, so we need to adjust lowmem to match the amount we can map
	 * in the fixed entries */
	adjust_total_lowmem();
#endif /* CONFIG_FSL_BOOKE */

	/* Clamp lowmem to what we can actually map; without HIGHMEM the
	 * rest of RAM is unusable, so shrink the LMB view to match. */
	if (total_lowmem > __max_low_memory) {
		total_lowmem = __max_low_memory;
		lowmem_end_addr = memstart_addr + total_lowmem;
#ifndef CONFIG_HIGHMEM
		total_memory = total_lowmem;
		lmb_enforce_memory_limit(lowmem_end_addr);
		lmb_analyze();
#endif /* CONFIG_HIGHMEM */
	}

	/* Initialize the MMU hardware */
	if (ppc_md.progress)
		ppc_md.progress("MMU:hw init", 0x300);
	MMU_init_hw();

	/* Map in all of RAM starting at KERNELBASE */
	if (ppc_md.progress)
		ppc_md.progress("MMU:mapin", 0x301);
	mapin_ram();

	/* Initialize early top-down ioremap allocator */
	ioremap_bot = IOREMAP_TOP;

	/* Map in I/O resources */
	if (ppc_md.progress)
		ppc_md.progress("MMU:setio", 0x302);

	if (ppc_md.progress)
		ppc_md.progress("MMU:exit", 0x211);

	/* From now on, btext is no longer BAT mapped if it was at all */
#ifdef CONFIG_BOOTX_TEXT
	btext_unmap();
#endif
}
184 
185 /* This is only called until mem_init is done. */
186 void __init *early_get_page(void)
187 {
188 	void *p;
189 
190 	if (init_bootmem_done) {
191 		p = alloc_bootmem_pages(PAGE_SIZE);
192 	} else {
193 		p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
194 					__initial_memory_limit_addr));
195 	}
196 	return p;
197 }
198 
199 /* Free up now-unused memory */
200 static void free_sec(unsigned long start, unsigned long end, const char *name)
201 {
202 	unsigned long cnt = 0;
203 
204 	while (start < end) {
205 		ClearPageReserved(virt_to_page(start));
206 		init_page_count(virt_to_page(start));
207 		free_page(start);
208 		cnt++;
209 		start += PAGE_SIZE;
210  	}
211 	if (cnt) {
212 		printk(" %ldk %s", cnt << (PAGE_SHIFT - 10), name);
213 		totalram_pages += cnt;
214 	}
215 }
216 
217 void free_initmem(void)
218 {
219 #define FREESEC(TYPE) \
220 	free_sec((unsigned long)(&__ ## TYPE ## _begin), \
221 		 (unsigned long)(&__ ## TYPE ## _end), \
222 		 #TYPE);
223 
224 	printk ("Freeing unused kernel memory:");
225 	FREESEC(init);
226  	printk("\n");
227 	ppc_md.progress = NULL;
228 #undef FREESEC
229 }
230 
231 #ifdef CONFIG_BLK_DEV_INITRD
232 void free_initrd_mem(unsigned long start, unsigned long end)
233 {
234 	if (start < end)
235 		printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
236 	for (; start < end; start += PAGE_SIZE) {
237 		ClearPageReserved(virt_to_page(start));
238 		init_page_count(virt_to_page(start));
239 		free_page(start);
240 		totalram_pages++;
241 	}
242 }
243 #endif
244 
245 #ifdef CONFIG_PROC_KCORE
246 static struct kcore_list kcore_vmem;
247 
/*
 * Register each LMB memory region, plus the vmalloc space, with
 * /proc/kcore so the kernel's memory image can be inspected.
 */
static int __init setup_kcore(void)
{
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base;
		unsigned long size;
		struct kcore_list *kcore_mem;

		base = lmb.memory.region[i].base;
		size = lmb.memory.region[i].size;

		/* GFP_ATOMIC to avoid might_sleep issues; the node is handed
		 * to kclist_add() and kept for the lifetime of the kernel, so
		 * it is intentionally never freed. */
		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
		if (!kcore_mem)
			panic("%s: kmalloc failed\n", __func__);

		/* must stay under 32 bits */
		if ( 0xfffffffful - (unsigned long)__va(base) < size) {
			size = 0xfffffffful - (unsigned long)(__va(base));
			printk(KERN_DEBUG "setup_kcore: restrict size=%lx\n",
						size);
		}

		kclist_add(kcore_mem, __va(base), size);
	}

	/* Also expose the vmalloc region via the static kcore_vmem entry. */
	kclist_add(&kcore_vmem, (void *)VMALLOC_START,
		VMALLOC_END-VMALLOC_START);

	return 0;
}
279 module_init(setup_kcore);
280 #endif
281