/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since the page is never written to after initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

/*
 * Not static inline because used by IP27 special magic initialization code
 */
void setup_zero_pages(void)
{
	unsigned int order, i;
	struct page *page;

	/* VCE-afflicted CPUs need one zero page per cache colour. */
	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	for (i = 0; i < (1 << order); i++, page++)
		mark_page_reserved(page);

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

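/*
 * Illustrative sketch, not part of the original file: how a consumer of
 * empty_zero_page/zero_page_mask picks the correctly coloured zero page
 * for a given virtual address.  This hypothetical helper mirrors what
 * the ZERO_PAGE() macro in asm/pgtable.h does with these variables.
 */
static inline unsigned long __maybe_unused example_zero_page_for(unsigned long vaddr)
{
	/* zero_page_mask selects the colour bits within the 8-page block. */
	return empty_zero_page + (vaddr & zero_page_mask);
}
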
/*
 * Install a temporary wired TLB mapping of @page at a fixmap slot whose
 * virtual colour matches @addr, so accesses through it cannot alias the
 * user mapping.  Interrupt context gets its own set of colour slots so
 * it cannot clobber a mapping already in use.
 */
static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(Page_dcache_dirty(page));

	pagefault_disable();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
	idx += in_interrupt() ? FIX_N_COLOURS : 0;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, prot);
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_to_entrylo(pte_val(pte));
#endif

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
	tlbidx = read_c0_wired();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);

	return (void *)vaddr;
}

void *kmap_coherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL);
}

void *kmap_noncoherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
}

/*
 * Tear down the wired entry installed by the most recent kmap_coherent()
 * or kmap_noncoherent() call.
 */
void kunmap_coherent(void)
{
	unsigned int wired;
	unsigned long flags, old_ctx;

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	wired = read_c0_wired() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
	pagefault_enable();
}

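/*
 * Illustrative sketch, not part of the original file: the usual
 * kmap_coherent()/kunmap_coherent() pattern, here reading one byte
 * through an alias-safe mapping.  The caller must already know the page
 * is mapped and not dcache-dirty, exactly as copy_user_highpage() below
 * checks before taking this path.  Hypothetical helper, for clarity only.
 */
static unsigned char __maybe_unused example_peek_byte(struct page *page,
						      unsigned long vaddr)
{
	unsigned char c;
	void *va = kmap_coherent(page, vaddr);

	c = *((unsigned char *)va + (vaddr & ~PAGE_MASK));
	kunmap_coherent();

	return c;
}
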
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);
	if (cpu_has_dc_aliases &&
	    page_mapped(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}

void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
	if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
}
EXPORT_SYMBOL_GPL(copy_from_user_page);

void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#ifdef CONFIG_HIGHMEM
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					set_pmd(pmd, __pmd((unsigned long)pte));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}

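/*
 * Illustrative sketch, not part of the original file: roughly how the
 * 32-bit pagetable_init() covers the fixmap window using fixrange_init()
 * above.  The constants come from asm/fixmap.h; treat this as an
 * approximation of the real caller, not a copy of it.
 */
static void __init __maybe_unused example_cover_fixmap(void)
{
	unsigned long vaddr;

	/* Start of the fixed-mapping window, rounded down to a PMD. */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	fixrange_init(vaddr, vaddr + FIXADDR_SIZE, swapper_pg_dir);
}
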
#ifndef CONFIG_NEED_MULTIPLE_NODES
int page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long addr, end;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
			break;
		default:
			/* not usable memory */
			continue;
		}

		addr = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr +
			       boot_mem_map.map[i].size);

		if (pagenr >= addr && pagenr < end)
			return 1;
	}

	return 0;
}

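/*
 * Illustrative sketch, not part of the original file: page_is_ram()
 * takes a page frame number, not a physical address, so a typical
 * caller converts first.  Hypothetical helper, for clarity only.
 */
static int __maybe_unused example_phys_is_ram(phys_addr_t addr)
{
	return page_is_ram(PFN_DOWN(addr));
}
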
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long lastpfn __maybe_unused;

	pagetable_init();

#ifdef CONFIG_HIGHMEM
	kmap_init();
#endif
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
	lastpfn = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
		lastpfn = max_low_pfn;
	}
#endif

	free_area_init_nodes(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

static inline void mem_init_free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long tmp;

	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!page_is_ram(tmp))
			SetPageReserved(page);
		else
			free_highmem_page(page);
	}
#endif
}

/*
 * Weak default: configure no MAAR pairs.  Platforms that care about
 * speculative accesses override this and return the number of pairs
 * they actually used.
 */
unsigned __weak platform_maar_init(unsigned num_pairs)
{
	return 0;
}

static void maar_init(void)
{
	unsigned num_maars, used, i;

	if (!cpu_has_maar)
		return;

	/* Detect the number of MAARs */
	write_c0_maari(~0);
	back_to_back_c0_hazard();
	num_maars = read_c0_maari() + 1;

	/* MAARs should be in pairs */
	WARN_ON(num_maars % 2);

	/* Configure the required MAARs */
	used = platform_maar_init(num_maars / 2);

	/* Disable any further MAARs */
	for (i = (used * 2); i < num_maars; i++) {
		write_c0_maari(i);
		back_to_back_c0_hazard();
		write_c0_maar(0);
		back_to_back_c0_hazard();
	}
}

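/*
 * Illustrative sketch, not part of the original file: what a platform
 * override of platform_maar_init() might look like, assuming the
 * write_maar_pair() helper from asm/maar.h and the MIPS_MAAR_S attribute
 * from asm/mipsregs.h.  It permits speculation for the first 256MB of
 * physical memory and reports one pair used; maar_init() above then
 * invalidates the remaining pairs.  Guarded out because the weak default
 * above already defines the symbol in this file.
 */
#if 0	/* example only - a real override lives in platform code */
/* would need #include <asm/maar.h> */
unsigned platform_maar_init(unsigned num_pairs)
{
	if (num_pairs < 1)
		return 0;

	/* Pair 0: physical [0x00000000, 0x0fffffff], speculation allowed. */
	write_maar_pair(0, 0x00000000, 0x0fffffff, MIPS_MAAR_S);

	return 1;
}
#endif
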
void __init mem_init(void)
{
#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
	max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	maar_init();
	free_all_bootmem();
	setup_zero_pages();	/* Set up zeroed pages.  */
	mem_init_free_highmem();
	mem_init_print_info(NULL);

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow.  */
		kclist_add(&kcore_kseg0, (void *) CKSEG0,
				0x80000000 - 4, KCORE_TEXT);
#endif
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_reserved_page(page);
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
			   "initrd");
}
#endif

void (*free_init_pages_eva)(void *begin, void *end) = NULL;

void __init_refok free_initmem(void)
{
	prom_free_prom_memory();
	/*
	 * Let the platform define a specific function to free the
	 * init section since EVA may have used any possible mapping
	 * between virtual and physical addresses.
	 */
	if (free_init_pages_eva)
		free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
	else
		free_initmem_default(POISON_FREE_INITMEM);
}

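/*
 * Illustrative sketch, not part of the original file: how a platform
 * running with EVA might install the free_init_pages_eva hook above,
 * converting the init section's symbols to physical addresses before
 * handing them to free_init_pages().  Hypothetical code, not taken from
 * any particular board file.
 */
#if 0	/* example only - belongs in platform setup code */
static void example_free_init_pages_eva(void *begin, void *end)
{
	free_init_pages("unused kernel", __pa_symbol(begin),
			__pa_symbol(end));
}

static void __init example_platform_setup(void)
{
	free_init_pages_eva = example_free_init_pages_eva;
}
#endif
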
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants.  So we use the variants from asm-offsets.h until those
 * compiler versions are officially retired.
 *
 * Aligning swapper_pg_dir to 64K allows its address to be loaded with a
 * single LUI instruction in the TLB handlers.  If we used __aligned(64K),
 * its size would get rounded up to the alignment size and waste space.
 * So we place it in its own section and align it there in the linker
 * script.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;