xref: /openbmc/linux/arch/mips/mm/init.c (revision f42b3800)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags);\
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */
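
/*
 * Note that in the SMTC configuration ENTER_CRITICAL() deliberately
 * opens a brace-enclosed block that EXIT_CRITICAL() closes, so the two
 * must always be used as a matched pair within a single function, as
 * kmap_coherent() below does:
 *
 *	ENTER_CRITICAL(flags);
 *	... poke the TLB ...
 *	EXIT_CRITICAL(flags);
 */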

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since the pages are never written to after initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;

/*
 * Not static inline because it is used by the IP27 special magic
 * initialization code.
 */
unsigned long setup_zero_pages(void)
{
	unsigned int order;
	unsigned long size;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
		SetPageReserved(page);
		page++;
	}

	size = PAGE_SIZE << order;
	zero_page_mask = (size - 1) & PAGE_MASK;

	return 1UL << order;
}
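
/*
 * zero_page_mask lets callers pick the zero page whose colour matches a
 * given user address.  A minimal sketch of how it is consumed (the real
 * ZERO_PAGE() definition lives in <asm/pgtable.h>):
 *
 *	#define ZERO_PAGE(vaddr) \
 *		(virt_to_page((void *)(empty_zero_page + \
 *			(((unsigned long)(vaddr)) & zero_page_mask))))
 */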

/*
 * These are almost like kmap_atomic / kunmap_atomic except they take an
 * additional address argument as the hint.
 */

#define kmap_get_fixmap_pte(vaddr)					\
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))

#ifdef CONFIG_MIPS_MT_SMTC
static pte_t *kmap_coherent_pte;
static void __init kmap_coherent_init(void)
{
	unsigned long vaddr;

	/* cache the first coherent kmap pte */
	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
#else
static inline void kmap_coherent_init(void) {}
#endif

void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(Page_dcache_dirty(page));

	inc_preempt_count();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
	idx += FIX_N_COLOURS * smp_processor_id();
#endif
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
	entrylo = pte.pte_high;
#else
	/* Shift the software PTE into the hardware EntryLo format. */
	entrylo = pte_val(pte) >> 6;
#endif

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
#ifdef CONFIG_MIPS_MT_SMTC
	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
	/* preload TLB instead of local_flush_tlb_one() */
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	tlbidx = read_c0_index();
	mtc0_tlbw_hazard();
	if (tlbidx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
#else
	tlbidx = read_c0_wired();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
#endif
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);

	return (void *)vaddr;
}

/* A dummy VPN in unmapped CKSEG0 space, distinct for each TLB index. */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))

void kunmap_coherent(void)
{
#ifndef CONFIG_MIPS_MT_SMTC
	unsigned int wired;
	unsigned long flags, old_ctx;

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	wired = read_c0_wired() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);
#endif
	dec_preempt_count();
	preempt_check_resched();
}
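
/*
 * Typical pairing, as in copy_to_user_page() below (a sketch only; the
 * mapping must be set up and torn down without sleeping in between,
 * since kmap_coherent() bumps the preempt count):
 *
 *	void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 *	memcpy(vto, src, len);
 *	kunmap_coherent();
 */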

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);
	if (cpu_has_dc_aliases &&
	    page_mapped(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}
	if (((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto, KM_USER1);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}

EXPORT_SYMBOL(copy_user_highpage);

void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
	if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

EXPORT_SYMBOL(copy_to_user_page);

void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
}

EXPORT_SYMBOL(copy_from_user_page);


#ifdef CONFIG_HIGHMEM
unsigned long highstart_pfn, highend_pfn;

pte_t *kmap_pte;
pgprot_t kmap_prot;

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}
#endif /* CONFIG_HIGHMEM */

void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					set_pmd(pmd, __pmd((unsigned long)pte));
					if (pte != pte_offset_kernel(pmd, 0))
						BUG();
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}
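
/*
 * A sketch of how fixrange_init() is invoked: pagetable_init() uses it
 * to populate page tables covering the fixmap range, roughly (assumed
 * shape, not copied from pgtable-32.c):
 *
 *	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
 *	fixrange_init(vaddr, 0, swapper_pg_dir);
 */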

#ifndef CONFIG_NEED_MULTIPLE_NODES
static int __init page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long addr, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			/* not usable memory */
			continue;

		addr = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr +
			       boot_mem_map.map[i].size);

		if (pagenr >= addr && pagenr < end)
			return 1;
	}

	return 0;
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long lastpfn;

	pagetable_init();

#ifdef CONFIG_HIGHMEM
	kmap_init();
#endif
	kmap_coherent_init();

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
	lastpfn = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
		lastpfn = max_low_pfn;
	}
#endif

	free_area_init_nodes(max_zone_pfns);
}

static struct kcore_list kcore_mem, kcore_vmalloc;
#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;
	unsigned long tmp, ram;

#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
	max_mapnr = highend_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	totalram_pages += free_all_bootmem();
	totalram_pages -= setup_zero_pages();	/* Set up the zeroed pages.  */

	reservedpages = ram = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		if (page_is_ram(tmp)) {
			ram++;
			if (PageReserved(pfn_to_page(tmp)))
				reservedpages++;
		}
	num_physpages = ram;

#ifdef CONFIG_HIGHMEM
	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!page_is_ram(tmp)) {
			SetPageReserved(page);
			continue;
		}
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		totalhigh_pages++;
	}
	totalram_pages += totalhigh_pages;
	num_physpages += totalhigh_pages;
#endif

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow.  */
		kclist_add(&kcore_kseg0, (void *) CKSEG0, 0x80000000 - 4);
#endif
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
	       "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
	       ram << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10,
	       (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		ClearPageReserved(page);
		init_page_count(page);
		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		__free_page(page);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory",
			virt_to_phys((void *)start),
			virt_to_phys((void *)end));
}
#endif

void __init_refok free_initmem(void)
{
	prom_free_prom_memory();
	free_init_pages("unused kernel memory",
			__pa_symbol(&__init_begin),
			__pa_symbol(&__init_end));
}

unsigned long pgd_current[NR_CPUS];
/*
 * On 64-bit we've got three-level page tables with a slightly
 * different layout ...
 */
#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE << (order))))
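
/*
 * A sketch of the expansion: with 4K pages, __page_aligned(1) becomes
 *
 *	__attribute__((__aligned__(PAGE_SIZE << (1))))
 *
 * i.e. 8KB alignment, matching a two-page (order-1) directory.
 */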

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants.  So we use the variants from asm-offsets.h until those
 * compilers are officially retired.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER);
#ifdef CONFIG_64BIT
#ifdef MODULE_START
pgd_t module_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
#endif
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);