/* arch/mips/mm/init.c */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>

#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags);\
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */
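
/*
 * Note that the SMTC variants open and close a brace scope, so the two
 * macros must always be used as a lexically matched pair within one
 * function, e.g.:
 *
 *	unsigned long flags;
 *
 *	ENTER_CRITICAL(flags);
 *	... modify TLB / CP0 state ...
 *	EXIT_CRITICAL(flags);
 */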

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since the page is never written to after initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;

/*
 * Not static inline because used by IP27 special magic initialization code
 */
unsigned long setup_zero_pages(void)
{
	unsigned int order;
	unsigned long size;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
		SetPageReserved(page);
		page++;
	}

	size = PAGE_SIZE << order;
	zero_page_mask = (size - 1) & PAGE_MASK;

	return 1UL << order;
}
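
/*
 * zero_page_mask is what lets ZERO_PAGE() pick a zero page of the right
 * colour for a given user address; the definition in asm/pgtable.h is
 * roughly:
 *
 *	#define ZERO_PAGE(vaddr) \
 *		(virt_to_page((void *)(empty_zero_page + \
 *			(((unsigned long)(vaddr)) & zero_page_mask))))
 */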

/*
 * These are almost like kmap_atomic / kunmap_atomic except they take an
 * additional address argument as a hint.
 */

#define kmap_get_fixmap_pte(vaddr)					\
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))

#ifdef CONFIG_MIPS_MT_SMTC
static pte_t *kmap_coherent_pte;
static void __init kmap_coherent_init(void)
{
	unsigned long vaddr;

	/* cache the first coherent kmap pte */
	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
#else
static inline void kmap_coherent_init(void) {}
#endif

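/*
 * Map @page at a fixmap address whose cache colour matches the user
 * address @addr, so that accesses through the returned mapping and
 * through the user mapping hit the same cache lines.  The translation
 * is installed directly as a temporary TLB entry (wired, except on
 * SMTC); kunmap_coherent() must be called to tear it down again.
 */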
void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	inc_preempt_count();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
	idx += FIX_N_COLOURS * smp_processor_id();
#endif
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
	entrylo = pte.pte_high;
#else
	entrylo = pte_val(pte) >> 6;
#endif

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
#ifdef CONFIG_MIPS_MT_SMTC
	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
	/* preload TLB instead of local_flush_tlb_one() */
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	tlbidx = read_c0_index();
	mtc0_tlbw_hazard();
	if (tlbidx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
#else
	tlbidx = read_c0_wired();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
#endif
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);

	return (void *)vaddr;
}

#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
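/*
 * UNIQUE_ENTRYHI() above tags a TLB entry with a CKSEG0 address: CKSEG0
 * is unmapped, so an entry tagged this way can never match a lookup, and
 * giving each index its own VPN2 avoids machine checks from duplicate
 * TLB entries.  kunmap_coherent() below uses it to park the entry it
 * un-wires.
 */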

void kunmap_coherent(void)
{
#ifndef CONFIG_MIPS_MT_SMTC
	unsigned int wired;
	unsigned long flags, old_ctx;

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	wired = read_c0_wired() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);
#endif
	dec_preempt_count();
	preempt_check_resched();
}
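
/*
 * Typical pairing, as in copy_user_highpage() below:
 *
 *	vfrom = kmap_coherent(from, vaddr);
 *	copy_page(vto, vfrom);
 *	kunmap_coherent();
 */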

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);
	if (cpu_has_dc_aliases) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}
	if (((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto, KM_USER1);
	/* Make sure the copy is visible to other CPUs before the page is used */
	smp_wmb();
}

EXPORT_SYMBOL(copy_user_highpage);

void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else
		memcpy(dst, src, len);
	if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

EXPORT_SYMBOL(copy_to_user_page);

void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases) {
		void *vfrom =
			kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else
		memcpy(dst, src, len);
}

EXPORT_SYMBOL(copy_from_user_page);
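
/*
 * Note the asymmetry with copy_to_user_page(): reading from the page
 * deposits no new data in the caches, so no flush is needed here.
 */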


#ifdef CONFIG_HIGHMEM
unsigned long highstart_pfn, highend_pfn;

pte_t *kmap_pte;
pgprot_t kmap_prot;

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}
#endif /* CONFIG_HIGHMEM */

void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					set_pmd(pmd, __pmd((unsigned long)pte));
					if (pte != pte_offset_kernel(pmd, 0))
						BUG();
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}
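
/*
 * Callers pre-allocate page tables for ranges of kernel virtual space;
 * pgtable-32.c, for instance, covers the fixmap (and, with highmem, the
 * kmap range) roughly like this:
 *
 *	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
 *	fixrange_init(vaddr, 0, pgd_base);
 *
 * where end == 0 makes the loops run to the top of the address space.
 */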

#ifndef CONFIG_NEED_MULTIPLE_NODES
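/*
 * Check the firmware-provided memory map: returns 1 if pfn @pagenr lies
 * within a usable (BOOT_MEM_RAM) region, 0 otherwise.
 */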
static int __init page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long addr, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			/* not usable memory */
			continue;

		addr = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr +
			       boot_mem_map.map[i].size);

		if (pagenr >= addr && pagenr < end)
			return 1;
	}

	return 0;
}

void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
#ifndef CONFIG_FLATMEM
	unsigned long zholes_size[MAX_NR_ZONES] = { 0, };
	unsigned long i, j, pfn;
#endif

	pagetable_init();

#ifdef CONFIG_HIGHMEM
	kmap_init();
#endif
	kmap_coherent_init();

#ifdef CONFIG_ZONE_DMA
	if (min_low_pfn < MAX_DMA_PFN && MAX_DMA_PFN <= max_low_pfn) {
		zones_size[ZONE_DMA] = MAX_DMA_PFN - min_low_pfn;
		zones_size[ZONE_NORMAL] = max_low_pfn - MAX_DMA_PFN;
	} else if (max_low_pfn < MAX_DMA_PFN)
		zones_size[ZONE_DMA] = max_low_pfn - min_low_pfn;
	else
#endif
	zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;

#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_HIGHMEM] = highend_pfn - highstart_pfn;

	if (cpu_has_dc_aliases && zones_size[ZONE_HIGHMEM]) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       zones_size[ZONE_HIGHMEM] << (PAGE_SHIFT - 10));
		zones_size[ZONE_HIGHMEM] = 0;
	}
#endif

#ifdef CONFIG_FLATMEM
	free_area_init(zones_size);
#else
	pfn = min_low_pfn;
	for (i = 0; i < MAX_NR_ZONES; i++)
		for (j = 0; j < zones_size[i]; j++, pfn++)
			if (!page_is_ram(pfn))
				zholes_size[i]++;
	free_area_init_node(0, NODE_DATA(0), zones_size, 0, zholes_size);
#endif
}

static struct kcore_list kcore_mem, kcore_vmalloc;
#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;
	unsigned long tmp, ram;

#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
	max_mapnr = highend_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	totalram_pages += free_all_bootmem();
	totalram_pages -= setup_zero_pages();	/* Setup zeroed pages.  */

	reservedpages = ram = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		if (page_is_ram(tmp)) {
			ram++;
			if (PageReserved(pfn_to_page(tmp)))
				reservedpages++;
		}
	num_physpages = ram;

#ifdef CONFIG_HIGHMEM
	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = mem_map + tmp;

		if (!page_is_ram(tmp)) {
			SetPageReserved(page);
			continue;
		}
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		totalhigh_pages++;
	}
	totalram_pages += totalhigh_pages;
	num_physpages += totalhigh_pages;
#endif

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow.  */
		kclist_add(&kcore_kseg0, (void *) CKSEG0, 0x80000000 - 4);
#endif
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
	       "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
	       ram << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10,
	       (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

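/*
 * Poison and free the pages in [begin, end); both bounds are physical
 * addresses, cf. the __pa_symbol()/virt_to_phys() conversions in the
 * callers below.
 */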
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		ClearPageReserved(page);
		init_page_count(page);
		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		__free_page(page);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory",
			virt_to_phys((void *)start),
			virt_to_phys((void *)end));
}
#endif

void free_initmem(void)
{
	prom_free_prom_memory();
	free_init_pages("unused kernel memory",
			__pa_symbol(&__init_begin),
			__pa_symbol(&__init_end));
}

unsigned long pgd_current[NR_CPUS];
/*
 * On 64-bit we've got three-level pagetables with a slightly
 * different layout ...
 */
#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order)))
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
#ifdef CONFIG_64BIT
#ifdef MODULE_START
pgd_t module_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
#endif
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);