/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>
#include <linux/initrd.h>

#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/maar.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since the page is never written to after initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

/*
 * Not static inline because used by IP27 special magic initialization code
 */
void setup_zero_pages(void)
{
	unsigned int order, i;
	struct page *page;

	if (cpu_has_vce)
		order = 3;	/* 8 pages: one per possible dcache colour */
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	for (i = 0; i < (1 << order); i++, page++)
		mark_page_reserved(page);

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

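/*
 * Illustrative sketch (not part of this file): consumers pick the copy of
 * the zero page whose colour matches a given user address by masking with
 * zero_page_mask, roughly as the MIPS ZERO_PAGE() macro does:
 *
 *	struct page *zp = virt_to_page((void *)(empty_zero_page +
 *			((unsigned long)vaddr & zero_page_mask)));
 *
 * With order 3 and 4K pages, zero_page_mask is 0x7000, selecting one of
 * the eight page-aligned copies so the mapping never aliases in the dcache.
 */
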
/*
 * Map @page at a fixmap virtual address whose cache colour matches the
 * user-space address @addr, by installing a wired TLB entry by hand.
 * Torn down again by kunmap_coherent().
 */
static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned int old_mmid;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(folio_test_dcache_dirty(page_folio(page)));

	preempt_disable();
	pagefault_disable();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
	idx += in_interrupt() ? FIX_N_COLOURS : 0;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, prot);
#if defined(CONFIG_XPA)
	entrylo = pte_to_entrylo(pte.pte_high);
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_to_entrylo(pte_val(pte));
#endif

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
	if (cpu_has_mmid) {
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(MMID_KERNEL_WIRED);
	}
#ifdef CONFIG_XPA
	if (cpu_has_xpa) {
		entrylo = (pte.pte_low & _PFNX_MASK);
		writex_c0_entrylo0(entrylo);
		writex_c0_entrylo1(entrylo);
	}
#endif
	tlbidx = num_wired_entries();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	if (cpu_has_mmid)
		write_c0_memorymapid(old_mmid);
	local_irq_restore(flags);

	return (void *)vaddr;
}

void *kmap_coherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL);
}

void *kmap_noncoherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
}

void kunmap_coherent(void)
{
	unsigned int wired;
	unsigned long flags, old_ctx;

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	wired = num_wired_entries() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
	pagefault_enable();
	preempt_enable();
}

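/*
 * Usage sketch (illustrative, not part of this file): callers bracket a
 * short copy with the pair, e.g.
 *
 *	void *vto = kmap_coherent(page, vaddr);
 *	memcpy(vto, src, len);
 *	kunmap_coherent();
 *
 * kunmap_coherent() takes no argument because it always unwires the most
 * recently wired TLB entry, so map/unmap must nest LIFO; the separate
 * interrupt-context colour slots in __kmap_pgprot() keep a mapping taken
 * from interrupt context from colliding with an in-progress one.
 */
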
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	struct folio *src = page_folio(from);
	void *vfrom, *vto;

	vto = kmap_atomic(to);
	if (cpu_has_dc_aliases &&
	    folio_mapped(src) && !folio_test_dcache_dirty(src)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}

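/*
 * For reference (a sketch, assuming the usual MIPS definition in
 * <asm/page.h>): two addresses alias in a virtually indexed dcache when
 * they differ within the colour bits, roughly
 *
 *	static inline int pages_do_alias(unsigned long addr1,
 *					 unsigned long addr2)
 *	{
 *		return (addr1 ^ addr2) & shm_align_mask;
 *	}
 *
 * which is what lets copy_user_highpage() above skip the flush when the
 * kernel mapping happens to share the user mapping's colour.
 */
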
void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	struct folio *folio = page_folio(page);

	if (cpu_has_dc_aliases &&
	    folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			folio_set_dcache_dirty(folio);
	}
	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	struct folio *folio = page_folio(page);

	if (cpu_has_dc_aliases &&
	    folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			folio_set_dcache_dirty(folio);
	}
}
EXPORT_SYMBOL_GPL(copy_from_user_page);

void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#ifdef CONFIG_HIGHMEM
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pud_index(vaddr);
	k = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
									   PAGE_SIZE);
					if (!pte)
						panic("%s: Failed to allocate %lu bytes align=%lx\n",
						      __func__, PAGE_SIZE,
						      PAGE_SIZE);

					set_pmd(pmd, __pmd((unsigned long)pte));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}

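/*
 * Caller sketch (illustrative): on 32-bit MIPS, pagetable_init() uses this
 * to populate page tables covering the fixmap/kmap window, along the
 * lines of
 *
 *	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
 *	fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
 *
 * Only PTE pages are allocated here; the PTEs themselves are filled in
 * later by the fixmap/kmap users.
 */
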
struct maar_walk_info {
	struct maar_config cfg[16];
	unsigned int num_cfg;
};

static int maar_res_walk(unsigned long start_pfn, unsigned long nr_pages,
			 void *data)
{
	struct maar_walk_info *wi = data;
	struct maar_config *cfg = &wi->cfg[wi->num_cfg];
	unsigned int maar_align;

	/* MAAR registers hold physical addresses right shifted by 4 bits */
	maar_align = BIT(MIPS_MAAR_ADDR_SHIFT + 4);

	/* Fill in the MAAR config entry */
	cfg->lower = ALIGN(PFN_PHYS(start_pfn), maar_align);
	cfg->upper = ALIGN_DOWN(PFN_PHYS(start_pfn + nr_pages), maar_align) - 1;
	cfg->attrs = MIPS_MAAR_S;

	/* Ensure we don't overflow the cfg array */
	if (!WARN_ON(wi->num_cfg >= ARRAY_SIZE(wi->cfg)))
		wi->num_cfg++;

	return 0;
}
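
/*
 * Worked example (illustrative): with the usual MIPS_MAAR_ADDR_SHIFT of
 * 12, maar_align is 64 KiB, so a RAM range 0x0000_8000..0x0fff_c000
 * (end exclusive) would be recorded inclusively as
 *
 *	cfg->lower = 0x0001_0000;	rounded up to 64 KiB
 *	cfg->upper = 0x0ffe_ffff;	rounded down to 64 KiB, minus one
 *
 * i.e. the bounds are shrunk inwards so speculation is never enabled for
 * addresses outside the RAM region.
 */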

unsigned __weak platform_maar_init(unsigned num_pairs)
{
	unsigned int num_configured;
	struct maar_walk_info wi;

	wi.num_cfg = 0;
	walk_system_ram_range(0, max_pfn, &wi, maar_res_walk);

	num_configured = maar_config(wi.cfg, wi.num_cfg, num_pairs);
	if (num_configured < wi.num_cfg)
		pr_warn("Not enough MAAR pairs (%u) for all memory regions (%u)\n",
			num_pairs, wi.num_cfg);

	return num_configured;
}

void maar_init(void)
{
	unsigned num_maars, used, i;
	phys_addr_t lower, upper, attr;
	static struct {
		struct maar_config cfgs[3];
		unsigned used;
	} recorded = { { { 0 } }, 0 };

	if (!cpu_has_maar)
		return;

	/* Detect the number of MAARs */
	write_c0_maari(~0);
	back_to_back_c0_hazard();
	num_maars = read_c0_maari() + 1;

	/* MAARs should be in pairs */
	WARN_ON(num_maars % 2);

	/* Set MAARs using values we recorded already */
	if (recorded.used) {
		used = maar_config(recorded.cfgs, recorded.used, num_maars / 2);
		BUG_ON(used != recorded.used);
	} else {
		/* Configure the required MAARs */
		used = platform_maar_init(num_maars / 2);
	}

	/* Disable any further MAARs */
	for (i = (used * 2); i < num_maars; i++) {
		write_c0_maari(i);
		back_to_back_c0_hazard();
		write_c0_maar(0);
		back_to_back_c0_hazard();
	}

	if (recorded.used)
		return;

	pr_info("MAAR configuration:\n");
	for (i = 0; i < num_maars; i += 2) {
		write_c0_maari(i);
		back_to_back_c0_hazard();
		upper = read_c0_maar();
#ifdef CONFIG_XPA
		upper |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
#endif

		write_c0_maari(i + 1);
		back_to_back_c0_hazard();
		lower = read_c0_maar();
#ifdef CONFIG_XPA
		lower |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
#endif

		attr = lower & upper;
		lower = (lower & MIPS_MAAR_ADDR) << 4;
		upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;

		pr_info("  [%d]: ", i / 2);
		if ((attr & MIPS_MAAR_V) != MIPS_MAAR_V) {
			pr_cont("disabled\n");
			continue;
		}

		pr_cont("%pa-%pa", &lower, &upper);

		if (attr & MIPS_MAAR_S)
			pr_cont(" speculate");

		pr_cont("\n");

		/* Record the setup for use on secondary CPUs */
		if (used <= ARRAY_SIZE(recorded.cfgs)) {
			recorded.cfgs[recorded.used].lower = lower;
			recorded.cfgs[recorded.used].upper = upper;
			recorded.cfgs[recorded.used].attrs = attr;
			recorded.used++;
		}
	}
}

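/*
 * Bringup note: the boot CPU reaches maar_init() via mem_init(), probes
 * RAM through platform_maar_init() and stashes the result in 'recorded'
 * above; secondary CPUs calling maar_init() later simply replay the
 * recorded pairs and skip the print/record pass via the early return.
 */
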
#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	pagetable_init();

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
	}

	max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	free_area_init(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

static inline void __init mem_init_free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long tmp;

	if (cpu_has_dc_aliases)
		return;

	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!memblock_is_memory(PFN_PHYS(tmp)))
			SetPageReserved(page);
		else
			free_highmem_page(page);
	}
#endif
}

void __init mem_init(void)
{
	/*
	 * When PFN_PTE_SHIFT is greater than PAGE_SHIFT we won't have enough
	 * PTE bits to hold a full 32-bit physical address on MIPS32 systems.
	 */
	BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (PFN_PTE_SHIFT > PAGE_SHIFT));

	maar_init();
	memblock_free_all();
	setup_zero_pages();	/* Setup zeroed pages.  */
	mem_init_free_highmem();

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow.  */
		kclist_add(&kcore_kseg0, (void *) CKSEG0,
				0x80000000 - 4, KCORE_TEXT);
#endif
}
#endif /* !CONFIG_NUMA */

void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_reserved_page(page);
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

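/*
 * Caller sketch (illustrative): note that begin/end are physical
 * addresses, so callers convert first, e.g. the old MIPS
 * free_initrd_mem() invoked this along the lines of
 *
 *	free_init_pages("initrd memory",
 *			virt_to_phys((void *)start),
 *			virt_to_phys((void *)end));
 */
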
void (*free_init_pages_eva)(void *begin, void *end) = NULL;

void __weak __init prom_free_prom_memory(void)
{
	/* nothing to do */
}

void __ref free_initmem(void)
{
	prom_free_prom_memory();
	/*
	 * Let the platform define a specific function to free the
	 * init section since EVA may have used any possible mapping
	 * between virtual and physical addresses.
	 */
	if (free_init_pages_eva)
		free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
	else
		free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	return node_distance(cpu_to_node(from), cpu_to_node(to));
}

static int __init pcpu_cpu_to_node(int cpu)
{
	return cpu_to_node(cpu);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
				    pcpu_cpu_distance,
				    pcpu_cpu_to_node);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
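
/*
 * Note (illustrative): the generic percpu accessors consume this table
 * roughly via
 *
 *	#define per_cpu_offset(x) (__per_cpu_offset[x])
 *
 * so each CPU's variables live at the first-chunk base plus its offset.
 */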
#endif

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

/*
 * Align swapper_pg_dir to 64K so that its address can be loaded with a
 * single LUI instruction in the TLB handlers.  If we used __aligned(64K),
 * its size would get rounded up to the alignment size and waste space.
 * So we place it in its own section and align it in the linker script.
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL_GPL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);