// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines setting up the linux page tables.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/sections.h>

#include <mm/mmu_decl.h>

extern char etext[], _stext[], _sinittext[], _einittext[];

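/*
 * Boot-time page table allocator: grabs a naturally aligned block from
 * memblock before the slab allocator exists.  Allocation failure is fatal,
 * since the kernel cannot continue without its page tables.
 */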
static void __init *early_alloc_pgtable(unsigned long size)
{
	void *ptr = memblock_alloc(size, size);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, size, size);

	return ptr;
}

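/*
 * Boot-time counterpart of pte_alloc_kernel(): if the PMD entry is still
 * empty, back it with a freshly allocated PTE fragment, then return the
 * kernel PTE pointer for @va.
 */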
static pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
{
	if (pmd_none(*pmdp)) {
		pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);

		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	return pte_offset_kernel(pmdp, va);
}

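/*
 * Establish a single kernel page mapping of physical address @pa at virtual
 * address @va with protection @prot.  Picks the normal or the boot-time PTE
 * allocator depending on whether slab is up yet.  Returns 0 on success or
 * -ENOMEM if no PTE could be allocated.
 */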
int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	if (likely(slab_is_available()))
		pg = pte_alloc_kernel(pd, va);
	else
		pg = early_pte_alloc_kernel(pd, va);
	if (pg) {
		err = 0;
		/*
		 * The PTE should never be already set nor present in the
		 * hash table
		 */
		BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot));
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));
	}
	smp_wmb();
	return err;
}

/*
 * Map in the chunk of physical memory between byte offsets @offset and @top
 * (both relative to the bottom of RAM).
 */
static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
	unsigned long v, s;
	phys_addr_t p;
	int ktext;

	s = offset;
	v = PAGE_OFFSET + s;
	p = memstart_addr + s;
	for (; s < top; s += PAGE_SIZE) {
		ktext = ((char *)v >= _stext && (char *)v < etext) ||
			((char *)v >= _sinittext && (char *)v < _einittext);
		map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
#ifdef CONFIG_PPC_BOOK3S_32
		if (ktext)
			hash_preload(&init_mm, v);
#endif
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

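/*
 * Map all of lowmem into the kernel linear mapping.  Each memblock region is
 * first offered to mmu_mapin_ram(), which may cover some or all of it with
 * block mappings (e.g. BATs); whatever is left is mapped with ordinary page
 * tables.  With CONFIG_BDI_SWITCH the whole region gets page tables as well,
 * so the Abatron BDI debugger can walk them.
 */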
void __init mapin_ram(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		phys_addr_t base = reg->base;
		phys_addr_t top = min(base + reg->size, total_lowmem);

		if (base >= top)
			continue;
		base = mmu_mapin_ram(base, top);
		if (IS_ENABLED(CONFIG_BDI_SWITCH))
			__mapin_ram_chunk(reg->base, top);
		else
			__mapin_ram_chunk(base, top);
	}
}

/*
 * Scan the real Linux page tables and return a PTE pointer for
 * a virtual address in a context.
 * Returns true (1) if PTE was found, zero otherwise.  The pointer to
 * the PTE pointer is unmodified if PTE is not found.
 */
static int
get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int retval = 0;

	pgd = pgd_offset(mm, addr & PAGE_MASK);
	if (pgd) {
		pud = pud_offset(pgd, addr & PAGE_MASK);
		if (pud && pud_present(*pud)) {
			pmd = pmd_offset(pud, addr & PAGE_MASK);
			if (pmd_present(*pmd)) {
				pte = pte_offset_map(pmd, addr & PAGE_MASK);
				if (pte) {
					retval = 1;
					*ptep = pte;
					if (pmdp)
						*pmdp = pmd;
					/* XXX caller needs to do pte_unmap, yuck */
				}
			}
		}
	}
	return retval;
}

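/*
 * Rewrite the kernel PTE behind one linear-mapping page with new protection
 * bits.  Block-mapped (e.g. BAT-covered) addresses are left untouched and
 * reported as success; no TLB flush is done here, that is the caller's job.
 */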
static int __change_page_attr_noflush(struct page *page, pgprot_t prot)
{
	pte_t *kpte;
	pmd_t *kpmd;
	unsigned long address;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

	if (v_block_mapped(address))
		return 0;
	if (!get_pteptr(&init_mm, address, &kpte, &kpmd))
		return -EINVAL;
	__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
	pte_unmap(kpte);

	return 0;
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * THIS DOES NOTHING WITH BAT MAPPINGS, DEBUG USE ONLY
 */
static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int i, err = 0;
	unsigned long flags;
	struct page *start = page;

	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		err = __change_page_attr_noflush(page, prot);
		if (err)
			break;
	}
	wmb();
	local_irq_restore(flags);
	flush_tlb_kernel_range((unsigned long)page_address(start),
			       (unsigned long)page_address(page));
	return err;
}

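/*
 * Strip execute permission from the freed init text: PAGE_KERNEL carries no
 * execute bit, so rewriting the init pages with it makes them NX.  When the
 * kernel text is block mapped this is delegated to the MMU-specific helper.
 */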
void mark_initmem_nx(void)
{
	struct page *page = virt_to_page(_sinittext);
	unsigned long numpages = PFN_UP((unsigned long)_einittext) -
				 PFN_DOWN((unsigned long)_sinittext);

	if (v_block_mapped((unsigned long)_stext + 1))
		mmu_mark_initmem_nx();
	else
		change_page_attr(page, numpages, PAGE_KERNEL);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
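/*
 * Called from the generic mm code at the end of boot: make kernel text
 * read-only executable and .rodata read-only, then have ptdump sanity-check
 * for any leftover writable+executable mappings.
 */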
void mark_rodata_ro(void)
{
	struct page *page;
	unsigned long numpages;

	if (v_block_mapped((unsigned long)_sinittext)) {
		mmu_mark_rodata_ro();
		return;
	}

	page = virt_to_page(_stext);
	numpages = PFN_UP((unsigned long)_etext) -
		   PFN_DOWN((unsigned long)_stext);

	change_page_attr(page, numpages, PAGE_KERNEL_ROX);
	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	page = virt_to_page(__start_rodata);
	numpages = PFN_UP((unsigned long)__init_begin) -
		   PFN_DOWN((unsigned long)__start_rodata);

	change_page_attr(page, numpages, PAGE_KERNEL_RO);

	// mark_initmem_nx() should have already run by now
	ptdump_check_wx();
}
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
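/*
 * DEBUG_PAGEALLOC hook: map pages back in or unmap them (pgprot 0) in the
 * linear mapping so that stray accesses to freed pages fault immediately.
 * Highmem pages have no permanent kernel mapping and are skipped.
 */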
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;

	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
}
#endif /* CONFIG_DEBUG_PAGEALLOC */