arch/powerpc/mm/pgtable_64.c (revision 64d85cc9)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  This file contains ioremap and related functions for 64-bit machines.
 *
 *  Derived from arch/ppc64/mm/init.c
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>

#include <mm/mmu_decl.h>

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;
/*
 * Page table geometry and kernel virtual layout, filled in at early boot.
 */
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
unsigned long __pud_cache_index;
EXPORT_SYMBOL(__pud_cache_index);
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
unsigned long __kernel_io_start;
EXPORT_SYMBOL(__kernel_io_start);
unsigned long __kernel_io_end;
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long __pte_frag_nr;
EXPORT_SYMBOL(__pte_frag_nr);
unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
unsigned long ioremap_bot;
#else /* !CONFIG_PPC_BOOK3S_64 */
unsigned long ioremap_bot = IOREMAP_BASE;
#endif
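
/*
 * These are not compile-time constants on Book3S-64: early MMU setup is
 * expected to fill them in before any of the code below runs, roughly
 * (a sketch assuming the usual hash/radix split used elsewhere in this
 * file; the exact call sites are an assumption):
 *
 *	// in hash__early_init_mmu() or radix__early_init_mmu()
 *	__pte_index_size = H_PTE_INDEX_SIZE;	// RADIX_PTE_INDEX_SIZE on radix
 *	__pmd_index_size = H_PMD_INDEX_SIZE;
 *	ioremap_bot = IOREMAP_BASE;
 */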

/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 * @pa: physical address to map (WARNs if not page aligned)
 * @ea: kernel virtual (effective) address to map it at (WARNs if not
 *      page aligned)
 * @size: size of the mapping in bytes (WARNs if not a multiple of PAGE_SIZE)
 * @prot: page protection bits for the mapping
 */
void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
{
	unsigned long i;

	/* We don't support the 4K PFN hack with ioremap */
	if (pgprot_val(prot) & H_PAGE_4K_PFN)
		return NULL;

	if ((ea + size) >= (void *)IOREMAP_END) {
		pr_warn("Outside the supported range\n");
		return NULL;
	}

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea + i, pa + i, prot))
			return NULL;

	return (void __iomem *)ea;
}
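
/*
 * Example usage (a sketch, not from this file): callers that manage the
 * virtual range themselves reserve it first and then bolt the mapping,
 * along the lines of how PCI IO space is mapped on ppc64:
 *
 *	area = __get_vm_area(size_page, 0, PHB_IO_BASE, PHB_IO_END);
 *	if (!area)
 *		return -ENOMEM;
 *	if (!__ioremap_at(phys_page, area->addr, size_page,
 *			  pgprot_noncached(PAGE_KERNEL)))
 *		return -ENOMEM;
 *
 * phys_page and size_page must be PAGE_SIZE aligned; PHB_IO_BASE/PHB_IO_END
 * and the exact reservation call are assumptions based on typical callers.
 */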

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 * @ea: kernel virtual address the mapping starts at (page aligned)
 * @size: size of the range to unmap, in bytes (page aligned)
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}
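
/*
 * Example usage (illustrative): undoing a window previously bolted with
 * __ioremap_at(), e.g. when a host bridge goes away:
 *
 *	__iounmap_at(hose->io_base_alloc, hose->io_resource.end -
 *		     hose->io_resource.start + 1);
 *
 * hose->io_base_alloc and io_resource are assumptions modelled on the
 * ppc64 pci_controller; both address and size must stay page aligned.
 */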

void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
				pgprot_t prot, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to. Once the slab allocator is up,
	 * we use the vmalloc infrastructure to pick one; before that, we
	 * hand out addresses going up from ioremap_bot. Either way the
	 * mapping lands between ioremap_bot and IOREMAP_END.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (slab_is_available()) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;

		area->phys_addr = paligned;
		ret = __ioremap_at(paligned, area->addr, size, prot);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, prot);
		if (ret)
			ioremap_bot += size;
	}

	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}
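
/*
 * Worked example (assuming 4K pages for the arithmetic): a call
 * __ioremap_caller(0x3fe00104, 8, ...) page-aligns to
 * paligned = 0x3fe00000 and size = 0x1000, maps that single page, and
 * returns the new virtual base plus the sub-page offset 0x104, so the
 * caller's pointer lands exactly on the requested physical byte.
 */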

void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	return __ioremap_caller(addr, size, __pgprot(flags), __builtin_return_address(0));
}

void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}
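
/*
 * Typical driver-side usage (a generic sketch, not specific to this file):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(res->start, resource_size(res));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + CTRL_OFFSET);	// CTRL_OFFSET is hypothetical
 *	iounmap(regs);
 *
 * The non-cached protection picked above is what makes MMIO accesses
 * through the returned cookie safe on these machines.
 */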

void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}

void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_cached(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}

void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
			     unsigned long flags)
{
	pte_t pte = __pte(flags);
	void *caller = __builtin_return_address(0);

	/* writable implies dirty for kernel addresses */
	if (pte_write(pte))
		pte = pte_mkdirty(pte);

	/* we don't want to let _PAGE_EXEC leak out */
	pte = pte_exprotect(pte);
	/* force a kernel (privileged) mapping */
	pte = pte_mkprivileged(pte);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, pte_pgprot(pte), caller);
	return __ioremap_caller(addr, size, pte_pgprot(pte), caller);
}
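
/*
 * Example (illustrative): callers in the generic physical-access path
 * pass raw pte flags straight through, roughly:
 *
 *	maddr = ioremap_prot(phys_addr, PAGE_SIZE,
 *			     pgprot_val(vma->vm_page_prot));
 *
 * Whatever arrives, the sanitisation above strips _PAGE_EXEC and forces
 * a privileged mapping, so userspace protections cannot leak into the
 * kernel's view of the page.
 */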

/*
 * Unmap an IO region and remove it from the vmalloc'd area list.
 * Access to the IO memory must be serialized by the driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!slab_is_available())
		return;

	addr = (void *) ((unsigned long __force)
			 PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		pr_warn("Attempt to iounmap early bolted mapping at 0x%p\n",
			addr);
		return;
	}
	vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);

#ifndef __PAGETABLE_PUD_FOLDED
/* 4-level page table */
struct page *pgd_page(pgd_t pgd)
{
	if (pgd_huge(pgd))
		return pte_page(pgd_pte(pgd));
	return virt_to_page(pgd_page_vaddr(pgd));
}
#endif

struct page *pud_page(pud_t pud)
{
	if (pud_huge(pud))
		return pte_page(pud_pte(pud));
	return virt_to_page(pud_page_vaddr(pud));
}

/*
 * For a hugepage we have the pfn in the pmd; the bits below PTE_RPN_SHIFT
 * are used for flags. For a PTE page, we have a PTE_FRAG_SIZE (4K) aligned
 * virtual address instead.
 */
struct page *pmd_page(pmd_t pmd)
{
	if (pmd_large(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))
		return pte_page(pmd_pte(pmd));
	return virt_to_page(pmd_page_vaddr(pmd));
}
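
/*
 * Concretely (a hedged reading of the dispatch above): a THP or devmap
 * PMD carries the pfn of the huge page itself, so casting it to a pte
 * and using pte_page() yields the head page; a regular PMD instead holds
 * the kernel virtual address of a PTE fragment, and virt_to_page() on
 * that 4K-aligned address recovers the struct page backing the fragment.
 */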

#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
	if (!mmu_has_feature(MMU_FTR_KERNEL_RO)) {
		pr_warn("Warning: Unable to mark rodata read only on this CPU.\n");
		return;
	}

	if (radix_enabled())
		radix__mark_rodata_ro();
	else
		hash__mark_rodata_ro();

	// mark_initmem_nx() should have already run by now
	ptdump_check_wx();
}

void mark_initmem_nx(void)
{
	if (radix_enabled())
		radix__mark_initmem_nx();
	else
		hash__mark_initmem_nx();
}
#endif
343