/*
 *  This file contains ioremap and related functions for 64-bit machines.
 *
 *  Derived from arch/ppc64/mm/init.c
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>

#include <mm/mmu_decl.h>

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;
/*
 * page table size
 */
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
unsigned long __pud_cache_index;
EXPORT_SYMBOL(__pud_cache_index);
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
unsigned long __kernel_io_start;
EXPORT_SYMBOL(__kernel_io_start);
unsigned long __kernel_io_end;
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long __pte_frag_nr;
EXPORT_SYMBOL(__pte_frag_nr);
unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
unsigned long ioremap_bot;
#else /* !CONFIG_PPC_BOOK3S_64 */
unsigned long ioremap_bot = IOREMAP_BASE;
#endif
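
/*
 * Note: the geometry variables above are filled in early at boot by the
 * hash or radix MMU initialisation code, so that the generic page table
 * accessors can be shared between the two translation modes.
 */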

/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 * @pa: physical address to map, must be page aligned
 * @ea: kernel virtual address to map it at, must be page aligned
 * @size: size of the mapping, must be a multiple of PAGE_SIZE
 * @prot: page protection bits for the mapping
 */
void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
{
	unsigned long i;

	/* We don't support the 4K PFN hack with ioremap */
	if (pgprot_val(prot) & H_PAGE_4K_PFN)
		return NULL;

	if ((ea + size) >= (void *)IOREMAP_END) {
		pr_warn("Outside the supported range\n");
		return NULL;
	}

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea + i, pa + i, prot))
			return NULL;

	return (void __iomem *)ea;
}
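
/*
 * Usage sketch (hypothetical caller, not part of this file): bolting a
 * 64K window of PCI IO space at a fixed kernel virtual address, in the
 * style of pcibios_map_io_space().  io_base_phys stands in for the host
 * bridge's IO base physical address:
 *
 *	void __iomem *va;
 *
 *	va = __ioremap_at(io_base_phys, (void *)ISA_IO_BASE, 0x10000,
 *			  pgprot_noncached(PAGE_KERNEL));
 *	if (!va)
 *		return -ENOMEM;
 *
 * The caller owns the virtual range and must tear it down with
 * __iounmap_at() rather than iounmap().
 */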

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 * @ea: kernel virtual address of the mapping, must be page aligned
 * @size: size of the mapping, must be a multiple of PAGE_SIZE
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}
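
/*
 * Usage sketch (hypothetical, continuing the example above): partially
 * unmapping the second half of that 64K window:
 *
 *	__iounmap_at((void *)(ISA_IO_BASE + 0x8000), 0x8000);
 *
 * Both the address and the size must be page aligned, as the WARN_ONs
 * above enforce.
 */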

void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
				pgprot_t prot, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to.  Once the vmalloc system is
	 * running, we use it.  Before that, we map using addresses going
	 * up from ioremap_bot; the vmalloc allocator then hands out
	 * addresses from ioremap_bot through IOREMAP_END.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (slab_is_available()) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;

		area->phys_addr = paligned;
		ret = __ioremap_at(paligned, area->addr, size, prot);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, prot);
		if (ret)
			ioremap_bot += size;
	}

	/* Restore the sub-page offset of the original address */
	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}

void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	return __ioremap_caller(addr, size, __pgprot(flags), __builtin_return_address(0));
}

void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}
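
/*
 * Usage sketch (hypothetical driver, not part of this file): the common
 * MMIO pattern built on ioremap()/iounmap().  pdev_resource_start stands
 * in for a physical address taken from the device's resource, and the
 * register offset is made up:
 *
 *	void __iomem *regs;
 *	u32 id;
 *
 *	regs = ioremap(pdev_resource_start, 0x1000);
 *	if (!regs)
 *		return -ENOMEM;
 *	id = readl(regs);
 *	iounmap(regs);
 *
 * readl()/writel() and friends are the only valid accessors on the
 * returned __iomem pointer, never plain loads and stores.
 */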

void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}

void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_cached(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}
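
/*
 * Roughly: ioremap() gives a non-cached, guarded mapping suited to MMIO
 * registers; ioremap_wc() drops the guarding so stores may combine,
 * which is what framebuffers want; ioremap_coherent() gives a cacheable
 * mapping for memory kept coherent with the device.
 */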

void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
			     unsigned long flags)
{
	pte_t pte = __pte(flags);
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (pte_write(pte))
		pte = pte_mkdirty(pte);

	/* we don't want to let _PAGE_EXEC leak out */
	pte = pte_exprotect(pte);
	/*
	 * Force kernel mapping.
	 */
	pte = pte_mkprivileged(pte);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, pte_pgprot(pte), caller);
	return __ioremap_caller(addr, size, pte_pgprot(pte), caller);
}
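
/*
 * Usage sketch (hypothetical): ioremap_prot() is for callers that carry
 * raw pte flags rather than a pgprot_t, e.g.:
 *
 *	void __iomem *va;
 *
 *	va = ioremap_prot(phys, size,
 *			  pgprot_val(pgprot_noncached(PAGE_KERNEL)));
 *
 * Whatever flags are passed in, the code above forces a privileged,
 * non-executable mapping and marks writable mappings dirty.
 */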

/*
 * Unmap an IO region and release its virtual address range back to the
 * vmalloc area.  Access to the IO memory must be serialized by the driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!slab_is_available())
		return;

	addr = (void *) ((unsigned long __force)
			 PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		pr_warn("Attempt to iounmap early bolted mapping at 0x%p\n",
			addr);
		return;
	}
	vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);

#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
struct page *pgd_page(pgd_t pgd)
{
	if (pgd_huge(pgd))
		return pte_page(pgd_pte(pgd));
	return virt_to_page(pgd_page_vaddr(pgd));
}
#endif

struct page *pud_page(pud_t pud)
{
	if (pud_huge(pud))
		return pte_page(pud_pte(pud));
	return virt_to_page(pud_page_vaddr(pud));
}
/*
 * For a hugepage we store the pfn in the pmd and use the low
 * PTE_RPN_SHIFT bits for flags.
 * For a PTE page we store a PTE_FRAG_SIZE (4K) aligned virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
	if (pmd_large(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))
		return pte_page(pmd_pte(pmd));
	return virt_to_page(pmd_page_vaddr(pmd));
}

#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
	if (!mmu_has_feature(MMU_FTR_KERNEL_RO)) {
		pr_warn("Warning: Unable to mark rodata read only on this CPU.\n");
		return;
	}

	if (radix_enabled())
		radix__mark_rodata_ro();
	else
		hash__mark_rodata_ro();

	// mark_initmem_nx() should have already run by now
	ptdump_check_wx();
}

void mark_initmem_nx(void)
{
	if (radix_enabled())
		radix__mark_initmem_nx();
	else
		hash__mark_initmem_nx();
}
#endif
348