/* xref: /openbmc/linux/arch/arm64/include/asm/pgtable.h (revision 161f4089) */
/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

/*
 * Software defined PTE bits definition.
 */
#define PTE_VALID		(_AT(pteval_t, 1) << 0)
#define PTE_PROT_NONE		(_AT(pteval_t, 1) << 2)	/* only when !PTE_VALID */
#define PTE_FILE		(_AT(pteval_t, 1) << 3)	/* only when !pte_present() */
#define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)

/*
 * VMALLOC and SPARSEMEM_VMEMMAP ranges.
 */
#define VMALLOC_START		(UL(0xffffffffffffffff) << VA_BITS)
#define VMALLOC_END		(PAGE_OFFSET - UL(0x400000000) - SZ_64K)
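/*
 * Worked example (illustrative, assuming VA_BITS == 39 and therefore
 * PAGE_OFFSET == 0xffffffc000000000): VMALLOC_START is
 * 0xffffffffffffffff << 39 == 0xffffff8000000000, and VMALLOC_END is
 * 0xffffffc000000000 - 0x400000000 - 0x10000 == 0xffffffbbffff0000,
 * leaving a 64KB guard and then 16GB below PAGE_OFFSET for the vmemmap
 * array defined below.
 */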

#define vmemmap			((struct page *)(VMALLOC_END + SZ_64K))

#define FIRST_USER_ADDRESS	0

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
#ifndef CONFIG_ARM64_64K_PAGES
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))
#endif
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime to
 * include the cacheable and bufferable bits based on memory policy, as well
 * as any architecture-dependent bits such as global/ASID and SMP shared
 * mapping bits.
 */
#define _PAGE_DEFAULT		(PTE_TYPE_PAGE | PTE_AF)

extern pgprot_t pgprot_default;

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define _MOD_PROT(p, b)		__pgprot_modify(p, 0, b)
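/*
 * Usage sketch: __pgprot_modify() clears the bits in "mask", then ORs in
 * "bits"; _MOD_PROT() is the variant with an empty mask. For instance,
 * pgprot_noncached() below swaps only the memory attribute index field:
 *
 *	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
 */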

#define PAGE_NONE		__pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN)
#define PAGE_SHARED		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
#define PAGE_COPY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
#define PAGE_READONLY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
#define PAGE_KERNEL		_MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)

#define PAGE_HYP		_MOD_PROT(pgprot_default, PTE_HYP)
#define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)

#define PAGE_S2			__pgprot_modify(pgprot_default, PTE_S2_MEMATTR_MASK, PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
#define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN)

#define __PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN)
#define __PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define __PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
#define __PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define __PAGE_COPY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
#define __PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define __PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)

#endif /* __ASSEMBLY__ */

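/*
 * These populate protection_map[]: the __P{xwr} entries are used for
 * private (copy-on-write) mappings and the __S{xwr} entries for shared
 * mappings, where the three digits stand for the PROT_EXEC/PROT_WRITE/
 * PROT_READ combination requested by userspace (e.g. __P101 is a private
 * read+exec mapping).
 */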
#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY_EXEC
#define __P101  __PAGE_READONLY_EXEC
#define __P110  __PAGE_COPY_EXEC
#define __P111  __PAGE_COPY_EXEC

#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY_EXEC
#define __S101  __PAGE_READONLY_EXEC
#define __S110  __PAGE_SHARED_EXEC
#define __S111  __PAGE_SHARED_EXEC

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)

#define pfn_pte(pfn,prot)	(__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
#define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + pte_index(addr))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))
#define pte_dirty(pte)		(pte_val(pte) & PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & PTE_AF)
#define pte_special(pte)	(pte_val(pte) & PTE_SPECIAL)
#define pte_write(pte)		(!(pte_val(pte) & PTE_RDONLY))
#define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))

#define pte_valid_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))

#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, |= PTE_RDONLY);
PTE_BIT_FUNC(mkwrite,   &= ~PTE_RDONLY);
PTE_BIT_FUNC(mkclean,   &= ~PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,   |= PTE_DIRTY);
PTE_BIT_FUNC(mkold,     &= ~PTE_AF);
PTE_BIT_FUNC(mkyoung,   |= PTE_AF);
PTE_BIT_FUNC(mkspecial, |= PTE_SPECIAL);
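/*
 * For reference, PTE_BIT_FUNC(wrprotect, |= PTE_RDONLY) above expands to:
 *
 *	static inline pte_t pte_wrprotect(pte_t pte)
 *	{
 *		pte_val(pte) |= PTE_RDONLY;
 *		return pte;
 *	}
 */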

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);

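/*
 * set_pte_at() below implements the software dirty-bit scheme: ARMv8.0 has
 * no hardware dirty bit, so a valid user PTE that is not (yet) dirty is
 * installed read-only. The first write then faults, the generic mm code
 * marks the pte dirty via pte_mkdirty() and re-installs it, and the
 * wrprotect step is skipped from then on.
 */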
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_valid_user(pte)) {
		if (pte_exec(pte))
			__sync_icache_dcache(pte, addr);
		if (!pte_dirty(pte))
			pte = pte_wrprotect(pte);
	}

	set_pte(ptep, pte);
}

/*
 * Huge pte definitions.
 */
#define pte_huge(pte)		(!(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		2
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

#define __HAVE_ARCH_PTE_SPECIAL

/*
 * Software PMD bits for THP
 */

#define PMD_SECT_DIRTY		(_AT(pmdval_t, 1) << 55)
#define PMD_SECT_SPLITTING	(_AT(pmdval_t, 1) << 57)

/*
 * THP definitions.
 */
#define pmd_young(pmd)		(pmd_val(pmd) & PMD_SECT_AF)

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		(!(pmd_val(pmd) & PMD_SECT_RDONLY))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
#endif

#define PMD_BIT_FUNC(fn,op) \
static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }

PMD_BIT_FUNC(wrprotect,	|= PMD_SECT_RDONLY);
PMD_BIT_FUNC(mkold,	&= ~PMD_SECT_AF);
PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING);
PMD_BIT_FUNC(mkwrite,   &= ~PMD_SECT_RDONLY);
PMD_BIT_FUNC(mkdirty,   |= PMD_SECT_DIRTY);
PMD_BIT_FUNC(mkyoung,   |= PMD_SECT_AF);
PMD_BIT_FUNC(mknotpresent, &= ~PMD_TYPE_MASK);

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#define pmd_pfn(pmd)		(((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	const pmdval_t mask = PMD_SECT_USER | PMD_SECT_PXN | PMD_SECT_UXN |
			      PMD_SECT_RDONLY | PMD_SECT_PROT_NONE |
			      PMD_SECT_VALID;
	pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
	return pmd;
}

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pmd(pmdp, pmd)

static inline int has_transparent_hugepage(void)
{
	return 1;
}

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_GRE))
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))

/* bit 1 of the descriptor distinguishes a table entry from a section */
#define pmd_bad(pmd)		(!(pmd_val(pmd) & 2))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
	dsb();
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#ifndef CONFIG_ARM64_64K_PAGES

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & 2))
#define pud_present(pud)	(pud_val(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
	dsb();
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline pmd_t *pud_page_vaddr(pud_t pud)
{
	return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
}

#endif	/* CONFIG_ARM64_64K_PAGES */

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

/* Find an entry in the second-level page table. */
#ifndef CONFIG_ARM64_64K_PAGES
#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
}
#endif

/* Find an entry in the third-level page table. */
#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
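/*
 * Illustrative sketch (not part of the kernel interface, name hypothetical):
 * how the lookup macros above compose into a full software walk to a kernel
 * PTE. Guarded out since pud_offset() is supplied by the folded asm-generic
 * layer and depends on the page-size configuration.
 */
#if 0
static pte_t *example_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);	/* first level, swapper_pg_dir */
	pud_t *pud = pud_offset(pgd, addr);	/* folded onto the pgd here */
	pmd_t *pmd = pmd_offset(pud, addr);	/* second level */
	return pte_offset_kernel(pmd, addr);	/* third level */
}
#endif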

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];

#define SWAPPER_DIR_SIZE	(3 * PAGE_SIZE)
#define IDMAP_DIR_SIZE		(2 * PAGE_SIZE)

/*
 * Encode and decode a swap entry:
 *	bits 0, 2:	present (must both be zero)
 *	bit  3:		PTE_FILE (must be zero)
 *	bits 4-9:	swap type
 *	bits 10-63:	swap offset
 */
#define __SWP_TYPE_SHIFT	4
#define __SWP_TYPE_BITS		6
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
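/*
 * Worked example: __swp_entry(3, 0x1234) packs to
 * (3 << 4) | (0x1234 << 10) == 0x48d030. Bits 0 and 2 are clear, so the
 * resulting PTE is not pte_present(), and bit 3 is clear, so it is not
 * pte_file() either.
 */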

/*
 * Ensure that there are not more swap files than can be encoded in the
 * kernel PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/*
 * Encode and decode a file entry:
 *	bits 0, 2:	present (must both be zero)
 *	bit  3:		PTE_FILE (must be one)
 *	bits 4-63:	file offset / PAGE_SIZE
 */
#define pte_file(pte)		(pte_val(pte) & PTE_FILE)
#define pte_to_pgoff(x)		(pte_val(x) >> 4)
#define pgoff_to_pte(x)		__pte(((x) << 4) | PTE_FILE)
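/*
 * Worked example: pgoff_to_pte(0x100) yields (0x100 << 4) | PTE_FILE
 * == 0x1008; pte_to_pgoff() recovers 0x100 by shifting the offset back
 * down, discarding the low four bits.
 */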

#define PTE_FILE_MAX_BITS	60

extern int kern_addr_valid(unsigned long addr);

#include <asm-generic/pgtable.h>

#define pgtable_cache_init() do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */