/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_

#include <asm-generic/pgtable-nop4d.h>

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/bug.h>
#include <linux/sizes.h>
#endif

/*
 * Common bits between hash and Radix page table
 */

#define _PAGE_EXEC		0x00001 /* execute permission */
#define _PAGE_WRITE		0x00002 /* write access allowed */
#define _PAGE_READ		0x00004	/* read access allowed */
#define _PAGE_RW		(_PAGE_READ | _PAGE_WRITE)
#define _PAGE_RWX		(_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define _PAGE_PRIVILEGED	0x00008 /* kernel access only */
#define _PAGE_SAO		0x00010 /* Strong access order */
#define _PAGE_NON_IDEMPOTENT	0x00020 /* non idempotent memory */
#define _PAGE_TOLERANT		0x00030 /* tolerant memory, cache inhibited */
#define _PAGE_DIRTY		0x00080 /* C: page changed */
#define _PAGE_ACCESSED		0x00100 /* R: page referenced */
/*
 * Software bits
 */
#define _RPAGE_SW0		0x2000000000000000UL
#define _RPAGE_SW1		0x00800
#define _RPAGE_SW2		0x00400
#define _RPAGE_SW3		0x00200
#define _RPAGE_RSV1		0x00040UL

#define _RPAGE_PKEY_BIT4	0x1000000000000000UL
#define _RPAGE_PKEY_BIT3	0x0800000000000000UL
#define _RPAGE_PKEY_BIT2	0x0400000000000000UL
#define _RPAGE_PKEY_BIT1	0x0200000000000000UL
#define _RPAGE_PKEY_BIT0	0x0100000000000000UL

#define _PAGE_PTE		0x4000000000000000UL	/* distinguishes PTEs from pointers */
#define _PAGE_PRESENT		0x8000000000000000UL	/* pte contains a translation */
/*
 * We need to mark a pmd pte invalid while splitting. We can do that by clearing
 * the _PAGE_PRESENT bit. But then that will be taken as a swap pte. In order to
 * differentiate between the two, use a SW field when invalidating.
 *
 * We do the same temporary invalidation of a regular pte entry in
 * ptep_set_access_flags.
 *
 * This bit is meaningful only when _PAGE_PRESENT is cleared.
 */
#define _PAGE_INVALID		_RPAGE_SW0

/*
 * Top and bottom bits of RPN which can be used by hash
 * translation mode, because we expect them to be zero
 * otherwise.
 */
#define _RPAGE_RPN0		0x01000
#define _RPAGE_RPN1		0x02000
#define _RPAGE_RPN43		0x0080000000000000UL
#define _RPAGE_RPN42		0x0040000000000000UL
#define _RPAGE_RPN41		0x0020000000000000UL

/* Max physical address bit as per radix table */
#define _RPAGE_PA_MAX		56

/*
 * Max physical address bit we will use for now.
 *
 * This is mostly a hardware limitation; for now Power9 has
 * a 51 bit limit.
 *
 * This is different from the number of physical bits required to address
 * the last byte of memory. That is defined by MAX_PHYSMEM_BITS.
 * MAX_PHYSMEM_BITS is a linux limitation imposed by the maximum
 * number of sections we can support (SECTIONS_SHIFT).
 *
 * This is also different from the Radix page table limitation above and
 * should always be less than that. The limit is chosen such that
 * we can overload the bits between _RPAGE_PA_MAX and _PAGE_PA_MAX
 * for hash linux page table specific bits.
 *
 * In order to be compatible with future hardware generations we keep
 * some headroom and limit this for now to 53.
 */
#define _PAGE_PA_MAX		53

#define _PAGE_SOFT_DIRTY	_RPAGE_SW3 /* software: software dirty tracking */
#define _PAGE_SPECIAL		_RPAGE_SW2 /* software: special page */
#define _PAGE_DEVMAP		_RPAGE_SW1 /* software: ZONE_DEVICE page */

/*
 * Drivers request cache-inhibited pte mappings using _PAGE_NO_CACHE.
 * Instead of fixing all of them, add an alternate define which
 * maps to a CI pte mapping.
 */
#define _PAGE_NO_CACHE		_PAGE_TOLERANT
/*
 * We support a _RPAGE_PA_MAX bit real address in the pte. On the linux side
 * we are limited by _PAGE_PA_MAX. Clear everything above _PAGE_PA_MAX
 * and everything below PAGE_SHIFT.
 */
#define PTE_RPN_MASK	(((1UL << _PAGE_PA_MAX) - 1) & (PAGE_MASK))
#define PTE_RPN_SHIFT	PAGE_SHIFT
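/*
 * Worked example (illustrative only; assumes 64K pages, i.e.
 * PAGE_SHIFT = 16, and _PAGE_PA_MAX = 53):
 *
 *	(1UL << 53) - 1	= 0x001fffffffffffff	(bits 0..52)
 *	PAGE_MASK	= 0xffffffffffff0000	(clears bits 0..15)
 *	PTE_RPN_MASK	= 0x001fffffffff0000	(bits 16..52)
 *
 * so the real page number occupies bits PAGE_SHIFT..(_PAGE_PA_MAX - 1)
 * of the pte, leaving the bits above free for software and status bits.
 */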
/*
 * Set of bits not changed in pmd_modify. Even though we have hash specific bits
 * in here, on radix we expect them to be zero.
 */
#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
			 _PAGE_ACCESSED | H_PAGE_THP_HUGE | _PAGE_PTE | \
			 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
/*
 * user access blocked by key
 */
#define _PAGE_KERNEL_RW		(_PAGE_PRIVILEGED | _PAGE_RW | _PAGE_DIRTY)
#define _PAGE_KERNEL_RO		(_PAGE_PRIVILEGED | _PAGE_READ)
#define _PAGE_KERNEL_ROX	(_PAGE_PRIVILEGED | _PAGE_READ | _PAGE_EXEC)
#define _PAGE_KERNEL_RWX	(_PAGE_PRIVILEGED | _PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
/*
 * _PAGE_CHG_MASK masks the bits that are to be preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
			 _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE |	\
			 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)

/*
 * We define 2 sets of base prot bits, one for basic pages (ie,
 * cacheable kernel and user pages) and one for non cacheable
 * pages. We always set _PAGE_COHERENT when SMP is enabled or
 * the processor might need it for DMA coherency.
 */
#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_BASE	(_PAGE_BASE_NC)

/* Permission masks used to generate the __P and __S table,
 *
 * Note: __pgprot is defined in arch/powerpc/include/asm/page.h
 *
 * Write permissions imply read permissions for now (we could make write-only
 * pages on BookE but we don't bother for now). Execute permission control is
 * possible on platforms that define _PAGE_EXEC
 */
#define PAGE_NONE	__pgprot(_PAGE_BASE | _PAGE_PRIVILEGED)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_RW)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
/* Radix only, Hash uses PAGE_READONLY_X + execute-only pkey instead */
#define PAGE_EXECONLY	__pgprot(_PAGE_BASE | _PAGE_EXEC)

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_TOLERANT)
#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NON_IDEMPOTENT)
#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)

#ifndef __ASSEMBLY__
/*
 * page table defines
 */
extern unsigned long __pte_index_size;
extern unsigned long __pmd_index_size;
extern unsigned long __pud_index_size;
extern unsigned long __pgd_index_size;
extern unsigned long __pud_cache_index;
#define PTE_INDEX_SIZE  __pte_index_size
#define PMD_INDEX_SIZE  __pmd_index_size
#define PUD_INDEX_SIZE  __pud_index_size
#define PGD_INDEX_SIZE  __pgd_index_size
/* pmd tables use page table fragments */
#define PMD_CACHE_INDEX  0
#define PUD_CACHE_INDEX __pud_cache_index
/*
 * Because of the use of pte fragments and THP, the sizes of the page tables
 * are not always derived from the index sizes above.
 */
extern unsigned long __pte_table_size;
extern unsigned long __pmd_table_size;
extern unsigned long __pud_table_size;
extern unsigned long __pgd_table_size;
#define PTE_TABLE_SIZE	__pte_table_size
#define PMD_TABLE_SIZE	__pmd_table_size
#define PUD_TABLE_SIZE	__pud_table_size
#define PGD_TABLE_SIZE	__pgd_table_size

extern unsigned long __pmd_val_bits;
extern unsigned long __pud_val_bits;
extern unsigned long __pgd_val_bits;
#define PMD_VAL_BITS	__pmd_val_bits
#define PUD_VAL_BITS	__pud_val_bits
#define PGD_VAL_BITS	__pgd_val_bits

extern unsigned long __pte_frag_nr;
#define PTE_FRAG_NR __pte_frag_nr
extern unsigned long __pte_frag_size_shift;
#define PTE_FRAG_SIZE_SHIFT __pte_frag_size_shift
#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)

extern unsigned long __pmd_frag_nr;
#define PMD_FRAG_NR __pmd_frag_nr
extern unsigned long __pmd_frag_size_shift;
#define PMD_FRAG_SIZE_SHIFT __pmd_frag_size_shift
#define PMD_FRAG_SIZE (1UL << PMD_FRAG_SIZE_SHIFT)

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
#define PTRS_PER_PUD	(1 << PUD_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

#define MAX_PTRS_PER_PTE ((H_PTRS_PER_PTE > R_PTRS_PER_PTE) ? H_PTRS_PER_PTE : R_PTRS_PER_PTE)
#define MAX_PTRS_PER_PMD ((H_PTRS_PER_PMD > R_PTRS_PER_PMD) ? H_PTRS_PER_PMD : R_PTRS_PER_PMD)
#define MAX_PTRS_PER_PUD ((H_PTRS_PER_PUD > R_PTRS_PER_PUD) ? H_PTRS_PER_PUD : R_PTRS_PER_PUD)
#define MAX_PTRS_PER_PGD	(1 << (H_PGD_INDEX_SIZE > RADIX_PGD_INDEX_SIZE ? \
				       H_PGD_INDEX_SIZE : RADIX_PGD_INDEX_SIZE))

/* PMD_SHIFT determines what a second-level page table entry can map */
#define PMD_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PUD_SHIFT determines what a third-level page table entry can map */
#define PUD_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))

/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
#define PGDIR_SHIFT	(PUD_SHIFT + PUD_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
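/*
 * Worked example (illustrative, assuming radix with 64K pages, where
 * PTE_INDEX_SIZE = 5 and PMD_INDEX_SIZE = 9): PMD_SHIFT = 16 + 5 = 21,
 * so a PMD leaf maps a 2M region, and PUD_SHIFT = 21 + 9 = 30, so a
 * PUD leaf maps 1G. The index sizes are runtime variables, so these
 * values differ under hash translation or with 4K pages.
 */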

/* Bits to mask out from a PMD to get to the PTE page */
#define PMD_MASKED_BITS		0xc0000000000000ffUL
/* Bits to mask out from a PUD to get to the PMD page */
#define PUD_MASKED_BITS		0xc0000000000000ffUL
/* Bits to mask out from a PGD to get to the PUD page */
#define P4D_MASKED_BITS		0xc0000000000000ffUL

/*
 * Used as an indicator for rcu callback functions
 */
enum pgtable_index {
	PTE_INDEX = 0,
	PMD_INDEX,
	PUD_INDEX,
	PGD_INDEX,
	/*
	 * Below are used with 4k page size and hugetlb
	 */
	HTLB_16M_INDEX,
	HTLB_16G_INDEX,
};

extern unsigned long __vmalloc_start;
extern unsigned long __vmalloc_end;
#define VMALLOC_START	__vmalloc_start
#define VMALLOC_END	__vmalloc_end

static inline unsigned int ioremap_max_order(void)
{
	if (radix_enabled())
		return PUD_SHIFT;
	return 7 + PAGE_SHIFT; /* default from linux/vmalloc.h */
}
#define IOREMAP_MAX_ORDER ioremap_max_order()

extern unsigned long __kernel_virt_start;
extern unsigned long __kernel_io_start;
extern unsigned long __kernel_io_end;
#define KERN_VIRT_START __kernel_virt_start
#define KERN_IO_START  __kernel_io_start
#define KERN_IO_END __kernel_io_end

extern struct page *vmemmap;
extern unsigned long pci_io_base;
#endif /* __ASSEMBLY__ */

#include <asm/book3s/64/hash.h>
#include <asm/book3s/64/radix.h>

#if H_MAX_PHYSMEM_BITS > R_MAX_PHYSMEM_BITS
#define  MAX_PHYSMEM_BITS	H_MAX_PHYSMEM_BITS
#else
#define  MAX_PHYSMEM_BITS	R_MAX_PHYSMEM_BITS
#endif

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/pgtable-64k.h>
#else
#include <asm/book3s/64/pgtable-4k.h>
#endif

#include <asm/barrier.h>
/*
 * The IO space itself is carved into the PIO region (ISA and PHB IO space) and
 * the ioremap space
 *
 *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
 *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
 * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
 */
#define FULL_IO_SIZE	0x80000000ul
#define  ISA_IO_BASE	(KERN_IO_START)
#define  ISA_IO_END	(KERN_IO_START + 0x10000ul)
#define  PHB_IO_BASE	(ISA_IO_END)
#define  PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE	(PHB_IO_END)
#define IOREMAP_START	(ioremap_bot)
#define IOREMAP_END	(KERN_IO_END - FIXADDR_SIZE)
#define FIXADDR_SIZE	SZ_32M

#ifndef __ASSEMBLY__

static inline unsigned long pte_update(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep, unsigned long clr,
				       unsigned long set, int huge)
{
	if (radix_enabled())
		return radix__pte_update(mm, addr, ptep, clr, set, huge);
	return hash__pte_update(mm, addr, ptep, clr, set, huge);
}
/*
 * For hash, even if we have _PAGE_ACCESSED = 0, we do a pte_update.
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty.
 *
 * We should be more intelligent about this, but for the moment we override
 * these functions and force a tlb flush unconditionally.
 *
 * For radix: H_PAGE_HASHPTE should be zero. Hence we can use the same
 * function for both hash and radix.
 */
static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
		return 0;
	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
	return (old & _PAGE_ACCESSED) != 0;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __addr, __ptep)	\
({								\
	__ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
})
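
/*
 * Illustrative use (a sketch, not code from this file): page aging in
 * the reclaim path calls this through the generic interface and treats
 * the return value as "referenced since the last scan":
 *
 *	if (ptep_test_and_clear_young(vma, addr, ptep))
 *		;	// recently accessed, keep the page active
 *	else
 *		;	// cold since the last scan, reclaim candidate
 */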

/*
 * On Book3S CPUs, clearing the accessed bit without a TLB flush
 * doesn't cause data corruption. [ It could cause incorrect
 * page aging and the (mistaken) reclaim of hot pages, but the
 * chance of that should be relatively low. ]
 *
 * So as a performance optimization don't flush the TLB when
 * clearing the accessed bit, it will eventually be flushed by
 * a context switch or a VM operation anyway. [ In the rare
 * event of it not getting flushed for a long time the delay
 * shouldn't really matter because there's no real memory
 * pressure for swapout to react to. ]
 *
 * Note: this optimisation also exists in pte_needs_flush() and
 * huge_pmd_needs_flush().
 */
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young ptep_test_and_clear_young

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
#define pmdp_clear_flush_young pmdp_test_and_clear_young

static inline int pte_write(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
}

static inline int pte_read(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_READ));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	if (pte_write(*ptep))
		pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
}

#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	if (pte_write(*ptep))
		pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
	return __pte(old);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	if (full && radix_enabled()) {
		/*
		 * We know that this is a full mm pte clear and
		 * hence can be sure there is no parallel set_pte.
		 */
		return radix__ptep_get_and_clear_full(mm, addr, ptep, full);
	}
	return ptep_get_and_clear(mm, addr, ptep);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	pte_update(mm, addr, ptep, ~0UL, 0, 0);
}

static inline int pte_dirty(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DIRTY));
}

static inline int pte_young(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_ACCESSED));
}

static inline int pte_special(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SPECIAL));
}

static inline bool pte_exec(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_EXEC));
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline bool pte_soft_dirty(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SOFT_DIRTY));
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SOFT_DIRTY));
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_SOFT_DIRTY));
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return (pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE | _PAGE_RWX)) ==
		cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE);
}
#endif /* CONFIG_NUMA_BALANCING */

static inline bool pte_hw_valid(pte_t pte)
{
	return (pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE)) ==
		cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE);
}

static inline int pte_present(pte_t pte)
{
	/*
	 * A pte is considered present if _PAGE_PRESENT is set.
	 * We also need to treat as present a pte that has been temporarily
	 * marked invalid by ptep_set_access_flags. Hence we look for
	 * _PAGE_INVALID if we find _PAGE_PRESENT cleared.
	 */
	if (pte_hw_valid(pte))
		return true;
	return (pte_raw(pte) & cpu_to_be64(_PAGE_INVALID | _PAGE_PTE)) ==
		cpu_to_be64(_PAGE_INVALID | _PAGE_PTE);
}
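
/*
 * To summarise the encodings pte_present() distinguishes (an informal
 * truth table, not an exhaustive list of pte states):
 *
 *	_PAGE_PRESENT set			-> present (HW valid)
 *	_PAGE_PRESENT clear,
 *	_PAGE_INVALID and _PAGE_PTE set		-> present (temporarily
 *						   invalidated, see above)
 *	_PAGE_PRESENT and _PAGE_INVALID clear	-> not present (e.g. an
 *						   empty pte or a swap entry)
 */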

#ifdef CONFIG_PPC_MEM_KEYS
extern bool arch_pte_access_permitted(u64 pte, bool write, bool execute);
#else
static inline bool arch_pte_access_permitted(u64 pte, bool write, bool execute)
{
	return true;
}
#endif /* CONFIG_PPC_MEM_KEYS */

static inline bool pte_user(pte_t pte)
{
	return !(pte_raw(pte) & cpu_to_be64(_PAGE_PRIVILEGED));
}

#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	/*
	 * _PAGE_READ is needed for any access and will be
	 * cleared for PROT_NONE
	 */
	if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
		return false;

	if (write && !pte_write(pte))
		return false;

	return arch_pte_access_permitted(pte_val(pte), write, 0);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	VM_BUG_ON(pfn >> (64 - PAGE_SHIFT));
	VM_BUG_ON((pfn << PAGE_SHIFT) & ~PTE_RPN_MASK);

	return __pte(((pte_basic_t)pfn << PAGE_SHIFT) | pgprot_val(pgprot) | _PAGE_PTE);
}
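
/*
 * Illustrative use (a sketch, not taken from this file): mapping code
 * builds a pte for a cacheable kernel RW mapping of a given pfn with
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *
 * pfn_pte() shifts the pfn into the RPN field, ORs in the protection
 * bits and sets _PAGE_PTE so the entry is recognised as a pte rather
 * than a pointer to a lower-level table.
 */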

/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_WRITE));
}

static inline pte_t pte_exprotect(pte_t pte)
{
	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_EXEC));
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_DIRTY));
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_ACCESSED));
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_EXEC));
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	/*
	 * write implies read, hence set both
	 */
	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_RW));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_DIRTY | _PAGE_SOFT_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SPECIAL));
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SPECIAL | _PAGE_DEVMAP));
}

static inline pte_t pte_mkprivileged(pte_t pte)
{
	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PRIVILEGED));
}

static inline pte_t pte_mkuser(pte_t pte)
{
	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_PRIVILEGED));
}

/*
 * This is potentially called with a pmd as the argument, in which case it's not
 * safe to check _PAGE_DEVMAP unless we also confirm that _PAGE_PTE is set.
 * That's because the bit we use for _PAGE_DEVMAP is not reserved for software
 * use in page directory entries (ie. non-ptes).
 */
static inline int pte_devmap(pte_t pte)
{
	u64 mask = cpu_to_be64(_PAGE_DEVMAP | _PAGE_PTE);

	return (pte_raw(pte) & mask) == mask;
}
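
/*
 * Checking both bits matters when this is called on a pmd: a swap pmd
 * (a THP migration entry, for example) has _PAGE_PTE clear, so even if
 * the bit used for _PAGE_DEVMAP happens to be set in the swap encoding,
 * the check above still returns false. pmd_devmap() below is simply
 * pte_devmap(pmd_pte(pmd)), i.e. this same check applied to the pmd.
 */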

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	/* FIXME!! check whether this needs to be conditional */
	return __pte_raw((pte_raw(pte) & cpu_to_be64(_PAGE_CHG_MASK)) |
			 cpu_to_be64(pgprot_val(newprot)));
}

/* Encode and de-code a swap entry */
#define MAX_SWAPFILES_CHECK() do { \
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
	/*							\
	 * Don't have overlapping bits with _PAGE_HPTEFLAGS.	\
	 * We filter HPTEFLAGS on set_pte.			\
	 */							\
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & SWP_TYPE_MASK); \
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_SOFT_DIRTY);	\
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_EXCLUSIVE);	\
	} while (0)

#define SWP_TYPE_BITS 5
#define SWP_TYPE_MASK		((1UL << SWP_TYPE_BITS) - 1)
#define __swp_type(x)		((x).val & SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val & PTE_RPN_MASK) >> PAGE_SHIFT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
				(type) | (((offset) << PAGE_SHIFT) & PTE_RPN_MASK)})
/*
 * swp_entry_t must be independent of pte bits. We build a swp_entry_t from
 * the swap type and offset we get from swap and convert that to a pte to
 * find a matching pte in the linux page table.
 * Clear bits not found in swap entries here.
 */
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val((pte)) & ~_PAGE_PTE })
#define __swp_entry_to_pte(x)	__pte((x).val | _PAGE_PTE)
#define __pmd_to_swp_entry(pmd)	(__pte_to_swp_entry(pmd_pte(pmd)))
#define __swp_entry_to_pmd(x)	(pte_pmd(__swp_entry_to_pte(x)))
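
/*
 * Illustrative round trip (a sketch, not code from this file): the
 * generic swap layer encodes an entry into a pte with
 *
 *	pte_t pte = __swp_entry_to_pte(__swp_entry(type, offset));
 *
 * and later recovers it with
 *
 *	swp_entry_t e = __pte_to_swp_entry(pte);
 *	type = __swp_type(e); offset = __swp_offset(e);
 *
 * The type lives in the low SWP_TYPE_BITS bits and the offset reuses
 * the RPN field, so neither collides with _PAGE_PTE or with the
 * HPTEFLAGS that are filtered on set_pte.
 */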

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY	_PAGE_SOFT_DIRTY
#else
#define _PAGE_SWP_SOFT_DIRTY	0UL
#endif /* CONFIG_MEM_SOFT_DIRTY */

#define _PAGE_SWP_EXCLUSIVE	_PAGE_NON_IDEMPOTENT

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SWP_SOFT_DIRTY));
}

static inline bool pte_swp_soft_dirty(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SWP_SOFT_DIRTY));
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_SWP_SOFT_DIRTY));
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SWP_EXCLUSIVE));
}

static inline int pte_swp_exclusive(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SWP_EXCLUSIVE));
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_SWP_EXCLUSIVE));
}

static inline bool check_pte_access(unsigned long access, unsigned long ptev)
{
	/*
	 * This checks the _PAGE_RWX and _PAGE_PRESENT bits
	 */
	if (access & ~ptev)
		return false;
	/*
	 * This checks for access to privileged space
	 */
	if ((access & _PAGE_PRIVILEGED) != (ptev & _PAGE_PRIVILEGED))
		return false;

	return true;
}
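
/*
 * Illustrative use (a sketch): a fault path that wants to know whether
 * a userspace write is allowed by this pte could do
 *
 *	unsigned long access = _PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE;
 *
 *	if (!check_pte_access(access, pte_val(pte)))
 *		return false;	// permission missing or privilege mismatch
 *
 * Since a userspace request has _PAGE_PRIVILEGED clear, a kernel-only
 * pte (with _PAGE_PRIVILEGED set) fails the second test above.
 */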
/*
 * Generic functions with hash/radix callbacks
 */

static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	if (radix_enabled())
		return radix__ptep_set_access_flags(vma, ptep, entry,
						    address, psize);
	return hash__ptep_set_access_flags(ptep, entry);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	if (radix_enabled())
		return radix__pte_same(pte_a, pte_b);
	return hash__pte_same(pte_a, pte_b);
}

static inline int pte_none(pte_t pte)
{
	if (radix_enabled())
		return radix__pte_none(pte);
	return hash__pte_none(pte);
}

static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
	VM_WARN_ON(!(pte_raw(pte) & cpu_to_be64(_PAGE_PTE)));
	/*
	 * Keep the _PAGE_PTE added till we are sure we handle _PAGE_PTE
	 * in all the callers.
	 */
	pte = __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PTE));

	if (radix_enabled())
		return radix__set_pte_at(mm, addr, ptep, pte, percpu);
	return hash__set_pte_at(mm, addr, ptep, pte, percpu);
}

#define _PAGE_CACHE_CTL	(_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT)

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NON_IDEMPOTENT);
}

#define pgprot_noncached_wc pgprot_noncached_wc
static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_TOLERANT);
}

#define pgprot_cached pgprot_cached
static inline pgprot_t pgprot_cached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL));
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return pgprot_noncached_wc(prot);
}
/*
 * check whether a pte mapping has the cache-inhibited property
 */
static inline bool pte_ci(pte_t pte)
{
	__be64 pte_v = pte_raw(pte);

	if (((pte_v & cpu_to_be64(_PAGE_CACHE_CTL)) == cpu_to_be64(_PAGE_TOLERANT)) ||
	    ((pte_v & cpu_to_be64(_PAGE_CACHE_CTL)) == cpu_to_be64(_PAGE_NON_IDEMPOTENT)))
		return true;
	return false;
}
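
/*
 * Illustrative relation between the helpers above (a sketch): a driver
 * mapping device registers typically ends up with
 *
 *	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
 *
 * which replaces the cache-control bits with _PAGE_NON_IDEMPOTENT, so
 * any pte built from that pgprot satisfies pte_ci().
 */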

static inline void pmd_clear(pmd_t *pmdp)
{
	if (IS_ENABLED(CONFIG_DEBUG_VM) && !radix_enabled()) {
		/*
		 * Don't use this if we can possibly have a hash page table
		 * entry mapping this.
		 */
		WARN_ON((pmd_val(*pmdp) & (H_PAGE_HASHPTE | _PAGE_PTE)) == (H_PAGE_HASHPTE | _PAGE_PTE));
	}
	*pmdp = __pmd(0);
}

static inline int pmd_none(pmd_t pmd)
{
	return !pmd_raw(pmd);
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * A pmd is considered present if _PAGE_PRESENT is set.
	 * We also need to consider present a pmd that has been marked
	 * invalid during a split. Hence we look for _PAGE_INVALID
	 * if we find _PAGE_PRESENT cleared.
	 */
	if (pmd_raw(pmd) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID))
		return true;

	return false;
}

static inline int pmd_is_serializing(pmd_t pmd)
{
	/*
	 * If the pmd is undergoing a split, the _PAGE_PRESENT bit is clear
	 * and _PAGE_INVALID is set (see pmd_present, pmdp_invalidate).
	 *
	 * This condition may also occur when flushing a pmd while flushing
	 * it (see ptep_modify_prot_start), so callers must ensure this
	 * case is fine as well.
	 */
	if ((pmd_raw(pmd) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID)) ==
						cpu_to_be64(_PAGE_INVALID))
		return true;

	return false;
}

static inline int pmd_bad(pmd_t pmd)
{
	if (radix_enabled())
		return radix__pmd_bad(pmd);
	return hash__pmd_bad(pmd);
}

static inline void pud_clear(pud_t *pudp)
{
	if (IS_ENABLED(CONFIG_DEBUG_VM) && !radix_enabled()) {
		/*
		 * Don't use this if we can possibly have a hash page table
		 * entry mapping this.
		 */
		WARN_ON((pud_val(*pudp) & (H_PAGE_HASHPTE | _PAGE_PTE)) == (H_PAGE_HASHPTE | _PAGE_PTE));
	}
	*pudp = __pud(0);
}

static inline int pud_none(pud_t pud)
{
	return !pud_raw(pud);
}

static inline int pud_present(pud_t pud)
{
	return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT));
}

extern struct page *pud_page(pud_t pud);
extern struct page *pmd_page(pmd_t pmd);
static inline pte_t pud_pte(pud_t pud)
{
	return __pte_raw(pud_raw(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud_raw(pte_raw(pte));
}

static inline pte_t *pudp_ptep(pud_t *pud)
{
	return (pte_t *)pud;
}

#define pud_pfn(pud)		pte_pfn(pud_pte(pud))
#define pud_dirty(pud)		pte_dirty(pud_pte(pud))
#define pud_young(pud)		pte_young(pud_pte(pud))
#define pud_mkold(pud)		pte_pud(pte_mkold(pud_pte(pud)))
#define pud_wrprotect(pud)	pte_pud(pte_wrprotect(pud_pte(pud)))
#define pud_mkdirty(pud)	pte_pud(pte_mkdirty(pud_pte(pud)))
#define pud_mkclean(pud)	pte_pud(pte_mkclean(pud_pte(pud)))
#define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
#define pud_mkwrite(pud)	pte_pud(pte_mkwrite_novma(pud_pte(pud)))
#define pud_write(pud)		pte_write(pud_pte(pud))

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#define pud_soft_dirty(pud)	pte_soft_dirty(pud_pte(pud))
#define pud_mksoft_dirty(pud)	pte_pud(pte_mksoft_dirty(pud_pte(pud)))
#define pud_clear_soft_dirty(pud) pte_pud(pte_clear_soft_dirty(pud_pte(pud)))
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

static inline int pud_bad(pud_t pud)
{
	if (radix_enabled())
		return radix__pud_bad(pud);
	return hash__pud_bad(pud);
}

#define pud_access_permitted pud_access_permitted
static inline bool pud_access_permitted(pud_t pud, bool write)
{
	return pte_access_permitted(pud_pte(pud), write);
}

#define __p4d_raw(x)	((p4d_t) { __pgd_raw(x) })
static inline __be64 p4d_raw(p4d_t x)
{
	return pgd_raw(x.pgd);
}

#define p4d_write(p4d)		pte_write(p4d_pte(p4d))

static inline void p4d_clear(p4d_t *p4dp)
{
	*p4dp = __p4d(0);
}

static inline int p4d_none(p4d_t p4d)
{
	return !p4d_raw(p4d);
}

static inline int p4d_present(p4d_t p4d)
{
	return !!(p4d_raw(p4d) & cpu_to_be64(_PAGE_PRESENT));
}

static inline pte_t p4d_pte(p4d_t p4d)
{
	return __pte_raw(p4d_raw(p4d));
}

static inline p4d_t pte_p4d(pte_t pte)
{
	return __p4d_raw(pte_raw(pte));
}

static inline int p4d_bad(p4d_t p4d)
{
	if (radix_enabled())
		return radix__p4d_bad(p4d);
	return hash__p4d_bad(p4d);
}

#define p4d_access_permitted p4d_access_permitted
static inline bool p4d_access_permitted(p4d_t p4d, bool write)
{
	return pte_access_permitted(p4d_pte(p4d), write);
}

extern struct page *p4d_page(p4d_t p4d);

/* Pointers in the page table tree are physical addresses */
#define __pgtable_ptr_val(ptr)	__pa(ptr)

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)__va(p4d_val(p4d) & ~P4D_MASKED_BITS);
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)__va(pud_val(pud) & ~PUD_MASKED_BITS);
}

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

static inline int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
	if (radix_enabled()) {
#if defined(CONFIG_PPC_RADIX_MMU) && defined(CONFIG_DEBUG_VM)
		unsigned long page_size = 1 << mmu_psize_defs[mmu_io_psize].shift;
		WARN((page_size != PAGE_SIZE), "I/O page size != PAGE_SIZE");
#endif
		return radix__map_kernel_page(ea, pa, prot, PAGE_SIZE);
	}
	return hash__map_kernel_page(ea, pa, prot);
}

void unmap_kernel_page(unsigned long va);

static inline int __meminit vmemmap_create_mapping(unsigned long start,
						   unsigned long page_size,
						   unsigned long phys)
{
	if (radix_enabled())
		return radix__vmemmap_create_mapping(start, page_size, phys);
	return hash__vmemmap_create_mapping(start, page_size, phys);
}

#ifdef CONFIG_MEMORY_HOTPLUG
static inline void vmemmap_remove_mapping(unsigned long start,
					  unsigned long page_size)
{
	if (radix_enabled())
		return radix__vmemmap_remove_mapping(start, page_size);
	return hash__vmemmap_remove_mapping(start, page_size);
}
#endif

#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (radix_enabled())
		radix__kernel_map_pages(page, numpages, enable);
	else
		hash__kernel_map_pages(page, numpages, enable);
}
#endif

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte_raw(pmd_raw(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd_raw(pte_raw(pte));
}

static inline pte_t *pmdp_ptep(pmd_t *pmd)
{
	return (pte_t *)pmd;
}
#define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkwrite_novma(pmd)	pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)))

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#define pmd_soft_dirty(pmd)    pte_soft_dirty(pmd_pte(pmd))
#define pmd_mksoft_dirty(pmd)  pte_pmd(pte_mksoft_dirty(pmd_pte(pmd)))
#define pmd_clear_soft_dirty(pmd) pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd)))

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define pmd_swp_mksoft_dirty(pmd)	pte_pmd(pte_swp_mksoft_dirty(pmd_pte(pmd)))
#define pmd_swp_soft_dirty(pmd)		pte_swp_soft_dirty(pmd_pte(pmd))
#define pmd_swp_clear_soft_dirty(pmd)	pte_pmd(pte_swp_clear_soft_dirty(pmd_pte(pmd)))
#endif
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#ifdef CONFIG_NUMA_BALANCING
static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_access_permitted pmd_access_permitted
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
	/*
	 * pmdp_invalidate sets this combination (which is not caught by
	 * the !pte_present() check in pte_access_permitted), to prevent
	 * lock-free lookups, as part of the serialize_against_pte_lookup()
	 * synchronisation.
	 *
	 * This also catches the case where the PTE's hardware PRESENT bit is
	 * cleared while TLB is flushed, which is suboptimal but should not
	 * be frequent.
	 */
	if (pmd_is_serializing(pmd))
		return false;

	return pte_access_permitted(pmd_pte(pmd), write);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
extern pud_t pfn_pud(unsigned long pfn, pgprot_t pgprot);
extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);
extern void set_pud_at(struct mm_struct *mm, unsigned long addr,
		       pud_t *pudp, pud_t pud);

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmd)
{
}

static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
					unsigned long addr, pud_t *pud)
{
}

extern int hash__has_transparent_hugepage(void);
static inline int has_transparent_hugepage(void)
{
	if (radix_enabled())
		return radix__has_transparent_hugepage();
	return hash__has_transparent_hugepage();
}
#define has_transparent_hugepage has_transparent_hugepage

static inline int has_transparent_pud_hugepage(void)
{
	if (radix_enabled())
		return radix__has_transparent_pud_hugepage();
	return 0;
}
#define has_transparent_pud_hugepage has_transparent_pud_hugepage

static inline unsigned long
pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp,
		    unsigned long clr, unsigned long set)
{
	if (radix_enabled())
		return radix__pmd_hugepage_update(mm, addr, pmdp, clr, set);
	return hash__pmd_hugepage_update(mm, addr, pmdp, clr, set);
}

static inline unsigned long
pud_hugepage_update(struct mm_struct *mm, unsigned long addr, pud_t *pudp,
		    unsigned long clr, unsigned long set)
{
	if (radix_enabled())
		return radix__pud_hugepage_update(mm, addr, pudp, clr, set);
	BUG();
	return pud_val(*pudp);
}

/*
 * Returns true for pmd migration entries, THP, devmap and hugetlb,
 * but is compile-time dependent on the THP config.
 */
static inline int pmd_large(pmd_t pmd)
{
	return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
}

static inline int pud_large(pud_t pud)
{
	return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
}

/*
 * For radix we should always find H_PAGE_HASHPTE zero. Hence
 * the below will work for radix too
 */
static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pmd_t *pmdp)
{
	unsigned long old;

	if ((pmd_raw(*pmdp) & cpu_to_be64(_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
		return 0;
	old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);
	return ((old & _PAGE_ACCESSED) != 0);
}

static inline int __pudp_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pud_t *pudp)
{
	unsigned long old;

	if ((pud_raw(*pudp) & cpu_to_be64(_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
		return 0;
	old = pud_hugepage_update(mm, addr, pudp, _PAGE_ACCESSED, 0);
	return ((old & _PAGE_ACCESSED) != 0);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pmd_t *pmdp)
{
	if (pmd_write(*pmdp))
		pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
}

#define __HAVE_ARCH_PUDP_SET_WRPROTECT
static inline void pudp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pud_t *pudp)
{
	if (pud_write(*pudp))
		pud_hugepage_update(mm, addr, pudp, _PAGE_WRITE, 0);
}

/*
 * Only returns true for a THP; false for a pmd migration entry.
 * We also need to return true when we come across a pmd that is
 * in the middle of a THP split. While splitting a THP, we mark the pmd
 * invalid (pmdp_invalidate()) before we set it to the pte page
 * address. A pmd_trans_huge() check against a pmd entry during that time
 * should return true.
 * We should not call this on a hugetlb entry; check for a HugeTLB
 * entry using vma->vm_flags instead.
 * The page table walk rule is explained in Documentation/mm/transhuge.rst
 */
static inline int pmd_trans_huge(pmd_t pmd)
{
	if (!pmd_present(pmd))
		return false;

	if (radix_enabled())
		return radix__pmd_trans_huge(pmd);
	return hash__pmd_trans_huge(pmd);
}

static inline int pud_trans_huge(pud_t pud)
{
	if (!pud_present(pud))
		return false;

	if (radix_enabled())
		return radix__pud_trans_huge(pud);
	return 0;
}

#define __HAVE_ARCH_PMD_SAME
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	if (radix_enabled())
		return radix__pmd_same(pmd_a, pmd_b);
	return hash__pmd_same(pmd_a, pmd_b);
}

#define pud_same pud_same
static inline int pud_same(pud_t pud_a, pud_t pud_b)
{
	if (radix_enabled())
		return radix__pud_same(pud_a, pud_b);
	return hash__pud_same(pud_a, pud_b);
}

static inline pmd_t __pmd_mkhuge(pmd_t pmd)
{
	if (radix_enabled())
		return radix__pmd_mkhuge(pmd);
	return hash__pmd_mkhuge(pmd);
}

static inline pud_t __pud_mkhuge(pud_t pud)
{
	if (radix_enabled())
		return radix__pud_mkhuge(pud);
	BUG();
	return pud;
}

/*
 * pfn_pmd() returns a pmd_t that can be used as a pmd pte entry.
 */
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	if (radix_enabled())
		WARN_ON((pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE)) == 0);
	else
		WARN_ON((pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE | H_PAGE_THP_HUGE)) !=
			cpu_to_be64(_PAGE_PTE | H_PAGE_THP_HUGE));
#endif
	return pmd;
}

static inline pud_t pud_mkhuge(pud_t pud)
{
#ifdef CONFIG_DEBUG_VM
	if (radix_enabled())
		WARN_ON((pud_raw(pud) & cpu_to_be64(_PAGE_PTE)) == 0);
	else
		WARN_ON(1);
#endif
	return pud;
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
#define __HAVE_ARCH_PUDP_SET_ACCESS_FLAGS
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp);
#define __HAVE_ARCH_PUDP_TEST_AND_CLEAR_YOUNG
extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long address, pud_t *pudp);

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	if (radix_enabled())
		return radix__pmdp_huge_get_and_clear(mm, addr, pmdp);
	return hash__pmdp_huge_get_and_clear(mm, addr, pmdp);
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pud_t *pudp)
{
	if (radix_enabled())
		return radix__pudp_huge_get_and_clear(mm, addr, pudp);
	BUG();
	return *pudp;
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp)
{
	if (radix_enabled())
		return radix__pmdp_collapse_flush(vma, address, pmdp);
	return hash__pmdp_collapse_flush(vma, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
				   unsigned long addr,
				   pmd_t *pmdp, int full);

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
pud_t pudp_huge_get_and_clear_full(struct vm_area_struct *vma,
				   unsigned long addr,
				   pud_t *pudp, int full);

#define __HAVE_ARCH_PGTABLE_DEPOSIT
static inline void pgtable_trans_huge_deposit(struct mm_struct *mm,
					      pmd_t *pmdp, pgtable_t pgtable)
{
	if (radix_enabled())
		return radix__pgtable_trans_huge_deposit(mm, pmdp, pgtable);
	return hash__pgtable_trans_huge_deposit(mm, pmdp, pgtable);
}

#define __HAVE_ARCH_PGTABLE_WITHDRAW
static inline pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm,
						    pmd_t *pmdp)
{
	if (radix_enabled())
		return radix__pgtable_trans_huge_withdraw(mm, pmdp);
	return hash__pgtable_trans_huge_withdraw(mm, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			     pmd_t *pmdp);

#define pmd_move_must_withdraw pmd_move_must_withdraw
struct spinlock;
extern int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
				  struct spinlock *old_pmd_ptl,
				  struct vm_area_struct *vma);
/*
 * Hash translation mode uses the deposited table to store hash pte
 * slot information.
 */
#define arch_needs_pgtable_deposit arch_needs_pgtable_deposit
static inline bool arch_needs_pgtable_deposit(void)
{
	if (radix_enabled())
		return false;
	return true;
}
extern void serialize_against_pte_lookup(struct mm_struct *mm);

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	if (radix_enabled())
		return radix__pmd_mkdevmap(pmd);
	return hash__pmd_mkdevmap(pmd);
}

static inline pud_t pud_mkdevmap(pud_t pud)
{
	if (radix_enabled())
		return radix__pud_mkdevmap(pud);
	BUG();
	return pud;
}

static inline int pmd_devmap(pmd_t pmd)
{
	return pte_devmap(pmd_pte(pmd));
}

static inline int pud_devmap(pud_t pud)
{
	return pte_devmap(pud_pte(pud));
}

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
			     pte_t *, pte_t, pte_t);

/*
 * Returns true for a R -> RW upgrade of pte
 */
static inline bool is_pte_rw_upgrade(unsigned long old_val, unsigned long new_val)
{
	if (!(old_val & _PAGE_READ))
		return false;

	if ((!(old_val & _PAGE_WRITE)) && (new_val & _PAGE_WRITE))
		return true;

	return false;
}
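
/*
 * Illustrative cases (a sketch): with old_val = _PAGE_READ and
 * new_val = _PAGE_READ | _PAGE_WRITE this returns true (R -> RW
 * upgrade). If old_val lacks _PAGE_READ (e.g. a PROT_NONE pte) or
 * already has _PAGE_WRITE set, it returns false.
 */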

/*
 * Like pmd_huge() and pmd_large(), but works regardless of config options
 */
#define pmd_is_leaf pmd_is_leaf
#define pmd_leaf pmd_is_leaf
static inline bool pmd_is_leaf(pmd_t pmd)
{
	return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
}

#define pud_is_leaf pud_is_leaf
#define pud_leaf pud_is_leaf
static inline bool pud_is_leaf(pud_t pud)
{
	return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
}

#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */