/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_PGTABLE_H
#define _ASM_POWERPC_NOHASH_PGTABLE_H

#if defined(CONFIG_PPC64)
#include <asm/nohash/64/pgtable.h>
#else
#include <asm/nohash/32/pgtable.h>
#endif

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
				 _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
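
/*
 * Illustrative sketch (an assumption, not part of the original file):
 * mapping one device page uncached and guarded with the masks above.
 * map_kernel_page() is the powerpc-internal helper assumed here, and
 * the addresses are placeholders.
 *
 *	err = map_kernel_page(virt, (phys_addr_t)dev_pa, PAGE_KERNEL_NCG);
 *	if (err)
 *		return err;
 */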

/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write protect the kernel text
 * on platforms where such control is possible.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
#endif

/* Make module code happy. We don't set RO yet */
#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X

/* Advertise special mapping type for AGP */
#define PAGE_AGP		(PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP

#ifndef __ASSEMBLY__

/* Generic accessors to PTE bits */
#ifndef pte_write
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_RW;
}
#endif
static inline int pte_read(pte_t pte)		{ return 1; }
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline bool pte_hashpte(pte_t pte)	{ return false; }
static inline bool pte_ci(pte_t pte)		{ return pte_val(pte) & _PAGE_NO_CACHE; }
static inline bool pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_EXEC; }

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/linux/pgtable.h. On powerpc, this will only
 * work for user pages and always return true for kernel pages.
 */
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !pte_user(pte);
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */
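
/*
 * Illustrative sketch of how such PTEs arise (an assumption about the
 * generic mm path, not defined here): NUMA balancing remaps pages with
 * PROT_NONE, roughly
 *
 *	pte = pte_modify(pte, PAGE_NONE);
 *
 * and on these MMUs PAGE_NONE keeps _PAGE_PRESENT but drops _PAGE_USER,
 * which is exactly the combination pte_protnone() tests for.
 */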

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hw_valid(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

/*
 * Don't just check for any non-zero bits in _PAGE_USER, since for book3e
 * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
 * _PAGE_USER.  Need to explicitly match the _PAGE_BAP_UR bit in that case too.
 */
#ifndef pte_user
static inline bool pte_user(pte_t pte)
{
	return (pte_val(pte) & _PAGE_USER) == _PAGE_USER;
}
#endif
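
/*
 * Illustrative worked example (book3e with 64-bit PTEs, where
 * _PAGE_USER is _PAGE_BAP_UR | _PAGE_BAP_SR): a kernel mapping such as
 * PAGE_KERNEL_X sets _PAGE_BAP_SR alone, so
 *
 *	pte_val(pte) & _PAGE_USER			-> non-zero (wrong)
 *	(pte_val(pte) & _PAGE_USER) == _PAGE_USER	-> false (correct)
 *
 * which is why pte_user() matches the full mask rather than any bit.
 */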

/*
 * We only find the page table entry at the last level, so there is no
 * need for accessors at the other levels.
 */
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	/*
	 * Read access is controlled by the _PAGE_USER bit; WRITE and
	 * EXECUTE mappings also have _PAGE_READ set.
	 */
	if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
		return false;

	if (write && !pte_write(pte))
		return false;

	return true;
}
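
/*
 * Illustrative sketch of the intended caller pattern (generic mm code,
 * assumed rather than defined here): a fast-GUP style walker gates
 * everything on this one check, e.g.
 *
 *	pte_t pte = READ_ONCE(*ptep);
 *
 *	if (!pte_access_permitted(pte, flags & FOLL_WRITE))
 *		return 0;
 */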

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return pte_val(pte) >> PTE_RPN_SHIFT;
}
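
/*
 * Illustrative property of the pair above, assuming the PFN fits in
 * the PTE: they round-trip, i.e.
 *
 *	pte_pfn(pfn_pte(pfn, prot)) == pfn
 *
 * because the RPN lives at or above PTE_RPN_SHIFT while the protection
 * bits stay below it.
 */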

/* Generic modifiers for PTE bits */
static inline pte_t pte_exprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_EXEC);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

#ifndef pte_mkhuge
static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte));
}
#endif

#ifndef pte_mkprivileged
static inline pte_t pte_mkprivileged(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_USER);
}
#endif

#ifndef pte_mkuser
static inline pte_t pte_mkuser(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_USER);
}
#endif

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
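
/*
 * Illustrative sketch of the intended caller pattern (generic mm code,
 * assumed rather than defined here): mprotect()-style updates rely on
 * pte_modify() preserving the bits in _PAGE_CHG_MASK (the PFN plus
 * dirty/accessed/special state) while swapping the protection, e.g.
 *
 *	pte = pte_modify(pte, vm_get_page_prot(vma->vm_flags));
 */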

/* Insert a PTE: the top-level function is out of line. It uses an
 * inline low-level function in the respective pgtable-* files.
 */
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		       pte_t pte);

/* This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
	/* The special case is 32-bit with 64-bit PTE. Here we can just
	 * store the two halves in the right order with a barrier in
	 * between: the low word, which holds the valid bits, goes last,
	 * so a walker never sees a half-written entry.
	 * In the percpu case, we fall back to the simple update.
	 */
	if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) {
		__asm__ __volatile__("\
			stw%X0 %2,%0\n\
			eieio\n\
			stw%X1 %L2,%1"
		: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
		: "r" (pte) : "memory");
		return;
	}
	/* Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
	ptep->pte = ptep->pte1 = ptep->pte2 = ptep->pte3 = pte_val(pte);
#else
	*ptep = pte;
#endif

	/*
	 * With hardware tablewalk, a sync is needed to ensure that
	 * subsequent accesses see the PTE we just wrote.  Unlike userspace
	 * mappings, we can't tolerate spurious faults, so make sure
	 * the new PTE will be seen the first time.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3E_64) && is_kernel_addr(addr))
		mb();
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);

/*
 * Macros to mark a page protection value as "uncacheable", cached,
 * write-through, etc.
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
				            _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
				            _PAGE_NO_CACHE))

#define pgprot_cached(prot)       (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
				            _PAGE_COHERENT))

#if _PAGE_WRITETHRU != 0
#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
				            _PAGE_COHERENT | _PAGE_WRITETHRU))
#else
#define pgprot_cached_wthru(prot)	pgprot_noncached(prot)
#endif

#define pgprot_cached_noncoherent(prot) \
		(__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))

#define pgprot_writecombine pgprot_noncached_wc
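
/*
 * Illustrative sketch (a generic driver pattern, assumed rather than
 * defined here): an mmap() handler for MMIO strips caching before
 * establishing the mapping, e.g.
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 */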

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

#ifdef CONFIG_HUGETLB_PAGE
static inline int hugepd_ok(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
	return ((hpd_val(hpd) & _PMD_PAGE_MASK) == _PMD_PAGE_8M);
#else
	/* We clear the top bit to indicate hugepd */
	return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0);
#endif
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int pgd_huge(pgd_t pgd)
{
	return 0;
}
#define pgd_huge		pgd_huge

#define is_hugepd(hpd)		(hugepd_ok(hpd))
#endif

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 */
#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_HUGETLB_PAGE)
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
#else
static inline
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {}
#endif

#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_NOHASH_PGTABLE_H */