/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_PGTABLE_H
#define _ASM_POWERPC_NOHASH_PGTABLE_H

#if defined(CONFIG_PPC64)
#include <asm/nohash/64/pgtable.h>
#else
#include <asm/nohash/32/pgtable.h>
#endif

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
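
/*
 * Note: PAGE_KERNEL is the normal cacheable protection for kernel RAM
 * mappings. The _NC variants are uncached, for memory-mapped I/O, and
 * _PAGE_GUARDED in the _NCG variant additionally prevents speculative
 * accesses to the mapped range.
 */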

#ifndef __ASSEMBLY__

/* Generic accessors to PTE bits */
#ifndef pte_write
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_RW;
}
#endif
#ifndef pte_read
static inline int pte_read(pte_t pte)		{ return 1; }
#endif
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline bool pte_hashpte(pte_t pte)	{ return false; }
static inline bool pte_ci(pte_t pte)		{ return pte_val(pte) & _PAGE_NO_CACHE; }
static inline bool pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_EXEC; }

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/linux/pgtable.h. On powerpc, this will only work for
 * user pages and will always return true for kernel pages.
 */
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !pte_user(pte);
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hw_valid(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}
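
/*
 * Note: unlike hash MMUs, nohash has no "present but invalidated in the
 * hash table" intermediate state, so pte_hw_valid() deliberately tests
 * the same _PAGE_PRESENT bit as pte_present().
 */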

/*
 * Don't just check for any non-zero bits in _PAGE_USER, since for book3e
 * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
 * _PAGE_USER. Need to explicitly match the _PAGE_BAP_UR bit in that case
 * too.
 */
#ifndef pte_user
static inline bool pte_user(pte_t pte)
{
	return (pte_val(pte) & _PAGE_USER) == _PAGE_USER;
}
#endif

/*
 * We only find the page table entry at the last level,
 * hence no need for other accessors.
 */
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	/*
	 * A read-only access is controlled by the _PAGE_USER bit.
	 * _PAGE_READ is set for WRITE and EXECUTE mappings as well.
	 */
	if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
		return false;

	if (write && !pte_write(pte))
		return false;

	return true;
}
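
/*
 * Illustrative use (a sketch, not from this file): lockless GUP-style
 * code checks the PTE it has read before touching the page, e.g.:
 *
 *	pte_t pte = ptep_get(ptep);
 *
 *	if (!pte_access_permitted(pte, write))
 *		return -EFAULT;
 */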

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot));
}
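
/*
 * For reference, the inverse mapping (provided elsewhere in the powerpc
 * headers) is essentially:
 *
 *	pfn = pte_val(pte) >> PTE_RPN_SHIFT;
 */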

/* Generic modifiers for PTE bits */
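/*
 * (These helpers are pure: they return a modified copy of the PTE; the
 * caller must store the result back, e.g. with set_pte_at().)
 */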
static inline pte_t pte_exprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_EXEC);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

#ifndef pte_mkhuge
static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte));
}
#endif

#ifndef pte_mkprivileged
static inline pte_t pte_mkprivileged(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_USER);
}
#endif

#ifndef pte_mkuser
static inline pte_t pte_mkuser(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_USER);
}
#endif

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

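/*
 * Note: the pte_swp_* helpers below operate on swap entries (i.e.
 * non-present PTEs); _PAGE_SWP_EXCLUSIVE records that the swapped-out
 * page is exclusively owned by this process, so it can be reused
 * directly on swap-in without a copy.
 */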
static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
}

/* This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now, but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
	/* 32-bit with a 64-bit PTE: we can just store the two halves
	 * directly, as long as we do so in the right order with a
	 * barrier in between. In the percpu case, we also fall back to
	 * the simple update below.
	 */
	if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) {
		__asm__ __volatile__("\
			stw%X0 %2,%0\n\
			mbar\n\
			stw%X1 %L2,%1"
		: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
		: "r" (pte) : "memory");
		return;
	}
	/* Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
	ptep->pte3 = ptep->pte2 = ptep->pte1 = ptep->pte = pte_val(pte);
#else
	*ptep = pte;
#endif

	/*
	 * With hardware tablewalk, a sync is needed to ensure that
	 * subsequent accesses see the PTE we just wrote. Unlike userspace
	 * mappings, we can't tolerate spurious faults, so make sure
	 * the new PTE will be seen the first time.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3E_64) && is_kernel_addr(addr))
		mb();
}

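/*
 * ptep_set_access_flags() updates the access/dirty/write bits of a
 * present PTE on a fault; it returns non-zero when the entry actually
 * changed, which tells the generic mm code whether TLB maintenance for
 * that address is needed.
 */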
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);

/*
 * Macros to alter the cache-control bits in a page protection value.
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE))

#define pgprot_cached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT))

#if _PAGE_WRITETHRU != 0
#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT | _PAGE_WRITETHRU))
#else
#define pgprot_cached_wthru(prot)	pgprot_noncached(prot)
#endif

#define pgprot_cached_noncoherent(prot) \
		(__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))

#define pgprot_writecombine pgprot_noncached_wc
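
/*
 * Illustrative driver-side use (a sketch, not part of this header):
 * map device registers to userspace uncached and guarded:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 */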
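/*
 * phys_mem_access_prot() lets the platform choose the cache attributes
 * used when a physical address range is mapped to userspace (e.g. for
 * /dev/mem).
 */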
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

#ifdef CONFIG_HUGETLB_PAGE
static inline int hugepd_ok(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
	return ((hpd_val(hpd) & _PMD_PAGE_MASK) == _PMD_PAGE_8M);
#else
	/* We clear the top bit to indicate hugepd */
	return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0);
#endif
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}
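
/*
 * (Huge pages on these MMUs are represented via hugepd entries rather
 * than leaf PMD/PUD entries, hence the constant-false pmd_huge() and
 * pud_huge() above.)
 */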

#define is_hugepd(hpd)		(hugepd_ok(hpd))
#endif

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 */
#if defined(CONFIG_PPC_E500) && defined(CONFIG_HUGETLB_PAGE)
void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
			    unsigned long address, pte_t *ptep, unsigned int nr);
#else
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr) {}
#endif

#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_NOHASH_PGTABLE_H */