/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_PGTABLE_H
#define _ASM_POWERPC_NOHASH_PGTABLE_H

#if defined(CONFIG_PPC64)
#include <asm/nohash/64/pgtable.h>
#else
#include <asm/nohash/32/pgtable.h>
#endif

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
				 _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)

/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write protect the kernel text
 * on platforms where such control is possible.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
#endif

/* Make module code happy; we don't set it read-only yet. */
#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X

/* Advertise special mapping type for AGP */
#define PAGE_AGP		(PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP

#ifndef __ASSEMBLY__

/* Generic accessors to PTE bits */
#ifndef pte_write
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_RW;
}
#endif
static inline int pte_read(pte_t pte)		{ return 1; }
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline bool pte_hashpte(pte_t pte)	{ return false; }
static inline bool pte_ci(pte_t pte)		{ return pte_val(pte) & _PAGE_NO_CACHE; }
static inline bool pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_EXEC; }

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/linux/pgtable.h. On powerpc, this will only
 * work for user pages and always return true for kernel pages.
 */
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !pte_user(pte);
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hw_valid(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

/*
 * Don't just check for any non-zero bits in _PAGE_USER, since for book3e
 * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
 * _PAGE_USER. Need to explicitly match the _PAGE_BAP_UR bit in that case too.
 */
#ifndef pte_user
static inline bool pte_user(pte_t pte)
{
	return (pte_val(pte) & _PAGE_USER) == _PAGE_USER;
}
#endif

/*
 * We only find page table entries at the last level,
 * hence no need for other accessors.
 */
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	/*
	 * A read-only access is controlled by the _PAGE_USER bit.
	 * We have _PAGE_READ set for WRITE and EXECUTE.
	 */
	if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
		return false;

	if (write && !pte_write(pte))
		return false;

	return true;
}
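
/*
 * Illustrative sketch only, not part of this header: how a GUP-style
 * caller might consult pte_access_permitted() before following a user
 * mapping. The helper name example_can_follow() is hypothetical; only
 * pte_access_permitted() and FOLL_WRITE are real kernel names.
 */
#if 0
static bool example_can_follow(pte_t pte, unsigned int gup_flags)
{
	/*
	 * A write (FOLL_WRITE) demands a writable PTE; a plain lookup
	 * only needs the PTE to be present, user-accessible and readable.
	 */
	return pte_access_permitted(pte, gup_flags & FOLL_WRITE);
}
#endif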

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) | pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return pte_val(pte) >> PTE_RPN_SHIFT;
}

/* Generic modifiers for PTE bits */
static inline pte_t pte_exprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_EXEC);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkpte(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

#ifndef pte_mkhuge
static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte));
}
#endif

#ifndef pte_mkprivileged
static inline pte_t pte_mkprivileged(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_USER);
}
#endif

#ifndef pte_mkuser
static inline pte_t pte_mkuser(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_USER);
}
#endif

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

/*
 * Insert a PTE. The top-level function is out of line; it uses an inline
 * low-level function in the respective pgtable-* files.
 */
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		       pte_t pte);

/*
 * This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now, but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
	/*
	 * 32-bit with 64-bit PTE: we can just store, as long as we do
	 * the two halves in the right order with a barrier in between.
	 * In the percpu case, we also fall back to the simple update.
	 */
	if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) {
		__asm__ __volatile__("\
			stw%U0%X0 %2,%0\n\
			eieio\n\
			stw%U1%X1 %L2,%1"
		: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
		: "r" (pte) : "memory");
		return;
	}
	/*
	 * Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
	ptep->pte = ptep->pte1 = ptep->pte2 = ptep->pte3 = pte_val(pte);
#else
	*ptep = pte;
#endif

	/*
	 * With hardware tablewalk, a sync is needed to ensure that
	 * subsequent accesses see the PTE we just wrote. Unlike userspace
	 * mappings, we can't tolerate spurious faults, so make sure
	 * the new PTE will be seen the first time.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3E_64) && is_kernel_addr(addr))
		mb();
}
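
/*
 * Illustrative sketch only, not part of this header: building a PTE with
 * pfn_pte() and installing it through the out-of-line set_pte_at(), which
 * funnels into __set_pte_at() above. The helper name example_install_pte()
 * is hypothetical, and the page table locking a real caller needs is elided.
 */
#if 0
static void example_install_pte(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, unsigned long pfn, pgprot_t prot)
{
	/* Combine the physical frame number with the protection bits... */
	pte_t pte = pfn_pte(pfn, prot);

	/* ...and store the entry; the MMU-specific ordering is handled for us */
	set_pte_at(mm, addr, ptep, pte);
}
#endif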

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);

/*
 * Macros to control the cacheability of a page protection value.
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)		(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
						  _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot)	(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
						  _PAGE_NO_CACHE))

#define pgprot_cached(prot)		(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
						  _PAGE_COHERENT))

#if _PAGE_WRITETHRU != 0
#define pgprot_cached_wthru(prot)	(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
						  _PAGE_COHERENT | _PAGE_WRITETHRU))
#else
#define pgprot_cached_wthru(prot)	pgprot_noncached(prot)
#endif

#define pgprot_cached_noncoherent(prot) \
		(__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))

#define pgprot_writecombine pgprot_noncached_wc

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

#ifdef CONFIG_HUGETLB_PAGE
static inline int hugepd_ok(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
	return ((hpd_val(hpd) & _PMD_PAGE_MASK) == _PMD_PAGE_8M);
#else
	/* We clear the top bit to indicate hugepd */
	return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0);
#endif
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int pgd_huge(pgd_t pgd)
{
	return 0;
}
#define pgd_huge		pgd_huge

#define is_hugepd(hpd)		(hugepd_ok(hpd))
#endif

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 */
#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_HUGETLB_PAGE)
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
#else
static inline
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {}
#endif

#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_NOHASH_PGTABLE_H */
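
/*
 * Illustrative sketch only, not part of this header: how a driver's mmap()
 * handler would typically use the cacheability helpers above.
 * pgprot_noncached() clears the _PAGE_CACHE_CTL bits and sets
 * _PAGE_NO_CACHE | _PAGE_GUARDED, which is what MMIO mappings want.
 * The function name example_mmap_mmio() is hypothetical; remap_pfn_range()
 * is the real kernel interface.
 */
#if 0
static int example_mmap_mmio(struct file *file, struct vm_area_struct *vma)
{
	/* Mark the mapping uncached and guarded before installing it */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* vm_pgoff is assumed to already hold the target MMIO pfn */
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
#endif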