/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
#define _ASM_POWERPC_NOHASH_32_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/mmu.h>			/* For sub-arch specific PPC_PIN_SIZE */

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */

#define PTE_INDEX_SIZE	PTE_SHIFT
#define PMD_INDEX_SIZE	0
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
#define PUD_CACHE_INDEX	PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	0
#define PUD_TABLE_SIZE	0
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)

#define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1)
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32 bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two-level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
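/*
 * Worked example (illustrative; assumes the usual PTE_SHIFT values from
 * <asm/page_32.h>): with 4k pages and 32-bit PTEs, PTE_INDEX_SIZE is 10,
 * so PGDIR_SHIFT = 12 + 10 = 22 and each of the 1024 PGD entries maps a
 * 4MB (1UL << 22) chunk.  With 64-bit PTEs (Matt's layout above),
 * PTE_INDEX_SIZE is 9, giving PGDIR_SHIFT = 21 and 2048 PGD entries of
 * 2MB each.
 */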
/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

#ifndef __ASSEMBLY__

int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);

#endif /* !__ASSEMBLY__ */

/*
 * This is the bottom of the PKMAP area with HIGHMEM, or an arbitrary
 * value (for now) on other configurations; from there we can start to
 * lay out the kernel virtual space that goes below PKMAP and FIXMAP.
 */
#include <asm/fixmap.h>

/*
 * ioremap_bot starts at that address.  Early ioremaps move down from there,
 * until mem_init(), at which point this becomes the top of the vmalloc
 * and ioremap space.
 */
#ifdef CONFIG_HIGHMEM
#define IOREMAP_TOP	PKMAP_BASE
#else
#define IOREMAP_TOP	FIXADDR_START
#endif

/* PPC32 shares vmalloc area with ioremap */
#define IOREMAP_START	VMALLOC_START
#define IOREMAP_END	VMALLOC_END

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 64MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs, so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() (which start growing
 * down from IOREMAP_TOP) and the VM area allocations (which grow upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot, so we can
 * check when the VM system actually runs into the mappings we set up in
 * early boot.  This really does become a problem for machines with good
 * amounts of RAM. -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif

#ifdef CONFIG_KASAN_VMALLOC
#define VMALLOC_END	ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END	ioremap_bot
#endif

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)
#include <asm/nohash/32/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/nohash/32/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#include <asm/nohash/pte-book3e.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/nohash/32/pte-fsl-booke.h>
#elif defined(CONFIG_PPC_8xx)
#include <asm/nohash/32/pte-8xx.h>
#endif

/*
 * Location of the PFN in the PTE.  Most 32-bit platforms use the same
 * value as PAGE_SHIFT here (ie, naturally aligned).
 * Platforms that don't do so pre-define the value, so we don't override
 * it here.
 */
#ifndef PTE_RPN_SHIFT
#define PTE_RPN_SHIFT	(PAGE_SHIFT)
#endif

/*
 * The mask covered by the RPN must be a ULL on 32-bit platforms with
 * 64-bit PTEs.
 */
#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
#define PTE_RPN_MASK	(~((1ULL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
#define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif

/*
 * _PAGE_CHG_MASK masks the bits that are to be preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)

#ifndef __ASSEMBLY__

#define pte_clear(mm, addr, ptep) \
	do { pte_update(mm, addr, ptep, ~0, 0, 0); } while (0)

#ifndef pte_mkwrite
static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}
#endif

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

#ifndef pte_wrprotect
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}
#endif

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_EXEC);
}

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

/*
 * PTE updates.  This function is called whenever an existing
 * valid PTE is updated.  This does -not- include set_pte_at(),
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide.  In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 *
 * On the 8xx, the page tables are a bit special.  For 16k pages, we have
 * 4 identical entries.  For 512k pages, we have 128 entries as if it was
 * 4k pages, but they are flagged as 512k pages for the hardware.
 * For other page sizes, we have a single entry in the table.
 */
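/*
 * For example (illustrative only), the pte_clear() macro above expands to
 *
 *	pte_update(mm, addr, ptep, ~0, 0, 0);
 *
 * i.e. clear every bit, set nothing, non-huge mapping.  On the 8xx this
 * one call rewrites every 4k cell backing the page, per the rules above.
 */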
#ifdef CONFIG_PPC_8xx
static pmd_t *pmd_off(struct mm_struct *mm, unsigned long addr);
static int hugepd_ok(hugepd_t hpd);

static int number_of_cells_per_pte(pmd_t *pmd, pte_basic_t val, int huge)
{
	if (!huge)
		return PAGE_SIZE / SZ_4K;
	else if (hugepd_ok(*((hugepd_t *)pmd)))
		return 1;
	else if (IS_ENABLED(CONFIG_PPC_4K_PAGES) && !(val & _PAGE_HUGE))
		return SZ_16K / SZ_4K;
	else
		return SZ_512K / SZ_4K;
}

static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge)
{
	pte_basic_t *entry = &p->pte;
	pte_basic_t old = pte_val(*p);
	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
	int num, i;
	pmd_t *pmd = pmd_off(mm, addr);

	num = number_of_cells_per_pte(pmd, new, huge);

	for (i = 0; i < num; i++, entry++, new += SZ_4K)
		*entry = new;

	return old;
}

#ifdef CONFIG_PPC_16K_PAGES
#define __HAVE_ARCH_PTEP_GET
static inline pte_t ptep_get(pte_t *ptep)
{
	pte_basic_t val = READ_ONCE(ptep->pte);
	pte_t pte = {val, val, val, val};

	return pte;
}
#endif /* CONFIG_PPC_16K_PAGES */

#else
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge)
{
	pte_basic_t old = pte_val(*p);
	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;

	*p = __pte(new);

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(mm, addr, ptep, ~0, 0, 0));
}
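/*
 * ptep_set_wrprotect() below derives its masks from pte_wrprotect() rather
 * than hard-coding _PAGE_RW: the bits that pte_wrprotect() clears from an
 * all-ones PTE become "clr", and the bits it sets in an all-zeroes PTE
 * become "set".  This stays correct whether a platform write-protects by
 * clearing a writable bit (the generic pte_wrprotect() above) or by
 * setting a read-only bit (as sub-arch overrides of pte_wrprotect() may
 * do, e.g. the 8xx's _PAGE_RO).
 */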
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0)));
	unsigned long set = pte_val(pte_wrprotect(__pte(0)));

	pte_update(mm, addr, ptep, clr, set, 0);
}

static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	pte_t pte_set = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(0)))));
	pte_t pte_clr = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(~0)))));
	unsigned long set = pte_val(entry) & pte_val(pte_set);
	unsigned long clr = ~pte_val(entry) & ~pte_val(pte_clr);
	int huge = psize > mmu_virtual_psize ? 1 : 0;

	pte_update(vma->vm_mm, address, ptep, clr, set, huge);

	flush_tlb_page(vma, address);
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A, B)	((pte_val(A) ^ pte_val(B)) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page(pmd)		\
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit; __swp_entry_to_pte() therefore
 * shifts the entry left by 3 so the low-order status bits stay clear.
 * -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_NOHASH_32_PGTABLE_H */