/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
#define _ASM_POWERPC_NOHASH_32_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/mmu.h>			/* For sub-arch specific PPC_PIN_SIZE */

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */

#define PTE_INDEX_SIZE	PTE_SHIFT
#define PMD_INDEX_SIZE	0
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
#define PUD_CACHE_INDEX	PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	0
#define PUD_TABLE_SIZE	0
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)

#define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1)
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

#ifndef __ASSEMBLY__

int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);

#endif /* !__ASSEMBLY__ */


/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others, from which we can start laying out the
 * kernel virtual space that goes below PKMAP and FIXMAP.
 */
#include <asm/fixmap.h>

/*
 * ioremap_bot starts at that address.  Early ioremaps move down from there,
 * until mem_init(), at which point this becomes the top of the vmalloc
 * and ioremap space.
 */
#ifdef CONFIG_HIGHMEM
#define IOREMAP_TOP	PKMAP_BASE
#else
#define IOREMAP_TOP	FIXADDR_START
#endif

/* PPC32 shares vmalloc area with ioremap */
#define IOREMAP_START	VMALLOC_START
#define IOREMAP_END	VMALLOC_END

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 64MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs, so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from IOREMAP_TOP being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings set up in early boot with the VM
 * system.  This really does become a problem for machines with large
 * amounts of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif

#ifdef CONFIG_KASAN_VMALLOC
#define VMALLOC_END	ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END	ioremap_bot
#endif
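
/*
 * A worked example of the VMALLOC_START arithmetic above (the values are
 * illustrative, not taken from the original source): with PAGE_OFFSET at
 * 0xc0000000 and 256MB of lowmem, high_memory is 0xd0000000, so without
 * PPC_PIN_SIZE:
 *
 *	VMALLOC_START = (0xd0000000 + 0x1000000) & ~0xffffff = 0xd1000000
 *
 * i.e. the first 16MB boundary strictly above high_memory, leaving a
 * hole of at most VMALLOC_OFFSET between lowmem and vmalloc space.
 */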

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)
#include <asm/nohash/32/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/nohash/32/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#include <asm/nohash/pte-book3e.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/nohash/32/pte-fsl-booke.h>
#elif defined(CONFIG_PPC_8xx)
#include <asm/nohash/32/pte-8xx.h>
#endif

/*
 * Location of the PFN in the PTE.  Most 32-bit platforms use the same
 * value as PAGE_SHIFT here (ie, naturally aligned).
 * Platforms that differ simply pre-define the value, so we don't
 * override it here.
 */
#ifndef PTE_RPN_SHIFT
#define PTE_RPN_SHIFT	(PAGE_SHIFT)
#endif

/*
 * The mask covered by the RPN must be a ULL on 32-bit platforms with
 * 64-bit PTEs.
 */
#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
#define PTE_RPN_MASK	(~((1ULL << PTE_RPN_SHIFT) - 1))
#else
#define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
#endif

/*
 * _PAGE_CHG_MASK selects the bits that are to be preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)

#ifndef __ASSEMBLY__

#define pte_clear(mm, addr, ptep) \
	do { pte_update(mm, addr, ptep, ~0, 0, 0); } while (0)

#ifndef pte_mkwrite
static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}
#endif

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

#ifndef pte_wrprotect
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}
#endif

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_EXEC);
}

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

/* to find an entry in a page-table-directory */
#define pgd_index(address)	((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/*
 * PTE updates.  This function is called whenever an existing
 * valid PTE is updated.  This does -not- include set_pte_at(),
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide.  In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 *
 * On the 8xx, the page tables are a bit special.  For 16k pages, we have
 * 4 identical entries.  For 512k pages, we have 128 entries as if it was
 * 4k pages, but they are flagged as 512k pages for the hardware.
 * For 8M pages, we have a single entry in the table.
 */
#ifdef CONFIG_PPC_8xx
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge)
{
	pte_basic_t *entry = &p->pte;
	pte_basic_t old = pte_val(*p);
	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
	int num, i;
	pmd_t *pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr), addr);

	/* Number of 4k hardware entries backing this linux PTE */
	if (!huge)
		num = PAGE_SIZE / SZ_4K;
	else if ((pmd_val(*pmd) & _PMD_PAGE_MASK) != _PMD_PAGE_8M)
		num = SZ_512K / SZ_4K;
	else
		num = 1;	/* 8M pages use a single entry */

	for (i = 0; i < num; i++, entry++, new += SZ_4K)
		*entry = new;

	return old;
}
#else
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge)
{
	pte_basic_t old = pte_val(*p);
	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;

	*p = __pte(new);

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif
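
/*
 * A worked example of the 8xx pte_update() above (configuration assumed,
 * not taken from the original source): with a 16k PAGE_SIZE and a
 * non-huge PTE, num = PAGE_SIZE / SZ_4K = 4, so a single call rewrites
 * all four 4k hardware entries backing the linux PTE, the stored value
 * advancing by SZ_4K at each slot:
 *
 *	entry[0] = new;
 *	entry[1] = new + SZ_4K;
 *	entry[2] = new + 2 * SZ_4K;
 *	entry[3] = new + 3 * SZ_4K;
 *
 * The same clr/set flag changes are thus applied to every entry of the
 * group in one pass.
 */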

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(mm, addr, ptep, ~0, 0, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0)));
	unsigned long set = pte_val(pte_wrprotect(__pte(0)));

	pte_update(mm, addr, ptep, clr, set, 0);
}

static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	pte_t pte_set = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(0)))));
	pte_t pte_clr = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(~0)))));
	unsigned long set = pte_val(entry) & pte_val(pte_set);
	unsigned long clr = ~pte_val(entry) & ~pte_val(pte_clr);
	int huge = psize > mmu_virtual_psize ? 1 : 0;

	pte_update(vma->vm_mm, address, ptep, clr, set, huge);

	flush_tlb_page(vma, address);
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	((pte_val(A) ^ pte_val(B)) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page(pmd)		\
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit.
 *   -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
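
/*
 * A worked example of the encoding above (the values are illustrative
 * only): __swp_entry(2, 0x100) gives val = 2 | (0x100 << 5) = 0x2002,
 * and __swp_entry_to_pte() shifts that left by 3 to 0x10010, keeping
 * the low three PTE bits, which include _PAGE_PRESENT on these
 * platforms, clear.  Decoding reverses it: pte_val 0x10010 >> 3 =
 * 0x2002, type = 0x2002 & 0x1f = 2, offset = 0x2002 >> 5 = 0x100.
 */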

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_NOHASH_32_PGTABLE_H */