#ifndef _ASM_POWERPC_PGTABLE_RADIX_H
#define _ASM_POWERPC_PGTABLE_RADIX_H

#ifndef __ASSEMBLY__
#include <asm/cmpxchg.h>
#endif

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/radix-64k.h>
#else
#include <asm/book3s/64/radix-4k.h>
#endif

/*
 * For P9 DD1 only, we need to track whether the pte is huge.
 */
#define R_PAGE_LARGE	_RPAGE_RSV1

#ifndef __ASSEMBLY__
#include <asm/book3s/64/tlbflush-radix.h>
#include <asm/cpu_has_feature.h>
#endif

/* An empty PTE can still have a R or C writeback */
#define RADIX_PTE_NONE_MASK	(_PAGE_DIRTY | _PAGE_ACCESSED)

/* Bits to set in a RPMD/RPUD/RPGD */
#define RADIX_PMD_VAL_BITS	(0x8000000000000000UL | RADIX_PTE_INDEX_SIZE)
#define RADIX_PUD_VAL_BITS	(0x8000000000000000UL | RADIX_PMD_INDEX_SIZE)
#define RADIX_PGD_VAL_BITS	(0x8000000000000000UL | RADIX_PUD_INDEX_SIZE)

/* Don't have anything in the reserved bits and leaf bits */
#define RADIX_PMD_BAD_BITS	0x60000000000000e0UL
#define RADIX_PUD_BAD_BITS	0x60000000000000e0UL
#define RADIX_PGD_BAD_BITS	0x60000000000000e0UL

/*
 * Size of EA range mapped by our pagetables.
 */
#define RADIX_PGTABLE_EADDR_SIZE (RADIX_PTE_INDEX_SIZE + RADIX_PMD_INDEX_SIZE + \
				  RADIX_PUD_INDEX_SIZE + RADIX_PGD_INDEX_SIZE + PAGE_SHIFT)
#define RADIX_PGTABLE_RANGE (ASM_CONST(1) << RADIX_PGTABLE_EADDR_SIZE)

/*
 * We support a 52 bit address space. Use the top bit for the kernel
 * virtual mapping, and make sure the kernel fits in the top
 * quadrant.
 *
 *           +------------------+
 *           +------------------+  Kernel virtual map (0xc008000000000000)
 *           |                  |
 *           |                  |
 *           |                  |
 * 0b11......+------------------+  Kernel linear map (0xc....)
 *           |                  |
 *           |    quadrant 2    |
 *           |                  |
 * 0b10......+------------------+
 *           |                  |
 *           |    quadrant 1    |
 *           |                  |
 * 0b01......+------------------+
 *           |                  |
 *           |    quadrant 0    |
 *           |                  |
 * 0b00......+------------------+
 *
 *
 * 3rd quadrant expanded:
 * +------------------------------+
 * |                              |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel IO map end (0xc010000000000000)
 * |                              |
 * |                              |
 * |      1/2 of virtual map      |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel IO map start
 * |                              |
 * |      1/4 of virtual map      |
 * |                              |
 * +------------------------------+  Kernel vmemmap start
 * |                              |
 * |      1/4 of virtual map      |
 * |                              |
 * +------------------------------+  Kernel virt start (0xc008000000000000)
 * |                              |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel linear (0xc.....)
 */

#define RADIX_KERN_VIRT_START	ASM_CONST(0xc008000000000000)
#define RADIX_KERN_VIRT_SIZE	ASM_CONST(0x0008000000000000)

/*
 * The vmalloc space starts at the beginning of that region, and
 * occupies a quarter of it on the radix config.
 * (we keep a quarter for the virtual memmap)
 */
#define RADIX_VMALLOC_START	RADIX_KERN_VIRT_START
#define RADIX_VMALLOC_SIZE	(RADIX_KERN_VIRT_SIZE >> 2)
#define RADIX_VMALLOC_END	(RADIX_VMALLOC_START + RADIX_VMALLOC_SIZE)
/*
 * Defines the address of the vmemmap area, in its own region on
 * hash table CPUs.
 */
#define RADIX_VMEMMAP_BASE	(RADIX_VMALLOC_END)
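
/*
 * Worked example (illustrative only; the values below are derived from
 * the constants above, they are not additional ABI):
 *
 *	RADIX_VMALLOC_START = 0xc008000000000000
 *	RADIX_VMALLOC_SIZE  = 0x0008000000000000 >> 2
 *	                    = 0x0002000000000000
 *	RADIX_VMALLOC_END   = 0xc00a000000000000
 *	RADIX_VMEMMAP_BASE  = 0xc00a000000000000
 *
 * i.e. vmalloc occupies the first quarter of the kernel virtual region
 * and the virtual memmap starts immediately after it, matching the
 * "3rd quadrant expanded" diagram above.
 */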

#ifndef __ASSEMBLY__
#define RADIX_PTE_TABLE_SIZE	(sizeof(pte_t) << RADIX_PTE_INDEX_SIZE)
#define RADIX_PMD_TABLE_SIZE	(sizeof(pmd_t) << RADIX_PMD_INDEX_SIZE)
#define RADIX_PUD_TABLE_SIZE	(sizeof(pud_t) << RADIX_PUD_INDEX_SIZE)
#define RADIX_PGD_TABLE_SIZE	(sizeof(pgd_t) << RADIX_PGD_INDEX_SIZE)

#ifdef CONFIG_STRICT_KERNEL_RWX
extern void radix__mark_rodata_ro(void);
extern void radix__mark_initmem_nx(void);
#endif

static inline unsigned long __radix_pte_update(pte_t *ptep, unsigned long clr,
					       unsigned long set)
{
	pte_t pte;
	unsigned long old_pte, new_pte;

	/*
	 * Update the PTE atomically: retry the cmpxchg until no
	 * concurrent update (e.g. a hardware R/C update) races with us.
	 */
	do {
		pte = READ_ONCE(*ptep);
		old_pte = pte_val(pte);
		new_pte = (old_pte | set) & ~clr;

	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));

	return old_pte;
}

static inline unsigned long radix__pte_update(struct mm_struct *mm,
					      unsigned long addr,
					      pte_t *ptep, unsigned long clr,
					      unsigned long set,
					      int huge)
{
	unsigned long old_pte;

	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {

		unsigned long new_pte;

		/*
		 * P9 DD1 workaround: clear the PTE, flush, then install
		 * the new value rather than updating it in place.
		 */
		old_pte = __radix_pte_update(ptep, ~0ul, 0);
		/*
		 * new value of pte
		 */
		new_pte = (old_pte | set) & ~clr;
		radix__flush_tlb_pte_p9_dd1(old_pte, mm, addr);
		if (new_pte)
			__radix_pte_update(ptep, 0, new_pte);
	} else
		old_pte = __radix_pte_update(ptep, clr, set);
	if (!huge)
		assert_pte_locked(mm, addr);

	return old_pte;
}

static inline pte_t radix__ptep_get_and_clear_full(struct mm_struct *mm,
						   unsigned long addr,
						   pte_t *ptep, int full)
{
	unsigned long old_pte;

	if (full) {
		/*
		 * If we are trying to clear the pte, we can skip
		 * the DD1 pte update sequence and batch the tlb flush. The
		 * tlb flush batching is done by mmu gather code. We
		 * still keep the cmpxchg update to make sure we get
		 * the correct R/C bit which might be updated via Nest MMU.
		 */
		old_pte = __radix_pte_update(ptep, ~0ul, 0);
	} else
		old_pte = radix__pte_update(mm, addr, ptep, ~0ul, 0, 0);

	return __pte(old_pte);
}
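
/*
 * Usage sketch (illustrative only, not a new API): a caller holding
 * the pte lock could write-protect a mapping while preserving dirty
 * tracking like so:
 *
 *	old = radix__pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
 *	if (old & _PAGE_DIRTY)
 *		...;	// handle the dirty page
 *
 * The cmpxchg loop in __radix_pte_update() ensures R/C bit updates
 * performed concurrently by hardware are not lost.
 */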

/*
 * Set the dirty and/or accessed bits atomically in a linux PTE; this
 * function doesn't need to invalidate the TLB.
 */
static inline void radix__ptep_set_access_flags(struct mm_struct *mm,
						pte_t *ptep, pte_t entry,
						unsigned long address)
{

	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
					      _PAGE_RW | _PAGE_EXEC);

	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {

		unsigned long old_pte, new_pte;

		old_pte = __radix_pte_update(ptep, ~0, 0);
		/*
		 * new value of pte
		 */
		new_pte = old_pte | set;
		radix__flush_tlb_pte_p9_dd1(old_pte, mm, address);
		__radix_pte_update(ptep, 0, new_pte);
	} else
		__radix_pte_update(ptep, 0, set);
	asm volatile("ptesync" : : : "memory");
}

static inline int radix__pte_same(pte_t pte_a, pte_t pte_b)
{
	return ((pte_raw(pte_a) ^ pte_raw(pte_b)) == 0);
}

static inline int radix__pte_none(pte_t pte)
{
	return (pte_val(pte) & ~RADIX_PTE_NONE_MASK) == 0;
}

static inline void radix__set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte, int percpu)
{
	*ptep = pte;
	asm volatile("ptesync" : : : "memory");
}

static inline int radix__pmd_bad(pmd_t pmd)
{
	return !!(pmd_val(pmd) & RADIX_PMD_BAD_BITS);
}

static inline int radix__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return ((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) == 0);
}

static inline int radix__pud_bad(pud_t pud)
{
	return !!(pud_val(pud) & RADIX_PUD_BAD_BITS);
}

static inline int radix__pgd_bad(pgd_t pgd)
{
	return !!(pgd_val(pgd) & RADIX_PGD_BAD_BITS);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

static inline int radix__pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PTE | _PAGE_DEVMAP)) == _PAGE_PTE;
}

static inline pmd_t radix__pmd_mkhuge(pmd_t pmd)
{
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		return __pmd(pmd_val(pmd) | _PAGE_PTE | R_PAGE_LARGE);
	return __pmd(pmd_val(pmd) | _PAGE_PTE);
}

static inline void radix__pmdp_huge_split_prepare(struct vm_area_struct *vma,
						  unsigned long address, pmd_t *pmdp)
{
	/* Nothing to do for radix. */
	return;
}

extern unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
						pmd_t *pmdp, unsigned long clr,
						unsigned long set);
extern pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp);
extern void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
					      pgtable_t pgtable);
extern pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
extern pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp);
extern int radix__has_transparent_hugepage(void);
#endif

extern int __meminit radix__vmemmap_create_mapping(unsigned long start,
						   unsigned long page_size,
						   unsigned long phys);
extern void radix__vmemmap_remove_mapping(unsigned long start,
					  unsigned long page_size);

extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
				  pgprot_t flags, unsigned int psz);

static inline unsigned long radix__get_tree_size(void)
{
	unsigned long rts_field;
	/*
	 * We support 52 bits, hence:
	 *  DD1    52-28 = 24, 0b11000
	 *  Others 52-31 = 21, 0b10101
	 * RTS encoding details:
	 * bits 0 - 2 of rts -> bits 5 - 7 of unsigned long
	 * bits 3 - 4 of rts -> bits 61 - 62 of unsigned long
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		rts_field = (0x3UL << 61);
	else {
		rts_field = (0x5UL << 5); /* bits 5 - 7 */
		rts_field |= (0x2UL << 61);
	}
	return rts_field;
}
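
/*
 * Worked example (illustrative): in the non-DD1 case the RTS value is
 * 21 = 0b10101. Its low 3 bits (0b101) land in dword bits 5 - 7 and
 * its top 2 bits (0b10) in dword bits 61 - 62, so:
 *
 *	rts_field = (0x5UL << 5) | (0x2UL << 61)
 *	          = 0x00000000000000a0 | 0x4000000000000000
 *	          = 0x40000000000000a0
 *
 * For DD1, RTS = 24 = 0b11000: only the top 2 bits (0b11) are set,
 * giving rts_field = 0x6000000000000000.
 */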

#ifdef CONFIG_MEMORY_HOTPLUG
int radix__create_section_mapping(unsigned long start, unsigned long end);
int radix__remove_section_mapping(unsigned long start, unsigned long end);
#endif /* CONFIG_MEMORY_HOTPLUG */
#endif /* __ASSEMBLY__ */
#endif