/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_64_H
#define _ASM_X86_PGTABLE_64_H

#include <linux/const.h>
#include <asm/pgtable_64_types.h>

#ifndef __ASSEMBLY__

/*
 * This file contains the functions and defines necessary to modify and use
 * the x86-64 page table tree.
 */
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>

extern p4d_t level4_kernel_pgt[512];
extern p4d_t level4_ident_pgt[512];
extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
extern pte_t level1_fixmap_pgt[512];
extern pgd_t init_top_pgt[];

#define swapper_pg_dir init_top_pgt

extern void paging_init(void);
static inline void sync_initial_page_table(void) { }

#define pte_ERROR(e)					\
	pr_err("%s:%d: bad pte %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e)					\
	pr_err("%s:%d: bad pmd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e)					\
	pr_err("%s:%d: bad pud %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pud_val(e))

#if CONFIG_PGTABLE_LEVELS >= 5
#define p4d_ERROR(e)					\
	pr_err("%s:%d: bad p4d %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), p4d_val(e))
#endif

#define pgd_ERROR(e)					\
	pr_err("%s:%d: bad pgd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pgd_val(e))

struct mm_struct;

void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);

static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	*ptep = native_make_pte(0);
}

static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	native_set_pmd(pmd, native_make_pmd(0));
}

static inline pte_t native_ptep_get_and_clear(pte_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pte(xchg(&xp->pte, 0));
#else
	/* native_local_ptep_get_and_clear,
	 * but duplicated because of cyclic dependency
	 */
	pte_t ret = *xp;

	native_pte_clear(NULL, 0, xp);
	return ret;
#endif
}

static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pmd(xchg(&xp->pmd, 0));
#else
	/* native_local_pmdp_get_and_clear,
	 * but duplicated because of cyclic dependency
	 */
	pmd_t ret = *xp;

	native_pmd_clear(xp);
	return ret;
#endif
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
}

static inline void native_pud_clear(pud_t *pud)
{
	native_set_pud(pud, native_make_pud(0));
}

static inline pud_t native_pudp_get_and_clear(pud_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pud(xchg(&xp->pud, 0));
#else
	/* native_local_pudp_get_and_clear,
	 * but duplicated because of cyclic dependency
	 */
	pud_t ret = *xp;

	native_pud_clear(xp);
	return ret;
#endif
}

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages
 * (8k-aligned and 8k in size). The kernel one is in the first 4k and
 * the user one is in the last 4k. To switch between them, you
 * just need to flip the 12th bit in their addresses.
 */
#define PTI_PGTABLE_SWITCH_BIT	PAGE_SHIFT

/*
 * This generates better code than the inline assembly in
 * __set_bit().
 */
static inline void *ptr_set_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr |= BIT(bit);
	return (void *)__ptr;
}
static inline void *ptr_clear_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr &= ~BIT(bit);
	return (void *)__ptr;
}

static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
{
	return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
{
	return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
{
	return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
{
	return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}
#endif /* CONFIG_PAGE_TABLE_ISOLATION */
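/*
 * Worked example for the bit flip above (addresses hypothetical, for
 * illustration only): with PAGE_SHIFT == 12, an 8k-aligned kernel PGD
 * page always has bit 12 clear, so its user counterpart sits exactly
 * PAGE_SIZE above it and the conversion round-trips:
 */
#if 0
	pgd_t *kpgd = (pgd_t *)0xffff888000158000UL;	/* bit 12 clear */
	pgd_t *upgd = kernel_to_user_pgdp(kpgd);	/* ...159000: bit 12 set */

	BUG_ON(user_to_kernel_pgdp(upgd) != kpgd);	/* flips bit 12 back */
#endif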
/*
 * Page table pages are page-aligned. The lower half of the top
 * level is used for userspace and the top half for the kernel.
 *
 * Returns true for parts of the PGD that map userspace and
 * false for the parts that map the kernel.
 */
static inline bool pgdp_maps_userspace(void *__ptr)
{
	unsigned long ptr = (unsigned long)__ptr;

	return (ptr & ~PAGE_MASK) < (PAGE_SIZE / 2);
}

#ifdef CONFIG_PAGE_TABLE_ISOLATION
pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd);

/*
 * Takes a PGD location (pgdp) and a pgd value that needs to be set there.
 * Populates the user PGD and returns the resulting PGD that must be set in
 * the kernel copy of the page tables.
 */
static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
{
	if (!static_cpu_has(X86_FEATURE_PTI))
		return pgd;
	return __pti_set_user_pgd(pgdp, pgd);
}
#else
static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
{
	return pgd;
}
#endif

static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	pgd_t pgd;

	if (pgtable_l5_enabled || !IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) {
		*p4dp = p4d;
		return;
	}

	pgd = native_make_pgd(native_p4d_val(p4d));
	pgd = pti_set_user_pgd((pgd_t *)p4dp, pgd);
	*p4dp = native_make_p4d(native_pgd_val(pgd));
}

static inline void native_p4d_clear(p4d_t *p4d)
{
	native_set_p4d(p4d, native_make_p4d(0));
}

static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	*pgdp = pti_set_user_pgd(pgdp, pgd);
}

static inline void native_pgd_clear(pgd_t *pgd)
{
	native_set_pgd(pgd, native_make_pgd(0));
}

extern void sync_global_pgds(unsigned long start, unsigned long end);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

/*
 * Level 4 access.
 */
static inline int pgd_large(pgd_t pgd) { return 0; }
#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)

/* PUD - Level 3 access */

/* PMD - Level 2 access */

/* PTE - Level 1 access */

/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
#define pte_unmap(pte) ((void)(pte))	/* NOP */

/*
 * Encode and de-code a swap entry
 *
 * |     ...           | 11| 10|  9|8|7|6|5| 4| 3|2| 1|0| <- bit number
 * |     ...           |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names
 * | OFFSET (14->63) | TYPE (9-13) |0|0|X|X| X| X|X|SD|0| <- swp entry
 *
 * G (8) is aliased and used as a PROT_NONE indicator for
 * !present ptes. We need to start storing swap entries above
 * there. We also need to avoid using A and D because of an
 * erratum where they can be incorrectly set by hardware on
 * non-present PTEs.
 *
 * SD (1) in swp entry is used to store the soft dirty bit, which helps us
 * remember soft dirty over page migration.
 *
 * Bit 7 in swp entry should be 0 because pmd_present checks not only P,
 * but also L and G.
 */
#define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
#define SWP_TYPE_BITS 5
/* Place the offset above the type: */
#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS)

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

#define __swp_type(x)			(((x).val >> (SWP_TYPE_FIRST_BIT)) \
					 & ((1U << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)			((x).val >> SWP_OFFSET_FIRST_BIT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
					 ((type) << (SWP_TYPE_FIRST_BIT)) \
					 | ((offset) << SWP_OFFSET_FIRST_BIT) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
#define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val((pmd)) })
#define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })
#define __swp_entry_to_pmd(x)		((pmd_t) { .pmd = (x).val })
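/*
 * Worked example of the encoding above (values illustrative): with
 * _PAGE_BIT_PROTNONE == 8, SWP_TYPE_FIRST_BIT is 9 and
 * SWP_OFFSET_FIRST_BIT is 14, so __swp_entry(2, 0x1234) produces
 * (2 << 9) | (0x1234 << 14) == 0x48d0400. __swp_type() then recovers
 * 2 and __swp_offset() recovers 0x1234, while bits 0 (P) and 7 (L)
 * stay clear so the entry is never mistaken for a present mapping.
 */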
extern int kern_addr_valid(unsigned long addr);
extern void cleanup_highmap(void);

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pgtable_cache_init()	do { } while (0)
#define check_pgt_cache()	do { } while (0)

#define PAGE_AGP	PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP 1

/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
#define kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK)

#define __HAVE_ARCH_PTE_SAME

#define vmemmap ((struct page *)VMEMMAP_START)

extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);

#define gup_fast_permitted gup_fast_permitted
static inline bool gup_fast_permitted(unsigned long start, int nr_pages,
				      int write)
{
	unsigned long len, end;

	len = (unsigned long)nr_pages << PAGE_SHIFT;
	end = start + len;
	if (end < start)
		return false;
	if (end >> __VIRTUAL_MASK_SHIFT)
		return false;
	return true;
}

#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_64_H */
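/*
 * Usage sketch for gup_fast_permitted() (illustrative, simplified from
 * the generic fast-GUP path in mm/gup.c; not part of this header): the
 * lockless walker first checks that the whole range is a valid,
 * non-wrapping user range before walking page tables with interrupts
 * disabled. Variable declarations are omitted for brevity.
 */
#if 0
	if (gup_fast_permitted(start, nr_pages, write)) {
		local_irq_save(flags);
		gup_pgd_range(start, start + ((unsigned long)nr_pages << PAGE_SHIFT),
			      write, pages, &nr);
		local_irq_restore(flags);
	}
#endif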