#ifndef _ASM_X86_PGTABLE_64_H
#define _ASM_X86_PGTABLE_64_H

#include <linux/const.h>
#include <asm/pgtable_64_types.h>

#ifndef __ASSEMBLY__

/*
 * This file contains the functions and defines necessary to modify and use
 * the x86-64 page table tree.
 */
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>

extern p4d_t level4_kernel_pgt[512];
extern p4d_t level4_ident_pgt[512];
extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
extern pte_t level1_fixmap_pgt[512];
extern pgd_t init_top_pgt[];

#define swapper_pg_dir init_top_pgt

extern void paging_init(void);

#define pte_ERROR(e)					\
	pr_err("%s:%d: bad pte %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e)					\
	pr_err("%s:%d: bad pmd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e)					\
	pr_err("%s:%d: bad pud %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pud_val(e))

#if CONFIG_PGTABLE_LEVELS >= 5
#define p4d_ERROR(e)					\
	pr_err("%s:%d: bad p4d %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), p4d_val(e))
#endif

#define pgd_ERROR(e)					\
	pr_err("%s:%d: bad pgd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pgd_val(e))

struct mm_struct;

void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);

static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	*ptep = native_make_pte(0);
}

static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	native_set_pmd(pmd, native_make_pmd(0));
}

static inline pte_t native_ptep_get_and_clear(pte_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pte(xchg(&xp->pte, 0));
#else
	/*
	 * native_local_ptep_get_and_clear,
	 * but duplicated because of cyclic dependency
	 */
	pte_t ret = *xp;

	native_pte_clear(NULL, 0, xp);
	return ret;
#endif
}

static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pmd(xchg(&xp->pmd, 0));
#else
	/*
	 * native_local_pmdp_get_and_clear,
	 * but duplicated because of cyclic dependency
	 */
	pmd_t ret = *xp;

	native_pmd_clear(xp);
	return ret;
#endif
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
}

static inline void native_pud_clear(pud_t *pud)
{
	native_set_pud(pud, native_make_pud(0));
}

static inline pud_t native_pudp_get_and_clear(pud_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pud(xchg(&xp->pud, 0));
#else
	/*
	 * native_local_pudp_get_and_clear,
	 * but duplicated because of cyclic dependency
	 */
	pud_t ret = *xp;

	native_pud_clear(xp);
	return ret;
#endif
}
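
/*
 * Illustrative sketch, not part of this header: on SMP the
 * *_get_and_clear() helpers above rely on xchg() so that fetching the
 * old entry and clearing it is a single atomic step. A Dirty or
 * Accessed bit set concurrently by the hardware page walker therefore
 * lands in the returned value rather than being lost between a
 * separate load and store. A hypothetical caller-side view:
 *
 *	pte_t old = native_ptep_get_and_clear(ptep);
 *	if (pte_dirty(old))
 *		set_page_dirty(pte_page(old));	// dirty bit not lost
 */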

static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	*p4dp = p4d;
}

static inline void native_p4d_clear(p4d_t *p4d)
{
#ifdef CONFIG_X86_5LEVEL
	native_set_p4d(p4d, native_make_p4d(0));
#else
	native_set_p4d(p4d, (p4d_t) { .pgd = native_make_pgd(0)});
#endif
}

static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	*pgdp = pgd;
}

static inline void native_pgd_clear(pgd_t *pgd)
{
	native_set_pgd(pgd, native_make_pgd(0));
}

extern void sync_global_pgds(unsigned long start, unsigned long end);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

/*
 * Level 4 access.
 */
static inline int pgd_large(pgd_t pgd) { return 0; }
#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)

/* PUD - Level 3 access */

/* PMD - Level 2 access */

/* PTE - Level 1 access */

/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
#define pte_unmap(pte) ((void)(pte))/* NOP */

/*
 * Encode and decode a swap entry
 *
 * |     ...            | 11| 10|  9|8|7|6|5| 4| 3|2| 1|0| <- bit number
 * |     ...            |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names
 * | OFFSET (14->63) | TYPE (9-13)  |0|0|X|X| X| X|X|SD|0| <- swp entry
 *
 * G (8) is aliased and used as a PROT_NONE indicator for
 * !present ptes.  We need to start storing swap entries above
 * there.  We also need to avoid using A and D because of an
 * erratum where they can be incorrectly set by hardware on
 * non-present PTEs.
 *
 * SD (1) in the swp entry is used to store the soft dirty bit, which
 * helps us remember soft dirty state over page migration.
 *
 * Bit 7 in the swp entry should be 0 because pmd_present checks not only P,
 * but also L and G.
 */
#define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
#define SWP_TYPE_BITS 5
/* Place the offset above the type: */
#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS)

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

#define __swp_type(x)			(((x).val >> (SWP_TYPE_FIRST_BIT)) \
					 & ((1U << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)			((x).val >> SWP_OFFSET_FIRST_BIT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
					 ((type) << (SWP_TYPE_FIRST_BIT)) \
					 | ((offset) << SWP_OFFSET_FIRST_BIT) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
#define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val((pmd)) })
#define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })
#define __swp_entry_to_pmd(x)		((pmd_t) { .pmd = (x).val })
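
/*
 * Illustrative sketch, not part of this header: how the macros above
 * compose when core mm code round-trips a swap entry through a pte.
 * With _PAGE_BIT_PROTNONE == 8, SWP_TYPE_FIRST_BIT is 9, so the type
 * occupies bits 9-13 and the offset bits 14-63, matching the diagram.
 * The type and offset values below are hypothetical.
 *
 *	swp_entry_t entry = __swp_entry(2, 0x1234);
 *	pte_t pte = __swp_entry_to_pte(entry);
 *
 *	__swp_type(__pte_to_swp_entry(pte));	// == 2
 *	__swp_offset(__pte_to_swp_entry(pte));	// == 0x1234
 */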

extern int kern_addr_valid(unsigned long addr);
extern void cleanup_highmap(void);

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pgtable_cache_init()   do { } while (0)
#define check_pgt_cache()      do { } while (0)

#define PAGE_AGP    PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP 1

/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
#define kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK)

#define __HAVE_ARCH_PTE_SAME

#define vmemmap ((struct page *)VMEMMAP_START)

extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);

#define gup_fast_permitted gup_fast_permitted
static inline bool gup_fast_permitted(unsigned long start, int nr_pages,
		int write)
{
	unsigned long len, end;

	len = (unsigned long)nr_pages << PAGE_SHIFT;
	end = start + len;
	/* Reject ranges that wrap around the top of the address space ... */
	if (end < start)
		return false;
	/* ... or that reach beyond the user virtual address space. */
	if (end >> __VIRTUAL_MASK_SHIFT)
		return false;
	return true;
}

#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_64_H */