#ifndef _ASM_X86_PGTABLE_64_H
#define _ASM_X86_PGTABLE_64_H

#include <linux/const.h>
#include <asm/pgtable_64_types.h>

#ifndef __ASSEMBLY__

/*
 * This file contains the functions and defines necessary to modify and use
 * the x86-64 page table tree.
 */
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>

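/*
 * Initial kernel page tables, set up at boot in arch/x86/kernel/head_64.S.
 * init_level4_pgt is the kernel's reference top-level page table;
 * swapper_pg_dir below is simply an alias for it.
 */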
extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
extern pgd_t init_level4_pgt[];

#define swapper_pg_dir init_level4_pgt

extern void paging_init(void);

#define pte_ERROR(e)					\
	pr_err("%s:%d: bad pte %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e)					\
	pr_err("%s:%d: bad pmd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e)					\
	pr_err("%s:%d: bad pud %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pud_val(e))
#define pgd_ERROR(e)					\
	pr_err("%s:%d: bad pgd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pgd_val(e))

struct mm_struct;

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);


static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	*ptep = native_make_pte(0);
}

static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	native_set_pmd(pmd, native_make_pmd(0));
}

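/*
 * On SMP the pte must be fetched and cleared with one atomic xchg() so that
 * Accessed/Dirty bits set concurrently by another CPU's page-table walk are
 * not lost between the load and the store of zero.
 */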
static inline pte_t native_ptep_get_and_clear(pte_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pte(xchg(&xp->pte, 0));
#else
	/*
	 * native_local_ptep_get_and_clear(),
	 * but duplicated because of cyclic dependency
	 */
	pte_t ret = *xp;
	native_pte_clear(NULL, 0, xp);
	return ret;
#endif
}

static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pmd(xchg(&xp->pmd, 0));
#else
	/*
	 * native_local_pmdp_get_and_clear(),
	 * but duplicated because of cyclic dependency
	 */
	pmd_t ret = *xp;
	native_pmd_clear(xp);
	return ret;
#endif
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
}

static inline void native_pud_clear(pud_t *pud)
{
	native_set_pud(pud, native_make_pud(0));
}

static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	*pgdp = pgd;
}

static inline void native_pgd_clear(pgd_t *pgd)
{
	native_set_pgd(pgd, native_make_pgd(0));
}

extern void sync_global_pgds(unsigned long start, unsigned long end);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
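
/*
 * x86-64 uses a four-level page table: PGD (level 4) -> PUD (level 3) ->
 * PMD (level 2) -> PTE (level 1).  With 4KB pages each table holds 512
 * eight-byte entries and each level decodes nine bits of the 48-bit
 * virtual address.
 */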

/*
 * Level 4 access.
 */
static inline int pgd_large(pgd_t pgd) { return 0; }
#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)

/* PUD - Level 3 access */

/* PMD - Level 2 access */
#define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
#define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) |	\
					    _PAGE_FILE })
#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT
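/*
 * pte_to_pgoff()/pgoff_to_pte() encode the page offset of a nonlinear file
 * mapping (remap_file_pages()) in a not-present pte; the _PAGE_FILE bit
 * distinguishes such an entry from a swap entry.
 */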

/* PTE - Level 1 access. */

/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
#define pte_unmap(pte) ((void)(pte))	/* NOP */

/* Encode and de-code a swap entry */
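/*
 * A swap pte keeps _PAGE_PRESENT clear so the hardware faults on access.
 * The swap type sits in the bits just above the present bit and the swap
 * offset starts above whichever of _PAGE_BIT_FILE/_PAGE_BIT_PROTNONE is
 * higher, so a swap entry can never be mistaken for a file pte or a
 * PROT_NONE pte.
 */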
#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)
#else
#define SWP_TYPE_BITS (_PAGE_BIT_PROTNONE - _PAGE_BIT_PRESENT - 1)
#define SWP_OFFSET_SHIFT (_PAGE_BIT_FILE + 1)
#endif

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

#define __swp_type(x)			(((x).val >> (_PAGE_BIT_PRESENT + 1)) \
					 & ((1U << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)			((x).val >> SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
					 ((type) << (_PAGE_BIT_PRESENT + 1)) \
					 | ((offset) << SWP_OFFSET_SHIFT) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
#define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })

extern int kern_addr_valid(unsigned long addr);
extern void cleanup_highmap(void);

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pgtable_cache_init()   do { } while (0)
#define check_pgt_cache()      do { } while (0)

#define PAGE_AGP    PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP 1

/* fs/proc/kcore.c */
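/*
 * /proc/kcore presents kernel virtual memory as an ELF core file.  These
 * helpers map a kernel virtual address to its file offset and back by
 * masking off, and later restoring, the sign-extension bits above
 * __VIRTUAL_MASK.
 */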
#define	kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
#define	kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK)

#define __HAVE_ARCH_PTE_SAME

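/*
 * Base of the virtually mapped struct page array (SPARSEMEM_VMEMMAP), so
 * pfn_to_page(pfn) is simply &vmemmap[pfn].
 */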
#define vmemmap ((struct page *)VMEMMAP_START)

extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_64_H */