/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_DEFS_H
#define _ASM_X86_PGTABLE_DEFS_H

#include <linux/const.h>
#include <linux/mem_encrypt.h>

#include <asm/page_types.h>

#define FIRST_USER_ADDRESS	0UL

#define _PAGE_BIT_PRESENT	0	/* is present */
#define _PAGE_BIT_RW		1	/* writeable */
#define _PAGE_BIT_USER		2	/* userspace addressable */
#define _PAGE_BIT_PWT		3	/* page write through */
#define _PAGE_BIT_PCD		4	/* page cache disabled */
#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2 MB) page */
#define _PAGE_BIT_PAT		7	/* on 4KB pages */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_SOFTW1	9	/* available for programmer */
#define _PAGE_BIT_SOFTW2	10	/* " */
#define _PAGE_BIT_SOFTW3	11	/* " */
#define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
#define _PAGE_BIT_SOFTW4	58	/* available for programmer */
#define _PAGE_BIT_PKEY_BIT0	59	/* Protection Keys, bit 1/4 */
#define _PAGE_BIT_PKEY_BIT1	60	/* Protection Keys, bit 2/4 */
#define _PAGE_BIT_PKEY_BIT2	61	/* Protection Keys, bit 3/4 */
#define _PAGE_BIT_PKEY_BIT3	62	/* Protection Keys, bit 4/4 */
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */

#define _PAGE_BIT_SPECIAL	_PAGE_BIT_SOFTW1
#define _PAGE_BIT_CPA_TEST	_PAGE_BIT_SOFTW1
#define _PAGE_BIT_HIDDEN	_PAGE_BIT_SOFTW3 /* hidden by kmemcheck */
#define _PAGE_BIT_SOFT_DIRTY	_PAGE_BIT_SOFTW3 /* software dirty tracking */
#define _PAGE_BIT_DEVMAP	_PAGE_BIT_SOFTW4

/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present() gives true */
#define _PAGE_BIT_PROTNONE	_PAGE_BIT_GLOBAL
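
/*
 * Illustrative example (not part of the hardware ABI): a PROT_NONE mapping
 * keeps its page table entry around so the kernel can still find the page,
 * but clears _PAGE_PRESENT and sets _PAGE_GLOBAL (bit 8) in its place:
 *
 *	present pte:	... | _PAGE_PRESENT	-> pte_present() is true
 *	PROT_NONE pte:	... | _PAGE_PROTNONE	-> pte_present() is still
 *						   true, but the CPU faults
 *
 * Reusing the GLOBAL bit here is safe because the CPU ignores every other
 * bit of an entry once the present bit is clear.
 */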

#define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW	(_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER	(_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT	(_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD	(_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED	(_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE	(_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL	(_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_SOFTW1	(_AT(pteval_t, 1) << _PAGE_BIT_SOFTW1)
#define _PAGE_SOFTW2	(_AT(pteval_t, 1) << _PAGE_BIT_SOFTW2)
#define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST	(_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
#define _PAGE_PKEY_BIT0	(_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT0)
#define _PAGE_PKEY_BIT1	(_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT1)
#define _PAGE_PKEY_BIT2	(_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT2)
#define _PAGE_PKEY_BIT3	(_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT3)
#else
#define _PAGE_PKEY_BIT0	(_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT1	(_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT2	(_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT3	(_AT(pteval_t, 0))
#endif
#define __HAVE_ARCH_PTE_SPECIAL

#define _PAGE_PKEY_MASK (_PAGE_PKEY_BIT0 | \
			 _PAGE_PKEY_BIT1 | \
			 _PAGE_PKEY_BIT2 | \
			 _PAGE_PKEY_BIT3)
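
/*
 * Worked example (illustrative): the 4-bit protection key lives in pte
 * bits 59..62, so pkey 5 (binary 0101) is stored as
 *
 *	(5 << _PAGE_BIT_PKEY_BIT0) == _PAGE_PKEY_BIT0 | _PAGE_PKEY_BIT2
 *
 * i.e. bits 59 and 61 set.  Masking a pte value with _PAGE_PKEY_MASK and
 * shifting right by _PAGE_BIT_PKEY_BIT0 recovers the key.
 */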

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_KNL_ERRATUM_MASK (_PAGE_DIRTY | _PAGE_ACCESSED)
#else
#define _PAGE_KNL_ERRATUM_MASK 0
#endif

#ifdef CONFIG_KMEMCHECK
#define _PAGE_HIDDEN	(_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
#else
#define _PAGE_HIDDEN	(_AT(pteval_t, 0))
#endif

/*
 * The same hidden bit is used by kmemcheck, but since kmemcheck
 * works on kernel pages while the soft-dirty engine works on
 * user-space pages, the two users do not conflict with each other.
 */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
#else
#define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 0))
#endif

/*
 * Tracking the soft-dirty bit when a page goes to swap is tricky.
 * We need a bit which can be stored in the pte _and_ does not conflict
 * with the swap entry format. On x86 bits 1-4 are *not* involved in the
 * swap entry computation, but bit 7 is used for THP migration, so we
 * borrow bit 1 for soft-dirty tracking.
 *
 * Please note that this bit must be treated as the swap dirty-page
 * marker if and only if the PTE/PMD has the present bit clear!
 */
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY	_PAGE_RW
#else
#define _PAGE_SWP_SOFT_DIRTY	(_AT(pteval_t, 0))
#endif
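
/*
 * Illustrative sketch of the resulting layout: once _PAGE_PRESENT is
 * clear and the pte holds a swap entry, the hardware meaning of bit 1
 * no longer applies, so the same bit position serves two purposes:
 *
 *	present pte:	bit 1 == _PAGE_RW		(may we write?)
 *	swap pte:	bit 1 == _PAGE_SWP_SOFT_DIRTY	(was it written?)
 *
 * pte_swp_mksoft_dirty()/pte_swp_soft_dirty() are the accessors that
 * honour this convention.
 */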

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
#define _PAGE_DEVMAP	(_AT(u64, 1) << _PAGE_BIT_DEVMAP)
#define __HAVE_ARCH_PTE_DEVMAP
#else
#define _PAGE_NX	(_AT(pteval_t, 0))
#define _PAGE_DEVMAP	(_AT(pteval_t, 0))
#endif

#define _PAGE_PROTNONE	(_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)

#define _PAGE_TABLE_NOENC	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |\
				 _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE_NOENC	(_PAGE_PRESENT | _PAGE_RW |		\
				 _PAGE_ACCESSED | _PAGE_DIRTY)

/*
 * Set of bits not changed in pte_modify().  The pte's
 * protection key is treated like _PAGE_RW, for
 * instance, and is *not* included in this mask since
 * pte_modify() does modify it.
 */
#define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY |	\
			 _PAGE_SOFT_DIRTY)
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
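
/*
 * Example of what this means for pte_modify() (illustrative sketch): when
 * mprotect() rebuilds a pte with new permissions, everything inside
 * _PAGE_CHG_MASK (the PFN, PCD/PWT, accessed/dirty state, soft-dirty,
 * special) is carried over unchanged, while bits outside it (_PAGE_RW,
 * _PAGE_NX, the protection key) come from the new protection:
 *
 *	newval = (oldval & _PAGE_CHG_MASK) |
 *		 (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
 */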

/*
 * The cache modes defined here are used to translate between pure SW usage
 * and the HW-defined cache mode bits and/or PAT entries.
 *
 * The resulting bits for PWT, PCD and PAT should be chosen such that
 * the WB mode ends up at index 0 (all bits clear). This is the default
 * right now and likely would break too much if changed.
 */
#ifndef __ASSEMBLY__
enum page_cache_mode {
	_PAGE_CACHE_MODE_WB = 0,
	_PAGE_CACHE_MODE_WC = 1,
	_PAGE_CACHE_MODE_UC_MINUS = 2,
	_PAGE_CACHE_MODE_UC = 3,
	_PAGE_CACHE_MODE_WT = 4,
	_PAGE_CACHE_MODE_WP = 5,
	_PAGE_CACHE_MODE_NUM = 8
};
#endif

#define _PAGE_CACHE_MASK	(_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)
#define _PAGE_NOCACHE		(cachemode2protval(_PAGE_CACHE_MODE_UC))
#define _PAGE_CACHE_WP		(cachemode2protval(_PAGE_CACHE_MODE_WP))

#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
				 _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW |	\
					 _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)
#define PAGE_COPY		PAGE_COPY_NOEXEC
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)

#define __PAGE_KERNEL_EXEC						\
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
#define __PAGE_KERNEL		(__PAGE_KERNEL_EXEC | _PAGE_NX)

#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_NOCACHE)
#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VVAR		(__PAGE_KERNEL_RO | _PAGE_USER)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)
#define __PAGE_KERNEL_WP		(__PAGE_KERNEL | _PAGE_CACHE_WP)

#define __PAGE_KERNEL_IO		(__PAGE_KERNEL)
#define __PAGE_KERNEL_IO_NOCACHE	(__PAGE_KERNEL_NOCACHE)

#ifndef __ASSEMBLY__

#define _PAGE_ENC	(_AT(pteval_t, sme_me_mask))

#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
			 _PAGE_DIRTY | _PAGE_ENC)
#define _PAGE_TABLE	(_KERNPG_TABLE | _PAGE_USER)

#define __PAGE_KERNEL_ENC	(__PAGE_KERNEL | _PAGE_ENC)
#define __PAGE_KERNEL_ENC_WP	(__PAGE_KERNEL_WP | _PAGE_ENC)

#define __PAGE_KERNEL_NOENC	(__PAGE_KERNEL)
#define __PAGE_KERNEL_NOENC_WP	(__PAGE_KERNEL_WP)

#define PAGE_KERNEL		__pgprot(__PAGE_KERNEL | _PAGE_ENC)
#define PAGE_KERNEL_NOENC	__pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO		__pgprot(__PAGE_KERNEL_RO | _PAGE_ENC)
#define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC | _PAGE_ENC)
#define PAGE_KERNEL_EXEC_NOENC	__pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX		__pgprot(__PAGE_KERNEL_RX | _PAGE_ENC)
#define PAGE_KERNEL_NOCACHE	__pgprot(__PAGE_KERNEL_NOCACHE | _PAGE_ENC)
#define PAGE_KERNEL_LARGE	__pgprot(__PAGE_KERNEL_LARGE | _PAGE_ENC)
#define PAGE_KERNEL_LARGE_EXEC	__pgprot(__PAGE_KERNEL_LARGE_EXEC | _PAGE_ENC)
#define PAGE_KERNEL_VSYSCALL	__pgprot(__PAGE_KERNEL_VSYSCALL | _PAGE_ENC)
#define PAGE_KERNEL_VVAR	__pgprot(__PAGE_KERNEL_VVAR | _PAGE_ENC)

#define PAGE_KERNEL_IO		__pgprot(__PAGE_KERNEL_IO)
#define PAGE_KERNEL_IO_NOCACHE	__pgprot(__PAGE_KERNEL_IO_NOCACHE)

#endif	/* __ASSEMBLY__ */

/*         xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
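
/*
 * How to read the tables above (illustrative): the three digits are the
 * mmap PROT_EXEC/PROT_WRITE/PROT_READ bits ("xwr").  Private writable
 * mappings must start out read-only so that the first write can trigger
 * copy-on-write, hence __P010/__P011 map to PAGE_COPY, while the shared
 * variants __S010/__S011 get a really writable PAGE_SHARED.
 */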

/*
 * Early identity-mapping pte attribute macros.
 */
#ifdef CONFIG_X86_64
#define __PAGE_KERNEL_IDENT_LARGE_EXEC	__PAGE_KERNEL_LARGE_EXEC
#else
#define PTE_IDENT_ATTR	 0x003		/* PRESENT+RW */
#define PDE_IDENT_ATTR	 0x063		/* PRESENT+RW+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR	 0x001		/* PRESENT (no other attributes) */
#endif

#ifdef CONFIG_X86_32
# include <asm/pgtable_32_types.h>
#else
# include <asm/pgtable_64_types.h>
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>

/* Extracts the PFN from a (pte|pmd|pud|pgd)val_t of a 4KB page */
#define PTE_PFN_MASK		((pteval_t)PHYSICAL_PAGE_MASK)

/*
 * Extracts the flags from a (pte|pmd|pud|pgd)val_t.
 * This includes the protection key value.
 */
#define PTE_FLAGS_MASK		(~PTE_PFN_MASK)
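
/*
 * Worked example (illustrative, 64-bit): a pte value splits as
 *
 *	phys  = val & PTE_PFN_MASK;	(physical page frame address)
 *	flags = val & PTE_FLAGS_MASK;	(everything else)
 *
 * Since PTE_FLAGS_MASK is simply ~PTE_PFN_MASK, the flags half keeps not
 * only the low attribute bits but also NX (bit 63) and the protection-key
 * bits, which live above the widest possible physical address.
 */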

typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;

typedef struct { pgdval_t pgd; } pgd_t;

static inline pgd_t native_make_pgd(pgdval_t val)
{
	return (pgd_t) { val };
}

static inline pgdval_t native_pgd_val(pgd_t pgd)
{
	return pgd.pgd;
}

static inline pgdval_t pgd_flags(pgd_t pgd)
{
	return native_pgd_val(pgd) & PTE_FLAGS_MASK;
}

#if CONFIG_PGTABLE_LEVELS > 4
typedef struct { p4dval_t p4d; } p4d_t;

static inline p4d_t native_make_p4d(p4dval_t val)
{
	return (p4d_t) { val };
}

static inline p4dval_t native_p4d_val(p4d_t p4d)
{
	return p4d.p4d;
}
#else
#include <asm-generic/pgtable-nop4d.h>

static inline p4d_t native_make_p4d(p4dval_t val)
{
	return (p4d_t) { .pgd = native_make_pgd((pgdval_t)val) };
}

static inline p4dval_t native_p4d_val(p4d_t p4d)
{
	return native_pgd_val(p4d.pgd);
}
#endif

#if CONFIG_PGTABLE_LEVELS > 3
typedef struct { pudval_t pud; } pud_t;

static inline pud_t native_make_pud(pudval_t val)
{
	return (pud_t) { val };
}

static inline pudval_t native_pud_val(pud_t pud)
{
	return pud.pud;
}
#else
#include <asm-generic/pgtable-nopud.h>

static inline pudval_t native_pud_val(pud_t pud)
{
	return native_pgd_val(pud.p4d.pgd);
}
#endif

#if CONFIG_PGTABLE_LEVELS > 2
typedef struct { pmdval_t pmd; } pmd_t;

static inline pmd_t native_make_pmd(pmdval_t val)
{
	return (pmd_t) { val };
}

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
	return pmd.pmd;
}
#else
#include <asm-generic/pgtable-nopmd.h>

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
	return native_pgd_val(pmd.pud.p4d.pgd);
}
#endif

static inline p4dval_t p4d_pfn_mask(p4d_t p4d)
{
	/* No 512 GiB huge pages yet */
	return PTE_PFN_MASK;
}

static inline p4dval_t p4d_flags_mask(p4d_t p4d)
{
	return ~p4d_pfn_mask(p4d);
}

static inline p4dval_t p4d_flags(p4d_t p4d)
{
	return native_p4d_val(p4d) & p4d_flags_mask(p4d);
}

static inline pudval_t pud_pfn_mask(pud_t pud)
{
	if (native_pud_val(pud) & _PAGE_PSE)
		return PHYSICAL_PUD_PAGE_MASK;
	else
		return PTE_PFN_MASK;
}

static inline pudval_t pud_flags_mask(pud_t pud)
{
	return ~pud_pfn_mask(pud);
}

static inline pudval_t pud_flags(pud_t pud)
{
	return native_pud_val(pud) & pud_flags_mask(pud);
}

static inline pmdval_t pmd_pfn_mask(pmd_t pmd)
{
	if (native_pmd_val(pmd) & _PAGE_PSE)
		return PHYSICAL_PMD_PAGE_MASK;
	else
		return PTE_PFN_MASK;
}
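
/*
 * Why the mask depends on _PAGE_PSE (illustrative): a 2MB pmd maps a
 * 2MB-aligned region, so its physical address field only starts at
 * bit 21 and the kernel treats bits 0..20 as flags - notably bit 12
 * holds _PAGE_PAT_LARGE, since bit 7 (where PAT sits for 4K ptes) is
 * occupied by PSE itself.  Hence PHYSICAL_PMD_PAGE_MASK instead of
 * PTE_PFN_MASK; the same reasoning gives PHYSICAL_PUD_PAGE_MASK for
 * 1GB pud entries.
 */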

static inline pmdval_t pmd_flags_mask(pmd_t pmd)
{
	return ~pmd_pfn_mask(pmd);
}

static inline pmdval_t pmd_flags(pmd_t pmd)
{
	return native_pmd_val(pmd) & pmd_flags_mask(pmd);
}

static inline pte_t native_make_pte(pteval_t val)
{
	return (pte_t) { .pte = val };
}

static inline pteval_t native_pte_val(pte_t pte)
{
	return pte.pte;
}

static inline pteval_t pte_flags(pte_t pte)
{
	return native_pte_val(pte) & PTE_FLAGS_MASK;
}

#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) } )

extern uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM];
extern uint8_t __pte2cachemode_tbl[8];

#define __pte2cm_idx(cb)				\
	((((cb) >> (_PAGE_BIT_PAT - 2)) & 4) |		\
	 (((cb) >> (_PAGE_BIT_PCD - 1)) & 2) |		\
	 (((cb) >> _PAGE_BIT_PWT) & 1))
#define __cm_idx2pte(i)					\
	((((i) & 4) << (_PAGE_BIT_PAT - 2)) |		\
	 (((i) & 2) << (_PAGE_BIT_PCD - 1)) |		\
	 (((i) & 1) << _PAGE_BIT_PWT))
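
/*
 * Worked example (illustrative): these macros compress the scattered
 * PWT (bit 3), PCD (bit 4) and PAT (bit 7) bits into a dense 3-bit
 * table index (PAT -> bit 2, PCD -> bit 1, PWT -> bit 0) and back.
 * For strong UC both PCD and PWT are set:
 *
 *	cb  = _PAGE_PCD | _PAGE_PWT	= 0x18
 *	idx = __pte2cm_idx(cb)		= 0b011 = 3
 *	__cm_idx2pte(3)			= 0x18 again
 *
 * i.e. with the default PAT, index 3 corresponds to _PAGE_CACHE_MODE_UC.
 */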

static inline unsigned long cachemode2protval(enum page_cache_mode pcm)
{
	if (likely(pcm == 0))
		return 0;
	return __cachemode2pte_tbl[pcm];
}
static inline pgprot_t cachemode2pgprot(enum page_cache_mode pcm)
{
	return __pgprot(cachemode2protval(pcm));
}
static inline enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
{
	unsigned long masked;

	masked = pgprot_val(pgprot) & _PAGE_CACHE_MASK;
	if (likely(masked == 0))
		return 0;
	return __pte2cachemode_tbl[__pte2cm_idx(masked)];
}
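
/*
 * Usage sketch (illustrative): the two directions compose into a round
 * trip through the kernel's PAT configuration:
 *
 *	pgprot_t wc = cachemode2pgprot(_PAGE_CACHE_MODE_WC);
 *	enum page_cache_mode pcm = pgprot2cachemode(wc);
 *
 * pcm is _PAGE_CACHE_MODE_WC again if the PAT setup supports WC,
 * otherwise the tables fall back to a compatible mode such as UC_MINUS.
 * WB deliberately short-circuits to 0 in both helpers, matching the
 * "all cache bits clear" encoding noted above.
 */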
static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
{
	pgprotval_t val = pgprot_val(pgprot);
	pgprot_t new;

	pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
		((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
	return new;
}
static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
{
	pgprotval_t val = pgprot_val(pgprot);
	pgprot_t new;

	pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
			  ((val & _PAGE_PAT_LARGE) >>
			   (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
	return new;
}
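
/*
 * Example (illustrative): these two helpers only move the PAT selector
 * between its 4K position (bit 7) and its large-page position (bit 12):
 *
 *	prot4k  = ... | _PAGE_PAT;		(bit 7 set)
 *	protlrg = pgprot_4k_2_large(prot4k);	(bit 7 clear, bit 12 set)
 *	pgprot_large_2_4k(protlrg)		(back to prot4k)
 *
 * All other bits pass through untouched.
 */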
typedef struct page *pgtable_t;

extern pteval_t __supported_pte_mask;
extern void set_nx(void);
extern int nx_enabled;

#define pgprot_writecombine	pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);

#define pgprot_writethrough	pgprot_writethrough
extern pgprot_t pgprot_writethrough(pgprot_t prot);

/* Indicate that x86 has its own track and untrack pfn vma functions */
#define __HAVE_PFNMAP_TRACKING

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot);

/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);

#ifdef CONFIG_X86_32
extern void native_pagetable_init(void);
#else
#define native_pagetable_init        paging_init
#endif

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

enum pg_level {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_512G,
	PG_LEVEL_NUM
};

#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t, but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
				    unsigned int *level);
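
/*
 * Usage sketch (illustrative):
 *
 *	unsigned int level;
 *	pte_t *ptep = lookup_address((unsigned long)addr, &level);
 *
 *	if (ptep && (pte_flags(*ptep) & _PAGE_PRESENT))
 *		... level is PG_LEVEL_4K, PG_LEVEL_2M or PG_LEVEL_1G ...
 *
 * Callers must check for NULL, and remember that for a huge mapping the
 * returned pointer is really a pmd/pud re-typed as a pte.
 */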
extern pmd_t *lookup_pmd_address(unsigned long address);
extern phys_addr_t slow_virt_to_phys(void *__address);
extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
				   unsigned numpages, unsigned long page_flags);
#endif	/* !__ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_DEFS_H */