#ifndef _ASM_X86_PGTABLE_3LEVEL_H
#define _ASM_X86_PGTABLE_3LEVEL_H

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#define pte_ERROR(e)							\
	printk("%s:%d: bad pte %p(%08lx%08lx).\n",			\
	       __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e)							\
	printk("%s:%d: bad pmd %p(%016Lx).\n",				\
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e)							\
	printk("%s:%d: bad pgd %p(%016Lx).\n",				\
	       __FILE__, __LINE__, &(e), pgd_val(e))

/* Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
 * not possible, use ptep_get_and_clear to obtain the old pte
 * value and then use set_pte to install the new one.  -ben
 */
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	/*
	 * Write the high half first: the present bit lives in the low
	 * half, so a hardware walk cannot see the entry as present
	 * while only half of it has been updated.
	 */
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}
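
/*
 * Illustrative sketch, not part of this header: the helpers below are
 * hypothetical and only show the intended call patterns.  Without the
 * ordering above, a concurrent hardware walk could see a pte whose low
 * half (with the present bit) is already new while the high half still
 * holds old pfn bits, i.e. a mapping to the wrong page.
 */
#if 0
static inline void example_install_new_pte(pte_t *ptep, pte_t new_pte)
{
	/* fine: the entry is known to be not present yet */
	native_set_pte(ptep, new_pte);
}

static inline void example_replace_live_pte(pte_t *ptep, pte_t new_pte)
{
	/* a live entry must be torn down atomically first... */
	pte_t old = native_ptep_get_and_clear(ptep);

	(void)old;		/* ...so dirty/accessed state is not lost */
	native_set_pte(ptep, new_pte);
}
#endif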

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	/* set_64bit() writes the whole entry in one atomic 64-bit store */
	set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
}
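
/*
 * Sketch (hypothetical helper): when the old entry may be live and the
 * new one must become visible in a single step - e.g. updating a shared
 * kernel mapping in place - the atomic 64-bit store is the right tool.
 */
#if 0
static inline void example_update_live_pte(pte_t *ptep, pte_t new_pte)
{
	/* one atomic store: no window with a half-written entry */
	native_set_pte_atomic(ptep, new_pte);
}
#endif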

/*
 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
 * entry, so clear the bottom half first and enforce ordering with a compiler
 * barrier.
 */
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = 0;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	u32 *tmp = (u32 *)pmd;
	*tmp = 0;
	smp_wmb();
	*(tmp + 1) = 0;
}
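
/*
 * Illustrative anti-pattern, for contrast only: clearing the halves in
 * the opposite order would leave a window where the entry is still
 * marked present but its pfn bits are half zeroed, and a walk in that
 * window could cache a bogus translation.
 */
#if 0
static inline void example_broken_pte_clear(pte_t *ptep)
{
	ptep->pte_high = 0;	/* WRONG: entry is still present here... */
	smp_wmb();
	ptep->pte_low = 0;	/* ...but already points at a mangled pfn */
}
#endif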

static inline void pud_clear(pud_t *pudp)
{
	unsigned long pgd;

	set_pud(pudp, __pud(0));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 *
	 * Make sure the pud entry we're updating is within the
	 * current pgd to avoid unnecessary TLB flushes.
	 */
	pgd = read_cr3();
	if (__pa(pudp) >= pgd && __pa(pudp) <
	    (pgd + sizeof(pgd_t)*PTRS_PER_PGD))
		write_cr3(pgd);
}
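
/*
 * Worked example (numbers illustrative): with PAE, PTRS_PER_PGD is 4 and
 * sizeof(pgd_t) is 8, so the active top level occupies a 32-byte window
 * starting at the physical address held in cr3.  If cr3 holds 0x1000 and
 * the cleared pud entry sits at physical 0x1008, it falls inside
 * [0x1000, 0x1020) and cr3 is reloaded; an entry at physical 0x2000
 * belongs to some other mm and needs no flush here.
 */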

#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res;

	/* xchg acts as a barrier before the setting of the high bits */
	res.pte_low = xchg(&ptep->pte_low, 0);
	res.pte_high = ptep->pte_high;
	ptep->pte_high = 0;

	return res;
}
#else
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif
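
/*
 * Sketch (hypothetical caller): a typical teardown path grabs the old
 * entry atomically so accessed/dirty state set by the hardware up to
 * that point is not lost.
 */
#if 0
static inline int example_zap_pte(pte_t *ptep)
{
	pte_t old = native_ptep_get_and_clear(ptep);

	/* the low half carried the authoritative dirty bit */
	return pte_dirty(old);
}
#endif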

/*
 * Bits 0, 6 and 7 are taken in the low part of the pte,
 * so put the 32 bits of offset into the high part.
 */
#define pte_to_pgoff(pte) ((pte).pte_high)
#define pgoff_to_pte(off)						\
	((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } })
#define PTE_FILE_MAX_BITS       32
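
/*
 * Worked example (illustrative): a nonlinear file pte for page offset
 * 0x12345 is { .pte_low = _PAGE_FILE, .pte_high = 0x12345 }.  The
 * present bit (bit 0 of pte_low) stays clear, so the hardware ignores
 * the entry while all 32 offset bits survive in pte_high.
 */
#if 0
	pte_t file_pte = pgoff_to_pte(0x12345);
	unsigned long pgoff = pte_to_pgoff(file_pte);	/* 0x12345 again */
#endif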

/* Encode and decode a swap entry */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
#define __swp_type(x)			(((x).val) & 0x1f)
#define __swp_offset(x)			((x).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})
#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t){ { .pte_high = (x).val } })
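
/*
 * Worked example (illustrative): swap type 2 at offset 0x100 encodes as
 * val = 2 | (0x100 << 5) = 0x2002.  The whole value lands in pte_high
 * and pte_low stays 0, so the entry is never present to the hardware.
 * Five type bits cap MAX_SWAPFILES_SHIFT at 5 and leave 27 offset bits.
 */
#if 0
	swp_entry_t e = __swp_entry(2, 0x100);		/* e.val == 0x2002 */
	pte_t swap_pte = __swp_entry_to_pte(e);		/* pte_high == 0x2002 */
	unsigned long type = __swp_type(__pte_to_swp_entry(swap_pte));	/* 2 */
#endif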

#endif /* _ASM_X86_PGTABLE_3LEVEL_H */