#ifndef ASM_X86__PGTABLE_3LEVEL_H
#define ASM_X86__PGTABLE_3LEVEL_H

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#define pte_ERROR(e)							\
	printk("%s:%d: bad pte %p(%08lx%08lx).\n",			\
	       __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e)							\
	printk("%s:%d: bad pmd %p(%016Lx).\n",				\
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e)							\
	printk("%s:%d: bad pgd %p(%016Lx).\n",				\
	       __FILE__, __LINE__, &(e), pgd_val(e))

static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == 0;
}

static inline int pud_bad(pud_t pud)
{
	return (pud_val(pud) & ~(PTE_PFN_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) & _PAGE_PRESENT;
}

/* Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
 * not possible, use ptep_get_and_clear to obtain the old pte
 * value and then use set_pte to update it.  -ben
 */
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}
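
/*
 * The mirror-image ordering matters on the read side, too.  A 64-bit
 * PAE pte cannot be loaded with a single 32-bit access, so a lockless
 * reader has to guard against seeing a torn entry.  The sketch below
 * (a hypothetical helper, not part of this header's interface; it
 * assumes smp_rmb() is in scope) shows one way to do it: read low,
 * then high, and retry if the low word changed underneath us.
 */
static inline pte_t pte_read_lockless_sketch(pte_t *ptep)
{
	pte_t res;

	do {
		res.pte_low = ptep->pte_low;
		smp_rmb();
		res.pte_high = ptep->pte_high;
		smp_rmb();
	} while (res.pte_low != ptep->pte_low);

	return res;
}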

/*
 * Since this is only called on user PTEs, and the page fault handler
 * must handle the already racy situation of simultaneous page faults,
 * we are justified in merely clearing the PTE present bit, followed
 * by a set.  The ordering here is important.
 */
static inline void native_set_pte_present(struct mm_struct *mm,
					  unsigned long addr,
					  pte_t *ptep, pte_t pte)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}
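
/*
 * Why clear pte_low first?  If the low word (which holds _PAGE_PRESENT)
 * were rewritten before the high word, another CPU's page walker could
 * momentarily see present == 1 paired with the stale high bits and load
 * a bogus frame number into its TLB.  Dropping the present bit first
 * closes that window; a racing fault simply retries.
 */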

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
}
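
/*
 * set_64bit() is supplied by the cmpxchg/system headers; what matters
 * here is its contract: the full 64-bit entry becomes visible in one
 * atomic step, so no ordering dance between the two halves is needed.
 * Conceptually (an illustrative sketch only, assuming the cmpxchg64()
 * helper is available) it behaves like:
 */
static inline void set_64bit_sketch(unsigned long long *ptr,
				    unsigned long long val)
{
	unsigned long long old = *ptr;
	unsigned long long prev;

	/* retry the locked compare-and-swap until our value lands */
	while ((prev = cmpxchg64(ptr, old, val)) != old)
		old = prev;
}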

/*
 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
 * entry, so clear the bottom half first and enforce ordering with a compiler
 * barrier.
 */
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = 0;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	u32 *tmp = (u32 *)pmd;
	*tmp = 0;
	smp_wmb();
	*(tmp + 1) = 0;
}

static inline void pud_clear(pud_t *pudp)
{
	unsigned long pgd;

	set_pud(pudp, __pud(0));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 *
	 * Make sure the pud entry we're updating is within the
	 * current pgd to avoid unnecessary TLB flushes.
	 */
	pgd = read_cr3();
	if (__pa(pudp) >= pgd && __pa(pudp) <
	    (pgd + sizeof(pgd_t)*PTRS_PER_PGD))
		write_cr3(pgd);
}
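
/*
 * Reloading the unchanged %cr3 value, as above, is enough: a mov to
 * %cr3 invalidates the non-global TLB entries and the paging-structure
 * caches that the app note is concerned with.
 */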

#define pud_page(pud) ((struct page *) __va(pud_val(pud) & PTE_PFN_MASK))

#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_PFN_MASK))


/* Find an entry in the second-level page table. */
#define pmd_offset(pud, address) ((pmd_t *)pud_page(*(pud)) +	\
				  pmd_index(address))
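
/*
 * pmd_index() comes from the generic pgtable header; under PAE it picks
 * bits 29..21 of the address (PMD_SHIFT == 21, PTRS_PER_PMD == 512),
 * i.e. roughly:
 *
 *	#define pmd_index(address) \
 *		(((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
 */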

#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res;

	/*
	 * xchg clears the present bit atomically and acts as a barrier,
	 * so the hardware can no longer update the entry while we read
	 * and clear the high bits.
	 */
	res.pte_low = xchg(&ptep->pte_low, 0);
	res.pte_high = ptep->pte_high;
	ptep->pte_high = 0;

	return res;
}
#else
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif
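
/*
 * For reference, the UP variant used above lives in the common pgtable
 * header and looks roughly like this (no xchg needed when no other CPU
 * can race with us):
 *
 *	static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
 *	{
 *		pte_t res = *ptep;
 *		native_pte_clear(NULL, 0, ptep);
 *		return res;
 *	}
 */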

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
}

static inline int pte_none(pte_t pte)
{
	return !pte.pte_low && !pte.pte_high;
}

/*
 * Bits 0, 6 and 7 are taken in the low part of the pte, so put the
 * 32 bits of offset into the high part.
 */
#define pte_to_pgoff(pte) ((pte).pte_high)
#define pgoff_to_pte(off)						\
	((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } })
#define PTE_FILE_MAX_BITS       32
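
/*
 * Worked example (illustrative): pgoff_to_pte(0x12345) builds a pte
 * with pte_low == _PAGE_FILE and pte_high == 0x12345; the entry is not
 * present, and pte_to_pgoff() recovers 0x12345 from the high word.
 */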

/* Encode and de-code a swap entry */
#define __swp_type(x)			(((x).val) & 0x1f)
#define __swp_offset(x)			((x).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})
#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t){ { .pte_high = (x).val } })
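
/*
 * Worked example (illustrative): __swp_entry(3, 0x1000) yields val ==
 * 3 | (0x1000 << 5) == 0x20003; __swp_type() masks out the low five
 * bits to recover 3, and __swp_offset() shifts to recover 0x1000.  The
 * whole value is kept in pte_high, so pte_low (and with it
 * _PAGE_PRESENT) stays clear.
 */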

#endif /* ASM_X86__PGTABLE_3LEVEL_H */