xref: /openbmc/linux/arch/x86/include/asm/pgtable.h (revision 9ac8d3fb)
1 #ifndef _ASM_X86_PGTABLE_H
2 #define _ASM_X86_PGTABLE_H
3 
4 #define FIRST_USER_ADDRESS	0
5 
6 #define _PAGE_BIT_PRESENT	0	/* is present */
7 #define _PAGE_BIT_RW		1	/* writeable */
8 #define _PAGE_BIT_USER		2	/* userspace addressable */
9 #define _PAGE_BIT_PWT		3	/* page write through */
10 #define _PAGE_BIT_PCD		4	/* page cache disabled */
11 #define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
12 #define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
13 #define _PAGE_BIT_FILE		6
14 #define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
15 #define _PAGE_BIT_PAT		7	/* on 4KB pages */
16 #define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
17 #define _PAGE_BIT_UNUSED1	9	/* available for programmer */
18 #define _PAGE_BIT_IOMAP		10	/* flag used to indicate IO mapping */
19 #define _PAGE_BIT_UNUSED3	11
20 #define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
21 #define _PAGE_BIT_SPECIAL	_PAGE_BIT_UNUSED1
22 #define _PAGE_BIT_CPA_TEST	_PAGE_BIT_UNUSED1
23 #define _PAGE_BIT_NX           63       /* No execute: only valid after cpuid check */
24 
25 #define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
26 #define _PAGE_RW	(_AT(pteval_t, 1) << _PAGE_BIT_RW)
27 #define _PAGE_USER	(_AT(pteval_t, 1) << _PAGE_BIT_USER)
28 #define _PAGE_PWT	(_AT(pteval_t, 1) << _PAGE_BIT_PWT)
29 #define _PAGE_PCD	(_AT(pteval_t, 1) << _PAGE_BIT_PCD)
30 #define _PAGE_ACCESSED	(_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
31 #define _PAGE_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
32 #define _PAGE_PSE	(_AT(pteval_t, 1) << _PAGE_BIT_PSE)
33 #define _PAGE_GLOBAL	(_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
34 #define _PAGE_UNUSED1	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
35 #define _PAGE_IOMAP	(_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
36 #define _PAGE_UNUSED3	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
37 #define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
38 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
39 #define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
40 #define _PAGE_CPA_TEST	(_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
41 #define __HAVE_ARCH_PTE_SPECIAL
42 
43 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
44 #define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
45 #else
46 #define _PAGE_NX	(_AT(pteval_t, 0))
47 #endif
48 
49 /* If _PAGE_PRESENT is clear, we use these: */
50 #define _PAGE_FILE	_PAGE_DIRTY	/* nonlinear file mapping,
51 					 * saved PTE; unset:swap */
52 #define _PAGE_PROTNONE	_PAGE_PSE	/* if the user mapped it with PROT_NONE;
53 					   pte_present gives true */
54 
55 #define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
56 			 _PAGE_ACCESSED | _PAGE_DIRTY)
57 #define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
58 			 _PAGE_DIRTY)
59 
60 /* Set of bits not changed in pte_modify */
61 #define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
62 			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
63 
64 #define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
65 #define _PAGE_CACHE_WB		(0)
66 #define _PAGE_CACHE_WC		(_PAGE_PWT)
67 #define _PAGE_CACHE_UC_MINUS	(_PAGE_PCD)
68 #define _PAGE_CACHE_UC		(_PAGE_PCD | _PAGE_PWT)
69 
70 #define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
71 #define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
72 				 _PAGE_ACCESSED | _PAGE_NX)
73 
74 #define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW |	\
75 					 _PAGE_USER | _PAGE_ACCESSED)
76 #define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
77 					 _PAGE_ACCESSED | _PAGE_NX)
78 #define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
79 					 _PAGE_ACCESSED)
80 #define PAGE_COPY		PAGE_COPY_NOEXEC
81 #define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
82 					 _PAGE_ACCESSED | _PAGE_NX)
83 #define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
84 					 _PAGE_ACCESSED)
85 
86 #define __PAGE_KERNEL_EXEC						\
87 	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
88 #define __PAGE_KERNEL		(__PAGE_KERNEL_EXEC | _PAGE_NX)
89 
90 #define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
91 #define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
92 #define __PAGE_KERNEL_EXEC_NOCACHE	(__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
93 #define __PAGE_KERNEL_WC		(__PAGE_KERNEL | _PAGE_CACHE_WC)
94 #define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
95 #define __PAGE_KERNEL_UC_MINUS		(__PAGE_KERNEL | _PAGE_PCD)
96 #define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
97 #define __PAGE_KERNEL_VSYSCALL_NOCACHE	(__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
98 #define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
99 #define __PAGE_KERNEL_LARGE_NOCACHE	(__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
100 #define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)
101 
102 #define __PAGE_KERNEL_IO		(__PAGE_KERNEL | _PAGE_IOMAP)
103 #define __PAGE_KERNEL_IO_NOCACHE	(__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP)
104 #define __PAGE_KERNEL_IO_UC_MINUS	(__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP)
105 #define __PAGE_KERNEL_IO_WC		(__PAGE_KERNEL_WC | _PAGE_IOMAP)
106 
107 #define PAGE_KERNEL			__pgprot(__PAGE_KERNEL)
108 #define PAGE_KERNEL_RO			__pgprot(__PAGE_KERNEL_RO)
109 #define PAGE_KERNEL_EXEC		__pgprot(__PAGE_KERNEL_EXEC)
110 #define PAGE_KERNEL_RX			__pgprot(__PAGE_KERNEL_RX)
111 #define PAGE_KERNEL_WC			__pgprot(__PAGE_KERNEL_WC)
112 #define PAGE_KERNEL_NOCACHE		__pgprot(__PAGE_KERNEL_NOCACHE)
113 #define PAGE_KERNEL_UC_MINUS		__pgprot(__PAGE_KERNEL_UC_MINUS)
114 #define PAGE_KERNEL_EXEC_NOCACHE	__pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
115 #define PAGE_KERNEL_LARGE		__pgprot(__PAGE_KERNEL_LARGE)
116 #define PAGE_KERNEL_LARGE_NOCACHE	__pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
117 #define PAGE_KERNEL_LARGE_EXEC		__pgprot(__PAGE_KERNEL_LARGE_EXEC)
118 #define PAGE_KERNEL_VSYSCALL		__pgprot(__PAGE_KERNEL_VSYSCALL)
119 #define PAGE_KERNEL_VSYSCALL_NOCACHE	__pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)
120 
121 #define PAGE_KERNEL_IO			__pgprot(__PAGE_KERNEL_IO)
122 #define PAGE_KERNEL_IO_NOCACHE		__pgprot(__PAGE_KERNEL_IO_NOCACHE)
123 #define PAGE_KERNEL_IO_UC_MINUS		__pgprot(__PAGE_KERNEL_IO_UC_MINUS)
124 #define PAGE_KERNEL_IO_WC		__pgprot(__PAGE_KERNEL_IO_WC)
125 
126 /*         xwr */
127 #define __P000	PAGE_NONE
128 #define __P001	PAGE_READONLY
129 #define __P010	PAGE_COPY
130 #define __P011	PAGE_COPY
131 #define __P100	PAGE_READONLY_EXEC
132 #define __P101	PAGE_READONLY_EXEC
133 #define __P110	PAGE_COPY_EXEC
134 #define __P111	PAGE_COPY_EXEC
135 
136 #define __S000	PAGE_NONE
137 #define __S001	PAGE_READONLY
138 #define __S010	PAGE_SHARED
139 #define __S011	PAGE_SHARED
140 #define __S100	PAGE_READONLY_EXEC
141 #define __S101	PAGE_READONLY_EXEC
142 #define __S110	PAGE_SHARED_EXEC
143 #define __S111	PAGE_SHARED_EXEC
144 
145 /*
146  * early identity mapping  pte attrib macros.
147  */
148 #ifdef CONFIG_X86_64
149 #define __PAGE_KERNEL_IDENT_LARGE_EXEC	__PAGE_KERNEL_LARGE_EXEC
150 #else
151 /*
152  * For PDE_IDENT_ATTR include USER bit. As the PDE and PTE protection
153  * bits are combined, this will alow user to access the high address mapped
154  * VDSO in the presence of CONFIG_COMPAT_VDSO
155  */
156 #define PTE_IDENT_ATTR	 0x003		/* PRESENT+RW */
157 #define PDE_IDENT_ATTR	 0x067		/* PRESENT+RW+USER+DIRTY+ACCESSED */
158 #define PGD_IDENT_ATTR	 0x001		/* PRESENT (no other attributes) */
159 #endif
160 
161 #ifndef __ASSEMBLY__
162 
163 /*
164  * ZERO_PAGE is a global shared page that is always zero: used
165  * for zero-mapped memory areas etc..
166  */
167 extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
168 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
169 
170 extern spinlock_t pgd_lock;
171 extern struct list_head pgd_list;
172 
173 /*
174  * The following only work if pte_present() is true.
175  * Undefined behaviour if not..
176  */
177 static inline int pte_dirty(pte_t pte)
178 {
179 	return pte_flags(pte) & _PAGE_DIRTY;
180 }
181 
182 static inline int pte_young(pte_t pte)
183 {
184 	return pte_flags(pte) & _PAGE_ACCESSED;
185 }
186 
187 static inline int pte_write(pte_t pte)
188 {
189 	return pte_flags(pte) & _PAGE_RW;
190 }
191 
192 static inline int pte_file(pte_t pte)
193 {
194 	return pte_flags(pte) & _PAGE_FILE;
195 }
196 
197 static inline int pte_huge(pte_t pte)
198 {
199 	return pte_flags(pte) & _PAGE_PSE;
200 }
201 
202 static inline int pte_global(pte_t pte)
203 {
204 	return pte_flags(pte) & _PAGE_GLOBAL;
205 }
206 
207 static inline int pte_exec(pte_t pte)
208 {
209 	return !(pte_flags(pte) & _PAGE_NX);
210 }
211 
212 static inline int pte_special(pte_t pte)
213 {
214 	return pte_flags(pte) & _PAGE_SPECIAL;
215 }
216 
217 static inline unsigned long pte_pfn(pte_t pte)
218 {
219 	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
220 }
221 
222 #define pte_page(pte)	pfn_to_page(pte_pfn(pte))
223 
224 static inline int pmd_large(pmd_t pte)
225 {
226 	return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
227 		(_PAGE_PSE | _PAGE_PRESENT);
228 }
229 
230 static inline pte_t pte_mkclean(pte_t pte)
231 {
232 	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
233 }
234 
235 static inline pte_t pte_mkold(pte_t pte)
236 {
237 	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
238 }
239 
240 static inline pte_t pte_wrprotect(pte_t pte)
241 {
242 	return __pte(pte_val(pte) & ~_PAGE_RW);
243 }
244 
245 static inline pte_t pte_mkexec(pte_t pte)
246 {
247 	return __pte(pte_val(pte) & ~_PAGE_NX);
248 }
249 
250 static inline pte_t pte_mkdirty(pte_t pte)
251 {
252 	return __pte(pte_val(pte) | _PAGE_DIRTY);
253 }
254 
255 static inline pte_t pte_mkyoung(pte_t pte)
256 {
257 	return __pte(pte_val(pte) | _PAGE_ACCESSED);
258 }
259 
260 static inline pte_t pte_mkwrite(pte_t pte)
261 {
262 	return __pte(pte_val(pte) | _PAGE_RW);
263 }
264 
265 static inline pte_t pte_mkhuge(pte_t pte)
266 {
267 	return __pte(pte_val(pte) | _PAGE_PSE);
268 }
269 
270 static inline pte_t pte_clrhuge(pte_t pte)
271 {
272 	return __pte(pte_val(pte) & ~_PAGE_PSE);
273 }
274 
275 static inline pte_t pte_mkglobal(pte_t pte)
276 {
277 	return __pte(pte_val(pte) | _PAGE_GLOBAL);
278 }
279 
280 static inline pte_t pte_clrglobal(pte_t pte)
281 {
282 	return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
283 }
284 
285 static inline pte_t pte_mkspecial(pte_t pte)
286 {
287 	return __pte(pte_val(pte) | _PAGE_SPECIAL);
288 }
289 
290 extern pteval_t __supported_pte_mask;
291 
292 static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
293 {
294 	return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
295 		      pgprot_val(pgprot)) & __supported_pte_mask);
296 }
297 
298 static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
299 {
300 	return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) |
301 		      pgprot_val(pgprot)) & __supported_pte_mask);
302 }
303 
304 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
305 {
306 	pteval_t val = pte_val(pte);
307 
308 	/*
309 	 * Chop off the NX bit (if present), and add the NX portion of
310 	 * the newprot (if present):
311 	 */
312 	val &= _PAGE_CHG_MASK;
313 	val |= pgprot_val(newprot) & (~_PAGE_CHG_MASK) & __supported_pte_mask;
314 
315 	return __pte(val);
316 }
317 
318 /* mprotect needs to preserve PAT bits when updating vm_page_prot */
319 #define pgprot_modify pgprot_modify
320 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
321 {
322 	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
323 	pgprotval_t addbits = pgprot_val(newprot);
324 	return __pgprot(preservebits | addbits);
325 }
326 
327 #define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)
328 
329 #define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)
330 
331 #ifndef __ASSEMBLY__
332 #define __HAVE_PHYS_MEM_ACCESS_PROT
333 struct file;
334 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
335                               unsigned long size, pgprot_t vma_prot);
336 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
337                               unsigned long size, pgprot_t *vma_prot);
338 #endif
339 
340 /* Install a pte for a particular vaddr in kernel space. */
341 void set_pte_vaddr(unsigned long vaddr, pte_t pte);
342 
343 #ifdef CONFIG_X86_32
344 extern void native_pagetable_setup_start(pgd_t *base);
345 extern void native_pagetable_setup_done(pgd_t *base);
346 #else
347 static inline void native_pagetable_setup_start(pgd_t *base) {}
348 static inline void native_pagetable_setup_done(pgd_t *base) {}
349 #endif
350 
351 struct seq_file;
352 extern void arch_report_meminfo(struct seq_file *m);
353 
354 #ifdef CONFIG_PARAVIRT
355 #include <asm/paravirt.h>
356 #else  /* !CONFIG_PARAVIRT */
357 #define set_pte(ptep, pte)		native_set_pte(ptep, pte)
358 #define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
359 
360 #define set_pte_present(mm, addr, ptep, pte)				\
361 	native_set_pte_present(mm, addr, ptep, pte)
362 #define set_pte_atomic(ptep, pte)					\
363 	native_set_pte_atomic(ptep, pte)
364 
365 #define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)
366 
367 #ifndef __PAGETABLE_PUD_FOLDED
368 #define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
369 #define pgd_clear(pgd)			native_pgd_clear(pgd)
370 #endif
371 
372 #ifndef set_pud
373 # define set_pud(pudp, pud)		native_set_pud(pudp, pud)
374 #endif
375 
376 #ifndef __PAGETABLE_PMD_FOLDED
377 #define pud_clear(pud)			native_pud_clear(pud)
378 #endif
379 
380 #define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
381 #define pmd_clear(pmd)			native_pmd_clear(pmd)
382 
383 #define pte_update(mm, addr, ptep)              do { } while (0)
384 #define pte_update_defer(mm, addr, ptep)        do { } while (0)
385 
386 static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
387 {
388 	native_pagetable_setup_start(base);
389 }
390 
391 static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
392 {
393 	native_pagetable_setup_done(base);
394 }
395 #endif	/* CONFIG_PARAVIRT */
396 
397 #endif	/* __ASSEMBLY__ */
398 
399 #ifdef CONFIG_X86_32
400 # include "pgtable_32.h"
401 #else
402 # include "pgtable_64.h"
403 #endif
404 
405 /*
406  * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
407  *
408  * this macro returns the index of the entry in the pgd page which would
409  * control the given virtual address
410  */
411 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
412 
413 /*
414  * pgd_offset() returns a (pgd_t *)
415  * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
416  */
417 #define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
418 /*
419  * a shortcut which implies the use of the kernel's pgd, instead
420  * of a process's
421  */
422 #define pgd_offset_k(address) pgd_offset(&init_mm, (address))
423 
424 
425 #define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
426 #define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
427 
428 #ifndef __ASSEMBLY__
429 
430 enum {
431 	PG_LEVEL_NONE,
432 	PG_LEVEL_4K,
433 	PG_LEVEL_2M,
434 	PG_LEVEL_1G,
435 	PG_LEVEL_NUM
436 };
437 
438 #ifdef CONFIG_PROC_FS
439 extern void update_page_count(int level, unsigned long pages);
440 #else
441 static inline void update_page_count(int level, unsigned long pages) { }
442 #endif
443 
444 /*
445  * Helper function that returns the kernel pagetable entry controlling
446  * the virtual address 'address'. NULL means no pagetable entry present.
447  * NOTE: the return type is pte_t but if the pmd is PSE then we return it
448  * as a pte too.
449  */
450 extern pte_t *lookup_address(unsigned long address, unsigned int *level);
451 
452 /* local pte updates need not use xchg for locking */
453 static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
454 {
455 	pte_t res = *ptep;
456 
457 	/* Pure native function needs no input for mm, addr */
458 	native_pte_clear(NULL, 0, ptep);
459 	return res;
460 }
461 
462 static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
463 				     pte_t *ptep , pte_t pte)
464 {
465 	native_set_pte(ptep, pte);
466 }
467 
468 #ifndef CONFIG_PARAVIRT
469 /*
470  * Rules for using pte_update - it must be called after any PTE update which
471  * has not been done using the set_pte / clear_pte interfaces.  It is used by
472  * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
473  * updates should either be sets, clears, or set_pte_atomic for P->P
474  * transitions, which means this hook should only be called for user PTEs.
475  * This hook implies a P->P protection or access change has taken place, which
476  * requires a subsequent TLB flush.  The notification can optionally be delayed
477  * until the TLB flush event by using the pte_update_defer form of the
478  * interface, but care must be taken to assure that the flush happens while
479  * still holding the same page table lock so that the shadow and primary pages
480  * do not become out of sync on SMP.
481  */
482 #define pte_update(mm, addr, ptep)		do { } while (0)
483 #define pte_update_defer(mm, addr, ptep)	do { } while (0)
484 #endif
485 
486 /*
487  * We only update the dirty/accessed state if we set
488  * the dirty bit by hand in the kernel, since the hardware
489  * will do the accessed bit for us, and we don't want to
490  * race with other CPU's that might be updating the dirty
491  * bit at the same time.
492  */
493 struct vm_area_struct;
494 
495 #define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
496 extern int ptep_set_access_flags(struct vm_area_struct *vma,
497 				 unsigned long address, pte_t *ptep,
498 				 pte_t entry, int dirty);
499 
500 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
501 extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
502 				     unsigned long addr, pte_t *ptep);
503 
504 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
505 extern int ptep_clear_flush_young(struct vm_area_struct *vma,
506 				  unsigned long address, pte_t *ptep);
507 
508 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
509 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
510 				       pte_t *ptep)
511 {
512 	pte_t pte = native_ptep_get_and_clear(ptep);
513 	pte_update(mm, addr, ptep);
514 	return pte;
515 }
516 
517 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
518 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
519 					    unsigned long addr, pte_t *ptep,
520 					    int full)
521 {
522 	pte_t pte;
523 	if (full) {
524 		/*
525 		 * Full address destruction in progress; paravirt does not
526 		 * care about updates and native needs no locking
527 		 */
528 		pte = native_local_ptep_get_and_clear(ptep);
529 	} else {
530 		pte = ptep_get_and_clear(mm, addr, ptep);
531 	}
532 	return pte;
533 }
534 
535 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
536 static inline void ptep_set_wrprotect(struct mm_struct *mm,
537 				      unsigned long addr, pte_t *ptep)
538 {
539 	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
540 	pte_update(mm, addr, ptep);
541 }
542 
543 /*
544  * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
545  *
546  *  dst - pointer to pgd range anwhere on a pgd page
547  *  src - ""
548  *  count - the number of pgds to copy.
549  *
550  * dst and src can be on the same page, but the range must not overlap,
551  * and must not cross a page boundary.
552  */
553 static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
554 {
555        memcpy(dst, src, count * sizeof(pgd_t));
556 }
557 
558 
559 #include <asm-generic/pgtable.h>
560 #endif	/* __ASSEMBLY__ */
561 
562 #endif /* _ASM_X86_PGTABLE_H */
563