/* arch/x86/include/asm/pgtable.h (revision bb66fc67) */
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/e820.h>

#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)					\
	((boot_cpu_data.x86 > 3)				\
	 ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS))	\
	 : (prot))
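/*
 * Usage sketch (illustrative, not part of this header): a driver
 * mapping device memory to userspace would typically apply this to the
 * VMA protection before remapping, e.g.:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	io_remap_pfn_range(vma, vma->vm_start, pfn, size,
 *			   vma->vm_page_prot);
 */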

#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd)	native_set_pmd_at(mm, addr, pmdp, pmd)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)              do { } while (0)
#define pte_update_defer(mm, addr, ptep)        do { } while (0)
#define pmd_update(mm, addr, ptep)              do { } while (0)
#define pmd_update_defer(mm, addr, ptep)        do { } while (0)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while(0)

#endif	/* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
	return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))
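/*
 * Example (illustrative, not from this header): for a present pte the
 * helpers above recover the physical frame and its struct page:
 *
 *	unsigned long pfn  = pte_pfn(pte);
 *	phys_addr_t  phys  = (phys_addr_t)pfn << PAGE_SHIFT;
 *	struct page *page  = pte_page(pte);	// == pfn_to_page(pfn)
 */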

static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_splitting(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_SPLITTING;
}

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_PSE;
}

static inline int has_transparent_hugepage(void)
{
	return cpu_has_pse;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
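/*
 * Illustrative sketch (hypothetical caller, not from this header):
 * walkers must treat a trans-huge pmd as one large mapping rather than
 * a pointer to a pte page, and wait out a concurrent split:
 *
 *	if (pmd_trans_huge(*pmdp)) {
 *		if (pmd_trans_splitting(*pmdp))
 *			wait_split_huge_page(vma->anon_vma, pmdp);
 *		else
 *			...	// operate on the whole huge page
 *	}
 */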

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}
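/*
 * Example (illustrative, not from this header): these helpers are pure
 * functions over the pte value and compose freely; none of them touch
 * the page tables:
 *
 *	pte = pte_mkold(pte_wrprotect(pte));	// compute new value
 *	set_pte_at(mm, addr, ptep, pte);	// single visible store
 */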

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_PRESENT);
}

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_file_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline int pte_file_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}
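/*
 * Illustrative sketch (hypothetical, not from this header): the
 * soft-dirty bits back the write-tracking described in
 * Documentation/vm/soft-dirty.txt.  Clearing works roughly like:
 *
 *	pte = pte_wrprotect(pte);
 *	pte = pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
 *	set_pte_at(mm, addr, ptep, pte);
 *
 * after which pte_mkdirty() re-sets _PAGE_SOFT_DIRTY when the page is
 * next written.
 */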

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}
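/*
 * Example (illustrative, not from this header): building a kernel pte
 * for a frame; massage_pgprot() strips bits the CPU cannot honour
 * (e.g. NX without hardware support) from a present protection:
 *
 *	pte_t pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
 */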

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd);

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

	return __pmd(val);
}
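/*
 * Illustrative sketch (not from this header): mprotect-style code keeps
 * the frame and the _PAGE_CHG_MASK bits while swapping protections:
 *
 *	pte = pte_modify(pte, vm_get_page_prot(new_vmflags));
 */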

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 unsigned long flags,
					 unsigned long new_flags)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 */
	if ((flags == _PAGE_CACHE_UC_MINUS &&
	     new_flags == _PAGE_CACHE_WB) ||
	    (flags == _PAGE_CACHE_WC &&
	     new_flags == _PAGE_CACHE_WB)) {
		return 0;
	}

	return 1;
}
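/*
 * Worked example of the rule above (flag values from pgtable_types.h):
 * a range tracked as WC may not be mapped WB, but the reverse is fine:
 *
 *	is_new_memtype_allowed(p, sz, _PAGE_CACHE_WC, _PAGE_CACHE_WB) == 0
 *	is_new_memtype_allowed(p, sz, _PAGE_CACHE_WB, _PAGE_CACHE_WC) == 1
 */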

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>

static inline int pte_none(pte_t pte)
{
	return !pte.pte;
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
			       _PAGE_NUMA);
}

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
			mm_tlb_flush_pending(mm))
		return true;

	return false;
}

static inline int pte_hidden(pte_t pte)
{
	return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE |
				 _PAGE_NUMA);
}

static inline int pmd_none(pmd_t pmd)
{
	/*
	 * Only check low word on 32-bit platforms, since it might be
	 * out of sync with upper half.
	 */
	return (unsigned long)native_pmd_val(pmd) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this function returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_NUMA_BALANCING
	/* pmd_numa check */
	if ((pmd_flags(pmd) & (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA)
		return 0;
#endif
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}
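/*
 * Example: with 4 KB pages (PAGE_SHIFT == 12) this is a right shift by
 * 8, so 262144 pages -> 1024 MB.
 */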

#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return native_pud_val(pud) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)		pfn_to_page(pud_val(pud) >> PAGE_SHIFT)

/* Find an entry in the second-level page table. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* PAGETABLE_LEVELS > 2 */

#if PAGETABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)		pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	return !native_pgd_val(pgd);
}
#endif	/* PAGETABLE_LEVELS > 3 */

#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
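/*
 * Illustrative sketch (hypothetical helper, not from this header): the
 * offset helpers compose into a full software walk of the kernel page
 * tables; all error checking elided:
 *
 *	static pte_t *lookup_kernel_pte(unsigned long addr)
 *	{
 *		pgd_t *pgd = pgd_offset_k(addr);
 *		pud_t *pud = pud_offset(pgd, addr);
 *		pmd_t *pmd = pmd_offset(pud, addr);
 *		return pte_offset_kernel(pmd, addr);
 *	}
 */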


#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
				     pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.  The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif
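/*
 * Illustrative sketch (not from this header) of the contract above:
 * modify the pte in place, notify via pte_update(), then flush, all
 * under the same page table lock:
 *
 *	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
 *	pte_update(mm, addr, ptep);
 *	flush_tlb_page(vma, addr);
 */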

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);


#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmdp)
{
	pmd_t pmd = native_pmdp_get_and_clear(pmdp);
	pmd_update(mm, addr, pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
	pmd_update(mm, addr, pmdp);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
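/*
 * Usage example (illustrative; this is what the pgd constructor in
 * arch/x86/mm/pgtable.c does to copy the kernel half of a reference
 * pgd into a fresh one):
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */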

#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}

/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}

static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	VM_BUG_ON(pte_present(pte));
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	VM_BUG_ON(pte_present(pte));
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	VM_BUG_ON(pte_present(pte));
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */