#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/e820.h>

#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)                                  \
        ((boot_cpu_data.x86 > 3)                                \
         ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS))  \
         : (prot))

#ifndef __ASSEMBLY__

#include <asm/x86_init.h>

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)              native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd) native_set_pmd_at(mm, addr, pmdp, pmd)

#define set_pte_atomic(ptep, pte)                               \
        native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)              native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)              native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)                  native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)             native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)                  native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)       native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)                  native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)              do { } while (0)
#define pte_update_defer(mm, addr, ptep)        do { } while (0)
#define pmd_update(mm, addr, ptep)              do { } while (0)
#define pmd_update_defer(mm, addr, ptep)        do { } while (0)

#define pgd_val(x)      native_pgd_val(x)
#define __pgd(x)        native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)      native_pud_val(x)
#define __pud(x)        native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)      native_pmd_val(x)
#define __pmd(x)        native_make_pmd(x)
#endif

#define pte_val(x)      native_pte_val(x)
#define __pte(x)        native_make_pte(x)

#define arch_end_context_switch(prev)   do { } while (0)

#endif  /* CONFIG_PARAVIRT */
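/*
 * Illustrative sketch (hypothetical call site): on a !CONFIG_PARAVIRT
 * build, a fault-path update such as
 *
 *      pte_t entry = mk_pte(page, vma->vm_page_prot);
 *      set_pte_at(vma->vm_mm, address, ptep, entry);
 *
 * expands directly to native_set_pte_at(), i.e. a plain store of the
 * pte value.  Under CONFIG_PARAVIRT the same call site is routed
 * through asm/paravirt.h so a hypervisor can intercept the write.
 */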
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
        return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
        return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_young(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
        return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
        return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
        return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
        return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
        return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
        return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
        return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
        return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)   pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
        return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_splitting(pmd_t pmd)
{
        return pmd_val(pmd) & _PAGE_SPLITTING;
}

static inline int pmd_trans_huge(pmd_t pmd)
{
        return pmd_val(pmd) & _PAGE_PSE;
}

static inline int has_transparent_hugepage(void)
{
        return cpu_has_pse;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
        pteval_t v = native_pte_val(pte);

        return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
        pteval_t v = native_pte_val(pte);

        return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_SPECIAL);
}
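/*
 * Example (sketch): the helpers above compose freely, since each one
 * takes and returns a pte_t by value.  A fork-style copy might
 * write-protect and age an entry in one expression:
 *
 *      pte = pte_wrprotect(pte_mkold(pte));
 *
 * while a write-fault handler does roughly the inverse:
 *
 *      pte = pte_mkwrite(pte_mkdirty(pte_mkyoung(pte)));
 */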
static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
        pmdval_t v = native_pmd_val(pmd);

        return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
        pmdval_t v = native_pmd_val(pmd);

        return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_PRESENT);
}

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
        pgprotval_t protval = pgprot_val(pgprot);

        if (protval & _PAGE_PRESENT)
                protval &= __supported_pte_mask;

        return protval;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
        return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
                     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
        return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
                     massage_pgprot(pgprot));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pteval_t val = pte_val(pte);

        /*
         * Chop off the NX bit (if present), and add the NX portion of
         * the newprot (if present):
         */
        val &= _PAGE_CHG_MASK;
        val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

        return __pte(val);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        pmdval_t val = pmd_val(pmd);

        val &= _HPAGE_CHG_MASK;
        val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

        return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
        pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
        pgprotval_t addbits = pgprot_val(newprot);
        return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(massage_pgprot(p))
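/*
 * Example (sketch): pte_modify() keeps everything under _PAGE_CHG_MASK
 * (the PFN plus the attribute bits that must survive a protection
 * change, e.g. accessed/dirty and the cache-attribute bits) and takes
 * the rest from the new protection, so an mprotect-style change is:
 *
 *      pte = pte_modify(pte, vma->vm_page_prot);
 *
 * The PFN is untouched; only the protection bits are replaced.
 */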
static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
                                         unsigned long flags,
                                         unsigned long new_flags)
{
        /*
         * PAT type is always WB for untracked ranges, so no need to check.
         */
        if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
                return 1;

        /*
         * Certain new memtypes are not allowed with certain
         * requested memtype:
         * - request is uncached, return cannot be write-back
         * - request is write-combine, return cannot be write-back
         */
        if ((flags == _PAGE_CACHE_UC_MINUS &&
             new_flags == _PAGE_CACHE_WB) ||
            (flags == _PAGE_CACHE_WC &&
             new_flags == _PAGE_CACHE_WB)) {
                return 0;
        }

        return 1;
}

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif  /* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>

static inline int pte_none(pte_t pte)
{
        return !pte.pte;
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
        return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
        return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

static inline int pte_hidden(pte_t pte)
{
        return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
        /*
         * Checking for _PAGE_PSE is needed too because
         * split_huge_page will temporarily clear the present bit (but
         * the _PAGE_PSE flag will remain set at all times while the
         * _PAGE_PRESENT bit is clear).
         */
        return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

static inline int pmd_none(pmd_t pmd)
{
        /* Only check low word on 32-bit platforms, since it might be
           out of sync with the upper half. */
        return (unsigned long)native_pmd_val(pmd) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
        return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)   pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this function returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
        return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
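/*
 * Worked example (assuming the x86_64 values PMD_SHIFT == 21 and
 * PTRS_PER_PMD == 512): an address whose low bits are 0x00600000
 * (6 MB) lies in the fourth 2 MB region of its pmd page, so
 * pmd_index() returns (0x00600000 >> 21) & 511 == 3.
 */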
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
        return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
        return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
        return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
        return npg >> (20 - PAGE_SHIFT);
}

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
        remap_pfn_range(vma, vaddr, pfn, size, prot)

#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
        return native_pud_val(pud) == 0;
}

static inline int pud_present(pud_t pud)
{
        return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
        return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)           pfn_to_page(pud_val(pud) >> PAGE_SHIFT)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
        return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
                (_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
        return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else   /* PAGETABLE_LEVELS <= 2 */
static inline int pud_large(pud_t pud)
{
        return 0;
}
#endif  /* PAGETABLE_LEVELS > 2 */
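/*
 * Example (sketch): pud_large() requires both _PAGE_PSE and
 * _PAGE_PRESENT, so a software walker can stop at the pud level on a
 * 1 GB mapping instead of descending to a (nonexistent) pmd page:
 *
 *      if (pud_large(*pud))
 *              ...     // *pud itself maps a 1 GB region
 *      else
 *              pmd = pmd_offset(pud, address);
 */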
#if PAGETABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
        return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
        return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)           pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
        return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
        return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
        return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
        return !native_pgd_val(pgd);
}
#endif  /* PAGETABLE_LEVELS > 3 */

#endif  /* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))


#define KERNEL_PGD_BOUNDARY     pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS         (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
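/*
 * Example (sketch): a full four-level software walk of the kernel
 * page tables using the helpers above, with error handling elided:
 *
 *      pgd_t *pgd = pgd_offset_k(addr);
 *      pud_t *pud = pud_offset(pgd, addr);
 *      pmd_t *pmd = pmd_offset(pud, addr);
 *      pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * Each step masks the table entry with PTE_PFN_MASK to locate the
 * next level's page and indexes it with that level's address bits.
 * Real callers must check pgd/pud/pmd_none() and _bad(), and stop
 * early at pud_large()/pmd_large() mappings, before descending.
 */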
#ifndef __ASSEMBLY__

extern int direct_gbpages;

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
        pte_t res = *ptep;

        /* Pure native function needs no input for mm, addr */
        native_pte_clear(NULL, 0, ptep);
        return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
        pmd_t res = *pmdp;

        native_pmd_clear(pmdp);
        return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
                                     pte_t *ptep, pte_t pte)
{
        native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
                                     pmd_t *pmdp, pmd_t pmd)
{
        native_set_pmd(pmdp, pmd);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.  The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to ensure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)              do { } while (0)
#define pte_update_defer(mm, addr, ptep)        do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pte_t *ptep,
                                 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
                                     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pte_t *ptep)
{
        pte_t pte = native_ptep_get_and_clear(ptep);
        pte_update(mm, addr, ptep);
        return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
                                            unsigned long addr, pte_t *ptep,
                                            int full)
{
        pte_t pte;
        if (full) {
                /*
                 * Full address destruction in progress; paravirt does not
                 * care about updates and native needs no locking
                 */
                pte = native_local_ptep_get_and_clear(ptep);
        } else {
                pte = ptep_get_and_clear(mm, addr, ptep);
        }
        return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
                                      unsigned long addr, pte_t *ptep)
{
        clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
        pte_update(mm, addr, ptep);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pmd_t *pmdp,
                                 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                                     unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pmd_t *pmdp);


#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
                                 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pmd_t *pmdp)
{
        pmd_t pmd = native_pmdp_get_and_clear(pmdp);
        pmd_update(mm, addr, pmdp);
        return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
                                      unsigned long addr, pmd_t *pmdp)
{
        clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
        pmd_update(mm, addr, pmdp);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
        memcpy(dst, src, count * sizeof(pgd_t));
}


#include <asm-generic/pgtable.h>
#endif  /* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */