/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in asm-generic/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none.  Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache().  This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = (vma)->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

/*
 * ARCHes with special requirements for evicting THP backing TLB entries can
 * implement this.  Even otherwise, it can help optimize normal TLB flushes
 * in the THP regime: the stock flush_tlb_range() typically has an
 * optimization to nuke the entire TLB if the flush span is greater than a
 * threshold, which will likely be true for a single huge page.  Thus a
 * single THP flush would invalidate the entire TLB, which is not desirable.
 * e.g. see arch/arc: flush_pmd_tlb_range (and the illustrative sketch below)
 */
#define flush_pmd_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif
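
/*
 * Illustrative sketch only (not part of this file): an architecture that
 * can shoot down a single huge-page TLB entry could override the generic
 * fallback above by defining __HAVE_ARCH_FLUSH_PMD_TLB_RANGE in its
 * headers and supplying something along these lines.  Here
 * arch_flush_huge_tlb_entry() stands in for a hypothetical arch-private
 * helper; a THP is backed by exactly one PMD-sized TLB entry, so only
 * that entry needs to go.
 *
 *	#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
 *
 *	void flush_pmd_tlb_range(struct vm_area_struct *vma,
 *				 unsigned long start, unsigned long end)
 *	{
 *		arch_flush_huge_tlb_entry(vma->vm_mm, start);
 *	}
 *
 * This sidesteps the full-TLB invalidate that a stock flush_tlb_range()
 * may perform for a span as large as HPAGE_PMD_SIZE.
 */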

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument, so this destroys page coloring on some archs */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);
	return pgtable;
}
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t entry = *pmdp;
	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * pmd and hugepage pte formats are the same, so we could
	 * use the same function.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	/* collapse entails shooting down ptes not pmd */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */