/*
 * mm/pgtable-generic.c
 *
 * Generic pgtable methods declared in asm-generic/pgtable.h
 *
 * Copyright (C) 2010 Linus Torvalds
 */

#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed, and
 * writable). Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache (see the
 * usage sketch below). This used to be done in the caller, but sparc
 * needs minor faults to force that call on sun4c, so we changed this
 * macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_page(vma, address);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
	BUG();
	return 0;
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif
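
/*
 * Usage sketch for the access-flag helpers above (illustrative only,
 * not part of this file): a fault handler that already holds the pte
 * lock, with the usual vma/address/ptep/dirty locals assumed, would
 * typically do
 *
 *	entry = pte_mkyoung(*ptep);
 *	if (ptep_set_access_flags(vma, address, ptep, entry, dirty))
 *		update_mmu_cache(vma, address, ptep);
 *
 * i.e. the return value tells the caller whether the TLB was flushed
 * and whether calling update_mmu_cache() is worthwhile at all.
 */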

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
#else
	BUG();
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	pte_t pte;
	pte = ptep_get_and_clear(vma->vm_mm, address, ptep);
	flush_tlb_page(vma, address);
	return pte;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	pmd_t pmd = pmd_mksplitting(*pmdp);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
	/* tlb flush only to serialize against gup-fast */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
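
/*
 * The deposit/withdraw helpers below keep a per-mm FIFO of preallocated
 * pte page tables for transparent huge pages: a pte table is deposited
 * when a huge pmd is established and withdrawn again when that pmd is
 * split or zapped.  Illustrative pairing (hypothetical caller, not part
 * of this file; pgtable and _pmd are caller locals), with
 * mm->page_table_lock held around both calls:
 *
 *	pgtable_trans_huge_deposit(mm, pgtable);
 *	...
 *	pgtable = pgtable_trans_huge_withdraw(mm);
 *	pmd_populate(mm, &_pmd, pgtable);
 */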

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
{
	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!mm->pmd_huge_pte)
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
	mm->pmd_huge_pte = pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* no "address" argument so destroys page coloring of some arch */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
{
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = mm->pmd_huge_pte;
	if (list_empty(&pgtable->lru))
		mm->pmd_huge_pte = NULL;
	else {
		mm->pmd_huge_pte = list_entry(pgtable->lru.next,
					      struct page, lru);
		list_del(&pgtable->lru);
	}
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp));
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
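
/*
 * Illustrative sketch for pmdp_invalidate() above (hypothetical caller,
 * not part of this file; haddr, pmd and pgtable are caller locals): when
 * splitting a huge pmd, the entry is first marked not present and
 * flushed, so no CPU keeps using the huge mapping, and only then is the
 * regular pte page table installed in its place:
 *
 *	pmdp_invalidate(vma, haddr, pmd);
 *	pmd_populate(mm, pmd, pgtable);
 */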