/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H

#define MMU_NO_CONTEXT	~0UL

#include <linux/mm_types.h>
#include <asm/book3s/64/tlbflush-hash.h>
#include <asm/book3s/64/tlbflush-radix.h>

/* TLB flush actions. Used as argument to tlbiel_all() */
enum {
	TLB_INVAL_SCOPE_GLOBAL = 0,	/* invalidate all TLBs */
	TLB_INVAL_SCOPE_LPID = 1,	/* invalidate TLBs for current LPID */
};

static inline void tlbiel_all(void)
{
	/*
	 * This is used for host machine check and bootup.
	 *
	 * This uses early_radix_enabled() (and the implementations use
	 * early_cpu_has_feature() etc.) because those work early in boot,
	 * and this is the machine check path, which is not performance
	 * critical.
	 */
	if (early_radix_enabled())
		radix__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
	else
		hash__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
}

static inline void tlbiel_all_lpid(bool radix)
{
	/*
	 * This is used for guest machine check.
	 */
	if (radix)
		radix__tlbiel_all(TLB_INVAL_SCOPE_LPID);
	else
		hash__tlbiel_all(TLB_INVAL_SCOPE_LPID);
}

#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
				       unsigned long start, unsigned long end)
{
	if (radix_enabled())
		radix__flush_pmd_tlb_range(vma, start, end);
}

#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
					   unsigned long start,
					   unsigned long end)
{
	if (radix_enabled())
		radix__flush_hugetlb_tlb_range(vma, start, end);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (radix_enabled())
		radix__flush_tlb_range(vma, start, end);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	if (radix_enabled())
		radix__flush_tlb_kernel_range(start, end);
}

static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		radix__local_flush_tlb_mm(mm);
}

static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	if (radix_enabled())
		radix__local_flush_tlb_page(vma, vmaddr);
}

static inline void local_flush_tlb_page_psize(struct mm_struct *mm,
					      unsigned long vmaddr, int psize)
{
	if (radix_enabled())
		radix__local_flush_tlb_page_psize(mm, vmaddr, psize);
}

static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (radix_enabled())
		radix__tlb_flush(tlb);
}

#ifdef CONFIG_SMP
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		radix__flush_tlb_mm(mm);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	if (radix_enabled())
		radix__flush_tlb_page(vma, vmaddr);
}
#else
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
#endif /* CONFIG_SMP */

#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
						unsigned long address)
{
	/*
	 * Book3S 64 does not require spurious fault flushes because the PTE
	 * must be re-fetched in case of an access permission problem. So the
	 * only reason for a spurious fault should be concurrent modification
	 * to the PTE, in which case the PTE will eventually be re-fetched by
	 * the MMU when it attempts the access again.
	 *
	 * See: Power ISA Version 3.1B, 6.10.1.2 Modifying a Translation Table
	 * Entry, Setting a Reference or Change Bit or Upgrading Access
	 * Authority (PTE Subject to Atomic Hardware Updates):
	 *
	 * "If the only change being made to a valid PTE that is subject to
	 *  atomic hardware updates is to set the Reference or Change bit to
	 *  1 or to upgrade access authority, a simpler sequence suffices
	 *  because the translation hardware will refetch the PTE if an
	 *  access is attempted for which the only problems were reference
	 *  and/or change bits needing to be set or insufficient access
	 *  authority."
	 *
	 * The nest MMU in POWER9 does not perform this PTE re-fetch, but
	 * it avoids the spurious fault problem by flushing the TLB before
	 * upgrading PTE permissions, see radix__ptep_set_access_flags.
	 */
}

static inline bool __pte_flags_need_flush(unsigned long oldval,
					  unsigned long newval)
{
	unsigned long delta = oldval ^ newval;

	/*
	 * The return value of this function doesn't matter for hash:
	 * ptep_modify_prot_start() does a pte_update(), which does or
	 * schedules any necessary hash table update and flush.
	 */
	if (!radix_enabled())
		return true;

	/*
	 * We do not expect kernel mappings or non-PTEs or not-present PTEs.
	 */
	VM_WARN_ON_ONCE(oldval & _PAGE_PRIVILEGED);
	VM_WARN_ON_ONCE(newval & _PAGE_PRIVILEGED);
	VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE));
	VM_WARN_ON_ONCE(!(newval & _PAGE_PTE));
	VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT));
	VM_WARN_ON_ONCE(!(newval & _PAGE_PRESENT));

	/*
	 * Must flush on any change except READ, WRITE, EXEC, DIRTY, ACCESSED.
	 *
	 * In theory, some changed software bits could be tolerated; in
	 * practice, those should rarely if ever matter.
	 */
	if (delta & ~(_PAGE_RWX | _PAGE_DIRTY | _PAGE_ACCESSED))
		return true;

	/*
	 * If any of the above was present in old but cleared in new, flush.
	 * With the exception of _PAGE_ACCESSED, don't worry about flushing
	 * if that was cleared (see the comment in ptep_clear_flush_young()).
	 */
	if ((delta & ~_PAGE_ACCESSED) & oldval)
		return true;

	return false;
}
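
/*
 * Worked example (editorial sketch, not part of the upstream header):
 * upgrading a radix PTE from read-only to read-write gives
 * delta == _PAGE_WRITE. That bit falls within _PAGE_RWX, so the first
 * test does not force a flush, and because the bit is newly set rather
 * than cleared, "(delta & ~_PAGE_ACCESSED) & oldval" is zero: no flush
 * is needed, and the MMU simply re-fetches the PTE on the next access
 * (see flush_tlb_fix_spurious_fault() above). A downgrade that clears
 * _PAGE_WRITE makes that same expression non-zero, so the flush is
 * correctly requested.
 */
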
static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
	return __pte_flags_need_flush(pte_val(oldpte), pte_val(newpte));
}
#define pte_needs_flush pte_needs_flush

static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
	return __pte_flags_need_flush(pmd_val(oldpmd), pmd_val(newpmd));
}
#define huge_pmd_needs_flush huge_pmd_needs_flush

extern bool tlbie_capable;
extern bool tlbie_enabled;

static inline bool cputlb_use_tlbie(void)
{
	return tlbie_enabled;
}

#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */
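
/*
 * Illustrative usage sketch (editorial addition, not upstream code):
 * generic code such as mm/mprotect.c consumes pte_needs_flush()
 * roughly as follows, with "tlb" being the caller's mmu_gather:
 *
 *	oldpte = ptep_modify_prot_start(vma, addr, ptep);
 *	newpte = pte_modify(oldpte, newprot);
 *	ptep_modify_prot_commit(vma, addr, ptep, oldpte, newpte);
 *	if (pte_needs_flush(oldpte, newpte))
 *		tlb_flush_pte_range(tlb, addr, PAGE_SIZE);
 *
 * On hash, pte_needs_flush() always returns true and the pte_update()
 * path performs or schedules the real hash table flush; on radix, it
 * lets pure permission upgrades skip the TLB flush entirely.
 */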