/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H

#define MMU_NO_CONTEXT	~0UL

#include <linux/mm_types.h>
#include <asm/book3s/64/tlbflush-hash.h>
#include <asm/book3s/64/tlbflush-radix.h>

/* TLB flush actions. Used as argument to tlbiel_all() */
enum {
	TLB_INVAL_SCOPE_GLOBAL = 0,	/* invalidate all TLBs */
	TLB_INVAL_SCOPE_LPID = 1,	/* invalidate TLBs for current LPID */
};

static inline void tlbiel_all(void)
{
	/*
	 * This is used for host machine check and bootup.
	 *
	 * This uses early_radix_enabled() (and the implementations use
	 * early_cpu_has_feature() etc.) because those work early in boot,
	 * and this is the machine check path, which is not performance
	 * critical.
	 */
	if (early_radix_enabled())
		radix__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
	else
		hash__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
}
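
/*
 * Illustrative only (an assumption about callers, not a contract): a
 * host machine check handler might simply do
 *
 *	tlbiel_all();
 *
 * to invalidate all local translations before attempting recovery,
 * since the TLB contents may be suspect after the error.
 */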

static inline void tlbiel_all_lpid(bool radix)
{
	/*
	 * This is used for guest machine check.
	 */
	if (radix)
		radix__tlbiel_all(TLB_INVAL_SCOPE_LPID);
	else
		hash__tlbiel_all(TLB_INVAL_SCOPE_LPID);
}
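
/*
 * Illustrative only: a hypervisor's guest machine check recovery path
 * might call tlbiel_all_lpid(radix), with "radix" reflecting the
 * guest's MMU mode, to invalidate the guest's (LPID) translations on
 * the local CPU.
 */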
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
				       unsigned long start, unsigned long end)
{
	if (radix_enabled())
		radix__flush_pmd_tlb_range(vma, start, end);
}
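
/*
 * Note on the radix_enabled()-only pattern in this and the following
 * range-flush helpers: under the hash MMU, translations are
 * invalidated from the PTE update paths as hash page table entries
 * are torn down (see tlbflush-hash.h), so no additional flush is
 * needed here and the hash case is intentionally a no-op.
 */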

#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
					   unsigned long start,
					   unsigned long end)
{
	if (radix_enabled())
		radix__flush_hugetlb_tlb_range(vma, start, end);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (radix_enabled())
		radix__flush_tlb_range(vma, start, end);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	if (radix_enabled())
		radix__flush_tlb_kernel_range(start, end);
}
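
/*
 * Descriptive note: flush_tlb_kernel_range() is called by generic
 * code after kernel page table updates, for example when vmalloc
 * mappings are torn down.
 */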

static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		radix__local_flush_tlb_mm(mm);
}

static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	if (radix_enabled())
		radix__local_flush_tlb_page(vma, vmaddr);
}

static inline void local_flush_tlb_page_psize(struct mm_struct *mm,
					      unsigned long vmaddr, int psize)
{
	if (radix_enabled())
		radix__local_flush_tlb_page_psize(mm, vmaddr, psize);
}
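
/*
 * Descriptive note: psize is a powerpc MMU page size index (e.g.
 * MMU_PAGE_4K or MMU_PAGE_64K), not a size in bytes; it selects the
 * page size encoding used for the invalidation.
 */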

static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (radix_enabled())
		radix__tlb_flush(tlb);
	else
		hash__tlb_flush(tlb);
}
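
/*
 * Illustrative only, a simplified sketch of how generic mm code ends
 * up here; tlb_flush() is invoked via the mmu_gather machinery rather
 * than called directly:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm);
 *	// ... unmap ranges, accumulating pages into "tlb" ...
 *	tlb_finish_mmu(&tlb);	// eventually calls tlb_flush(&tlb)
 */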

#ifdef CONFIG_SMP
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		radix__flush_tlb_mm(mm);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	if (radix_enabled())
		radix__flush_tlb_page(vma, vmaddr);
}
#else
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
#endif /* CONFIG_SMP */

#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
						unsigned long address,
						pte_t *ptep)
{
	/*
	 * Book3S 64 does not require spurious fault flushes because the PTE
	 * must be re-fetched in case of an access permission problem. So the
	 * only reason for a spurious fault should be concurrent modification
	 * to the PTE, in which case the PTE will eventually be re-fetched by
	 * the MMU when it attempts the access again.
	 *
	 * See: Power ISA Version 3.1B, 6.10.1.2 Modifying a Translation Table
	 * Entry, Setting a Reference or Change Bit or Upgrading Access
	 * Authority (PTE Subject to Atomic Hardware Updates):
	 *
	 * "If the only change being made to a valid PTE that is subject to
	 *  atomic hardware updates is to set the Reference or Change bit to
	 *  1 or to upgrade access authority, a simpler sequence suffices
	 *  because the translation hardware will refetch the PTE if an
	 *  access is attempted for which the only problems were reference
	 *  and/or change bits needing to be set or insufficient access
	 *  authority."
	 *
	 * The nest MMU in POWER9 does not perform this PTE re-fetch, but
	 * it avoids the spurious fault problem by flushing the TLB before
	 * upgrading PTE permissions, see radix__ptep_set_access_flags.
	 */
}

static inline bool __pte_protnone(unsigned long pte)
{
	return (pte & (pgprot_val(PAGE_NONE) | _PAGE_RWX)) == pgprot_val(PAGE_NONE);
}
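
/*
 * Descriptive note: protnone (PAGE_NONE) user PTEs carry
 * _PAGE_PRIVILEGED with no RWX bits, so without this helper they
 * would look like kernel mappings to the _PAGE_PRIVILEGED sanity
 * checks below; __pte_protnone() lets those checks tell the two
 * apart.
 */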

static inline bool __pte_flags_need_flush(unsigned long oldval,
					  unsigned long newval)
{
	unsigned long delta = oldval ^ newval;

	/*
	 * The return value of this function doesn't matter for hash;
	 * ptep_modify_prot_start() does a pte_update(), which does or
	 * schedules any necessary hash table update and flush.
	 */
	if (!radix_enabled())
		return true;

	/*
	 * We do not expect kernel mappings, non-PTEs, or not-present PTEs.
	 */
	VM_WARN_ON_ONCE(!__pte_protnone(oldval) && oldval & _PAGE_PRIVILEGED);
	VM_WARN_ON_ONCE(!__pte_protnone(newval) && newval & _PAGE_PRIVILEGED);
	VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE));
	VM_WARN_ON_ONCE(!(newval & _PAGE_PTE));
	VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT));
	VM_WARN_ON_ONCE(!(newval & _PAGE_PRESENT));

	/*
	 * Must flush on any change except READ, WRITE, EXEC, DIRTY, ACCESSED.
	 *
	 * In theory, some changed software bits could be tolerated; in
	 * practice, those should rarely if ever matter.
	 */

	if (delta & ~(_PAGE_RWX | _PAGE_DIRTY | _PAGE_ACCESSED))
		return true;

	/*
	 * If any of the above was present in old but cleared in new, flush.
	 * With the exception of _PAGE_ACCESSED, don't worry about flushing
	 * if that was cleared (see the comment in ptep_clear_flush_young()).
	 */
	if ((delta & ~_PAGE_ACCESSED) & oldval)
		return true;

	return false;
}
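
/*
 * Worked examples of the decision above (derived directly from the
 * masks; illustrative, not an exhaustive caller contract):
 *
 *  - Write upgrade (RO -> RW): only _PAGE_WRITE changes, and it was
 *    clear in oldval -> no flush needed.
 *  - _PAGE_DIRTY cleared: the changed bit was set in oldval -> flush.
 *  - _PAGE_ACCESSED cleared: masked out -> no flush (see
 *    ptep_clear_flush_young()).
 *  - PFN or any other bit changes: outside the tolerated mask -> flush.
 */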

static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
	return __pte_flags_need_flush(pte_val(oldpte), pte_val(newpte));
}
#define pte_needs_flush pte_needs_flush

static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
	return __pte_flags_need_flush(pmd_val(oldpmd), pmd_val(newpmd));
}
#define huge_pmd_needs_flush huge_pmd_needs_flush
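
/*
 * Descriptive note: pte_needs_flush() and huge_pmd_needs_flush() are
 * consumed by generic code such as change_protection() (mm/mprotect.c)
 * to skip TLB flushes on pure permission promotions.
 */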

extern bool tlbie_capable;
extern bool tlbie_enabled;

static inline bool cputlb_use_tlbie(void)
{
	return tlbie_enabled;
}
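
/*
 * Descriptive note: when cputlb_use_tlbie() is false, the flush
 * implementations avoid broadcast tlbie and instead use local tlbiel
 * plus IPIs to reach other CPUs. tlbie_capable and tlbie_enabled are
 * set up by platform code; tlbie_enabled may additionally be toggled
 * at runtime for debugging.
 */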

#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */