/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

extern int isolate_lru_page(struct page *page);	/* from internal.h */
extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_new(struct mm_struct *mm,
		unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
		unsigned long entries, unsigned long dev_hpa,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_struct *mm,
		struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
		struct mm_struct *mm, unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua);
extern bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
		unsigned int pageshift, unsigned long *size);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
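
/*
 * Illustrative sketch only (not compiled): a typical user of the
 * pre-registration API above, such as a VFIO SPAPR TCE backend, pins a
 * range of userspace memory once and then translates within it. The
 * variable names below are hypothetical and error handling is elided:
 *
 *	struct mm_iommu_table_group_mem_t *mem;
 *	unsigned long hpa;
 *
 *	if (mm_iommu_new(mm, ua, entries, &mem))
 *		return -EFAULT;
 *	if (mm_iommu_ua_to_hpa(mem, ua, PAGE_SHIFT, &hpa))
 *		return -EFAULT;
 *	...program the TCE table with hpa, then undo with...
 *	mm_iommu_put(mm, mem);
 */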
#else
static inline bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
		unsigned int pageshift, unsigned long *size)
{
	return false;
}
#endif
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);

#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next);
static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	if (radix_enabled())
		return radix__switch_mmu_context(prev, next);
	return switch_slb(tsk, next);
}

extern int hash__alloc_context_id(void);
extern void hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }

static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	int index = ea >> MAX_EA_BITS_PER_CONTEXT;
	int context_id;

	context_id = hash__alloc_context_id();
	if (context_id < 0)
		return context_id;

	VM_WARN_ON(mm->context.extended_id[index]);
	mm->context.extended_id[index] = context_id;
	return context_id;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	int context_id;

	context_id = get_user_context(&mm->context, ea);
	if (!context_id)
		return true;
	return false;
}
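
/*
 * Illustrative sketch only (not compiled): a hash fault path that sees
 * an effective address above the first context is expected to allocate
 * the extra context id on demand, roughly:
 *
 *	if (need_extra_context(mm, ea)) {
 *		if (alloc_extended_context(mm, ea) < 0)
 *			return -EFAULT;
 *	}
 */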

#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	/* Should never be called on anything but book3s_64 */
	WARN_ON(1);
	return -ENOMEM;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	return false;
}
#endif

#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
#else
static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
#endif

extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);
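
/*
 * Illustrative sketch only (not compiled): a coprocessor driver is
 * expected to enable an ACOP type for an mm before issuing requests
 * and to drop it when done. 'acop_bit' is a hypothetical type bit, and
 * use_cop() is assumed to return 0 on success:
 *
 *	if (use_cop(acop_bit, current->mm))
 *		return -ENODEV;
 *	...issue coprocessor requests...
 *	drop_cop(acop_bit, current->mm);
 */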

#ifdef CONFIG_PPC_BOOK3S_64
static inline void inc_mm_active_cpus(struct mm_struct *mm)
{
	atomic_inc(&mm->context.active_cpus);
}

static inline void dec_mm_active_cpus(struct mm_struct *mm)
{
	atomic_dec(&mm->context.active_cpus);
}

static inline void mm_context_add_copro(struct mm_struct *mm)
{
	/*
	 * When the first copro is started on an mm, increment the active
	 * CPU count so that TLB invalidations are forced to be global and
	 * therefore propagate to the Nest MMU.
	 */
	if (atomic_inc_return(&mm->context.copros) == 1)
		inc_mm_active_cpus(mm);
}

static inline void mm_context_remove_copro(struct mm_struct *mm)
{
	int c;

	/*
	 * When removing the last copro, we need to broadcast a global
	 * flush of the full mm, as the next TLBI may be local and the
	 * nMMU and/or PSL need to be cleaned up.
	 *
	 * Both the 'copros' and 'active_cpus' counts are looked at in
	 * flush_all_mm() to determine the scope (local/global) of the
	 * TLBIs, so we need to flush first before decrementing
	 * 'copros'. If this API is used by several callers for the
	 * same context, it can lead to over-flushing. It's hopefully
	 * not common enough to be a problem.
	 *
	 * Skip on hash, as we don't know how to do the proper flush
	 * for the time being. Invalidations will remain global if
	 * used on hash. Note that we can't drop 'copros' either, as
	 * it could make some invalidations local with no flush
	 * in-between.
	 */
	if (radix_enabled()) {
		flush_all_mm(mm);

		c = atomic_dec_if_positive(&mm->context.copros);
		/* Detect imbalance between add and remove */
		WARN_ON(c < 0);

		if (c == 0)
			dec_mm_active_cpus(mm);
	}
}
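
/*
 * Illustrative sketch only (not compiled): users of the nMMU, e.g. a
 * copro context setup path, are expected to pair the two calls above
 * around the lifetime of the coprocessor context:
 *
 *	mm_context_add_copro(mm);
 *	...attach coprocessor and run work...
 *	mm_context_remove_copro(mm);
 */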
#else
static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
static inline void dec_mm_active_cpus(struct mm_struct *mm) { }
static inline void mm_context_add_copro(struct mm_struct *mm) { }
static inline void mm_context_remove_copro(struct mm_struct *mm) { }
#endif

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
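
/*
 * Advertise that this architecture provides a real switch_mm_irqs_off()
 * so that generic code does not fall back to defining it as plain
 * switch_mm().
 */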
#define switch_mm_irqs_off switch_mm_irqs_off

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	switch_mm(prev, next, current);
}

/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}

#ifdef CONFIG_PPC_BOOK3E_64
static inline void arch_exit_mmap(struct mm_struct *mm)
{
}
#else
extern void arch_exit_mmap(struct mm_struct *mm);
#endif
static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
		mm->context.vdso_base = 0;
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}

#ifdef CONFIG_PPC_MEM_KEYS
bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
			       bool execute, bool foreign);
void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm);
#else /* CONFIG_PPC_MEM_KEYS */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}

#define pkey_mm_init(mm)
#define thread_pkey_regs_save(thread)
#define thread_pkey_regs_restore(new_thread, old_thread)
#define thread_pkey_regs_init(thread)
#define arch_dup_pkeys(oldmm, mm)

static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
{
	return 0x0UL;
}

#endif /* CONFIG_PPC_MEM_KEYS */

static inline int arch_dup_mmap(struct mm_struct *oldmm,
				struct mm_struct *mm)
{
	arch_dup_pkeys(oldmm, mm);
	return 0;
}

#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */