xref: /openbmc/linux/arch/powerpc/include/asm/mmu_context.h (revision 4b0aaacee51eb6592a03fdefd5ce97558518e291)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line.
 */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

extern int isolate_lru_page(struct page *page);	/* from internal.h */
extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_struct *mm,
		struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
		struct mm_struct *mm, unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
		unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
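
/*
 * Illustrative lifecycle of the preregistration API above (a sketch,
 * not a real call site; error handling is elided): pin a region once,
 * translate within it as often as needed, then release it.
 *
 *	struct mm_iommu_table_group_mem_t *mem;
 *	unsigned long hpa;
 *
 *	mm_iommu_get(mm, ua, entries, &mem);
 *	mm_iommu_ua_to_hpa(mem, ua, pageshift, &hpa);
 *	mm_iommu_put(mm, mem);
 */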
#endif
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);

#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next);
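/*
 * Book3S 64 picks the context-switch flavour at runtime: radix switches
 * the process-scoped page tables, while hash reloads the SLB for the
 * incoming mm.
 */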
static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	if (radix_enabled())
		return radix__switch_mmu_context(prev, next);
	return switch_slb(tsk, next);
}

extern int hash__alloc_context_id(void);
extern void hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }

static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	int context_id;

	int index = ea >> MAX_EA_BITS_PER_CONTEXT;

	context_id = hash__alloc_context_id();
	if (context_id < 0)
		return context_id;

	VM_WARN_ON(mm->context.extended_id[index]);
	mm->context.extended_id[index] = context_id;
	return context_id;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	int context_id;

	context_id = get_ea_context(&mm->context, ea);
	if (!context_id)
		return true;
	return false;
}
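
/*
 * Sketch of how a fault path might combine the two helpers above (the
 * real call sites live in the hash fault handling code; the error
 * handling here is a placeholder):
 *
 *	if (need_extra_context(mm, ea)) {
 *		if (alloc_extended_context(mm, ea) < 0)
 *			return -EFAULT;
 *	}
 */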

#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	/* non-Book3S-64 platforms should never call this */
	WARN_ON(1);
	return -ENOMEM;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	return false;
}
#endif

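/*
 * Workaround for a POWER9 TLB prefetch issue that only matters when HV
 * KVM guests can run on a radix host; stubbed out otherwise.
 */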
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
#else
static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
#endif

extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);

#ifdef CONFIG_PPC_BOOK3S_64
static inline void inc_mm_active_cpus(struct mm_struct *mm)
{
	atomic_inc(&mm->context.active_cpus);
}

static inline void dec_mm_active_cpus(struct mm_struct *mm)
{
	atomic_dec(&mm->context.active_cpus);
}

static inline void mm_context_add_copro(struct mm_struct *mm)
{
	/*
	 * If any copro is in use, increment the active CPU count so
	 * that TLB invalidations are made global and propagate to the
	 * Nest MMU.
	 */
	if (atomic_inc_return(&mm->context.copros) == 1)
		inc_mm_active_cpus(mm);
}

static inline void mm_context_remove_copro(struct mm_struct *mm)
{
	int c;

	/*
	 * When removing the last copro, we need to broadcast a global
	 * flush of the full mm, as the next TLBI may be local and the
	 * nMMU and/or PSL need to be cleaned up.
	 *
	 * Both the 'copros' and 'active_cpus' counts are looked at in
	 * flush_all_mm() to determine the scope (local/global) of the
	 * TLBIs, so we need to flush first before decrementing
	 * 'copros'. If this API is used by several callers for the
	 * same context, it can lead to over-flushing. It's hopefully
	 * not common enough to be a problem.
	 *
	 * Skip on hash, as we don't know how to do the proper flush
	 * for the time being. Invalidations will remain global if
	 * used on hash. Note that we can't drop 'copros' either, as
	 * it could make some invalidations local with no flush
	 * in-between.
	 */
	if (radix_enabled()) {
		flush_all_mm(mm);

		c = atomic_dec_if_positive(&mm->context.copros);
		/* Detect imbalance between add and remove */
		WARN_ON(c < 0);

		if (c == 0)
			dec_mm_active_cpus(mm);
	}
}
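
/*
 * Add/remove must stay balanced or active_cpus drifts.  A coprocessor
 * driver (cxl/ocxl are the in-tree users) pairs them roughly like
 * this:
 *
 *	mm_context_add_copro(mm);
 *	... attach the context to the copro / nMMU ...
 *	mm_context_remove_copro(mm);
 */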
#else
static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
static inline void dec_mm_active_cpus(struct mm_struct *mm) { }
static inline void mm_context_add_copro(struct mm_struct *mm) { }
static inline void mm_context_remove_copro(struct mm_struct *mm) { }
#endif


extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);

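/*
 * switch_mm() is just the interrupt-safe wrapper: the real work is
 * done in switch_mm_irqs_off(), which must be entered with interrupts
 * disabled.
 */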
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
#define switch_mm_irqs_off switch_mm_irqs_off


#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	switch_mm(prev, next, current);
}

/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}

static inline int arch_dup_mmap(struct mm_struct *oldmm,
				struct mm_struct *mm)
{
	return 0;
}

#ifndef CONFIG_PPC_BOOK3S_64
static inline void arch_exit_mmap(struct mm_struct *mm)
{
}
#else
extern void arch_exit_mmap(struct mm_struct *mm);
#endif

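/*
 * If an unmap covers the VDSO, forget its base address so the kernel
 * stops pointing signal trampolines at it (unmapping the VDSO is what
 * checkpoint/restore tools typically do before relocating it).
 */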
static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
		mm->context.vdso_base = 0;
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}

#ifdef CONFIG_PPC_MEM_KEYS
bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
			       bool execute, bool foreign);
#else /* CONFIG_PPC_MEM_KEYS */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}

#define pkey_mm_init(mm)
#define thread_pkey_regs_save(thread)
#define thread_pkey_regs_restore(new_thread, old_thread)
#define thread_pkey_regs_init(thread)

static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
{
	return 0x0UL;
}

#endif /* CONFIG_PPC_MEM_KEYS */

#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */