/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

extern int isolate_lru_page(struct page *page);	/* from internal.h */
extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_struct *mm,
		struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
		struct mm_struct *mm, unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
		unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#endif
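
/*
 * Hedged usage sketch (not code from this tree): roughly how a user of
 * the pre-registration API above, e.g. a VFIO SPAPR TCE backend, might
 * pin memory, translate an address, and hold a mapping reference. The
 * locals and the program_tce() helper are hypothetical, and error
 * handling is abbreviated.
 *
 *	struct mm_iommu_table_group_mem_t *mem;
 *	unsigned long hpa;
 *
 *	if (mm_iommu_get(mm, ua, entries, &mem))	// pin + register
 *		return -EFAULT;
 *	if (!mm_iommu_ua_to_hpa(mem, ua, &hpa) &&	// ua -> host phys
 *	    !mm_iommu_mapped_inc(mem))			// take mapping ref
 *		program_tce(hpa);			// hypothetical
 *	...
 *	mm_iommu_mapped_dec(mem);			// drop mapping ref
 *	mm_iommu_put(mm, mem);				// unregister + unpin
 */
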
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);

#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next);
static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	if (radix_enabled())
		return radix__switch_mmu_context(prev, next);
	return switch_slb(tsk, next);
}

extern int hash__alloc_context_id(void);
extern void hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }
#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
#endif

#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
#else
static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
#endif

extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);

#ifdef CONFIG_PPC_BOOK3S_64
static inline void inc_mm_active_cpus(struct mm_struct *mm)
{
	atomic_inc(&mm->context.active_cpus);
}

static inline void dec_mm_active_cpus(struct mm_struct *mm)
{
	atomic_dec(&mm->context.active_cpus);
}

static inline void mm_context_add_copro(struct mm_struct *mm)
{
	/*
	 * If any copro is in use, increment the active CPU count
	 * in order to force TLB invalidations to be global so that
	 * they propagate to the Nest MMU.
	 */
	if (atomic_inc_return(&mm->context.copros) == 1)
		inc_mm_active_cpus(mm);
}

static inline void mm_context_remove_copro(struct mm_struct *mm)
{
	int c;

	c = atomic_dec_if_positive(&mm->context.copros);

	/* Detect imbalance between add and remove */
	WARN_ON(c < 0);

	/*
	 * Need to broadcast a global flush of the full mm before
	 * decrementing active_cpus count, as the next TLBI may be
	 * local and the nMMU and/or PSL need to be cleaned up.
	 * Should be rare enough so that it's acceptable.
	 *
	 * Skip on hash, as we don't know how to do the proper flush
	 * for the time being. Invalidations will remain global if
	 * used on hash.
	 */
	if (c == 0 && radix_enabled()) {
		flush_all_mm(mm);
		dec_mm_active_cpus(mm);
	}
}
#else
static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
static inline void dec_mm_active_cpus(struct mm_struct *mm) { }
static inline void mm_context_add_copro(struct mm_struct *mm) { }
static inline void mm_context_remove_copro(struct mm_struct *mm) { }
#endif
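
/*
 * Hedged usage sketch: a nest MMU coprocessor driver is expected to
 * pair add/remove around the lifetime of a context attached to the
 * nMMU. attach_copro_context() and detach_copro_context() below are
 * hypothetical driver hooks, not functions from this tree.
 *
 *	mm_context_add_copro(mm);		// before copro walks mm
 *	rc = attach_copro_context(mm);		// hypothetical
 *	if (rc)
 *		mm_context_remove_copro(mm);	// undo on failure
 *	...
 *	detach_copro_context(mm);		// hypothetical
 *	mm_context_remove_copro(mm);		// flushes nMMU on radix
 */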

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
#define switch_mm_irqs_off switch_mm_irqs_off
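
/*
 * Rough context-switch call chain (a sketch of how the pieces above fit
 * together; exact call sites may differ):
 *
 *	context_switch()			kernel/sched/core.c
 *	  switch_mm_irqs_off(prev, next, tsk)
 *	    switch_mmu_context(prev, next, tsk)
 *	      radix__switch_mmu_context() or switch_slb()
 *
 * Callers that already run with interrupts disabled use
 * switch_mm_irqs_off() directly; switch_mm() above is the irq-safe
 * wrapper around it.
 */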

#define deactivate_mm(tsk,mm)	do { } while (0)
151 
152 /*
153  * After we have set current->mm to a new value, this activates
154  * the context for the new mm so we see the new mappings.
155  */
156 static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
157 {
158 	switch_mm(prev, next, current);
159 }
160 
/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}

static inline int arch_dup_mmap(struct mm_struct *oldmm,
				struct mm_struct *mm)
{
	return 0;
}

#ifndef CONFIG_PPC_BOOK3S_64
static inline void arch_exit_mmap(struct mm_struct *mm)
{
}
#else
extern void arch_exit_mmap(struct mm_struct *mm);
#endif

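/*
 * If the range being unmapped covers the VDSO, clear the cached base so
 * the kernel (e.g. the signal trampoline code) won't use a stale
 * address afterwards.
 */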
static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
		mm->context.vdso_base = 0;
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}

#ifdef CONFIG_PPC_MEM_KEYS
bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
			       bool execute, bool foreign);
#else /* CONFIG_PPC_MEM_KEYS */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}

#define pkey_mm_init(mm)
#define thread_pkey_regs_save(thread)
#define thread_pkey_regs_restore(new_thread, old_thread)
#define thread_pkey_regs_init(thread)

static inline int vma_pkey(struct vm_area_struct *vma)
{
	return 0;
}

static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
{
	return 0x0UL;
}

#endif /* CONFIG_PPC_MEM_KEYS */

#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */