/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
#define init_new_context init_new_context
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
#define destroy_context destroy_context
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

extern int isolate_lru_page(struct page *page);	/* from internal.h */
extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_new(struct mm_struct *mm,
		unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
		unsigned long entries, unsigned long dev_hpa,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_struct *mm,
		struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
		struct mm_struct *mm, unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua);
extern bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
		unsigned int pageshift, unsigned long *size);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#else
static inline bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
		unsigned int pageshift, unsigned long *size)
{
	return false;
}
static inline void mm_iommu_init(struct mm_struct *mm) { }
#endif
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);

#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next);
static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	if (radix_enabled())
		return radix__switch_mmu_context(prev, next);
	return switch_slb(tsk, next);
}

extern int hash__alloc_context_id(void);
extern void hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }

static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	int context_id;

	int index = ea >> MAX_EA_BITS_PER_CONTEXT;

	context_id = hash__alloc_context_id();
	if (context_id < 0)
		return context_id;

	VM_WARN_ON(mm->context.extended_id[index]);
	mm->context.extended_id[index] = context_id;
	return context_id;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	int context_id;

	context_id = get_user_context(&mm->context, ea);
	if (!context_id)
		return true;
	return false;
}
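
/*
 * Usage sketch (illustrative, not part of the original header): a
 * hash fault path handling an EA beyond the first context's range is
 * expected to pair the two helpers above before retrying the access.
 * The error value and the surrounding fault plumbing are assumptions
 * made for the example:
 *
 *	if (need_extra_context(mm, ea)) {
 *		if (alloc_extended_context(mm, ea) < 0)
 *			return -EFAULT;	// hypothetical error path
 *	}
 */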

#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	/* non-Book3S-64 platforms should never call this */
	WARN_ON(1);
	return -ENOMEM;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	return false;
}
#endif

extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);

#ifdef CONFIG_PPC_BOOK3S_64
static inline void inc_mm_active_cpus(struct mm_struct *mm)
{
	atomic_inc(&mm->context.active_cpus);
}

static inline void dec_mm_active_cpus(struct mm_struct *mm)
{
	atomic_dec(&mm->context.active_cpus);
}

static inline void mm_context_add_copro(struct mm_struct *mm)
{
	/*
	 * If any copro is in use, increment the active CPU count
	 * in order to force TLB invalidations to be global so that
	 * they propagate to the Nest MMU.
	 */
	if (atomic_inc_return(&mm->context.copros) == 1)
		inc_mm_active_cpus(mm);
}

static inline void mm_context_remove_copro(struct mm_struct *mm)
{
	int c;

	/*
	 * When removing the last copro, we need to broadcast a global
	 * flush of the full mm, as the next TLBI may be local and the
	 * nMMU and/or PSL need to be cleaned up.
	 *
	 * Both the 'copros' and 'active_cpus' counts are looked at in
	 * flush_all_mm() to determine the scope (local/global) of the
	 * TLBIs, so we need to flush first before decrementing
	 * 'copros'. If this API is used by several callers for the
	 * same context, it can lead to over-flushing. It's hopefully
	 * not common enough to be a problem.
	 *
	 * Skip on hash, as we don't know how to do the proper flush
	 * for the time being. Invalidations will remain global if
	 * used on hash. Note that we can't drop 'copros' either, as
	 * that could make some invalidations local with no flush
	 * in-between.
	 */
	if (radix_enabled()) {
		flush_all_mm(mm);

		c = atomic_dec_if_positive(&mm->context.copros);
		/* Detect imbalance between add and remove */
		WARN_ON(c < 0);

		if (c == 0)
			dec_mm_active_cpus(mm);
	}
}
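
/*
 * Usage sketch (illustrative only): a coprocessor driver attaching an
 * mm to the nest MMU is expected to bracket the attachment with the
 * pair above; the call sites shown are assumptions for the example,
 * not taken from this header:
 *
 *	mm_context_add_copro(mm);	// before the nMMU may walk this mm
 *	... device operates on the context ...
 *	mm_context_remove_copro(mm);	// flush (on radix), then drop count
 */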

/*
 * The vas_windows counter tracks the number of open VAS windows in
 * the mm context. During context switch, use this counter to clear
 * the foreign real address mapping (CP_ABORT) for any thread/process
 * that intends to use COPY/PASTE. When a process closes all of its
 * windows, disable CP_ABORT, which is expensive to run.
 *
 * For user contexts, register a copro so that TLBIs are seen by the
 * nest MMU. mm_context_add/remove_vas_window() are used only for
 * user space windows.
 */
static inline void mm_context_add_vas_window(struct mm_struct *mm)
{
	atomic_inc(&mm->context.vas_windows);
	mm_context_add_copro(mm);
}

static inline void mm_context_remove_vas_window(struct mm_struct *mm)
{
	int v;

	mm_context_remove_copro(mm);
	v = atomic_dec_if_positive(&mm->context.vas_windows);

	/* Detect imbalance between add and remove */
	WARN_ON(v < 0);
}
#else
static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
static inline void dec_mm_active_cpus(struct mm_struct *mm) { }
static inline void mm_context_add_copro(struct mm_struct *mm) { }
static inline void mm_context_remove_copro(struct mm_struct *mm) { }
#endif

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
#define switch_mm_irqs_off switch_mm_irqs_off

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	switch_mm_irqs_off(prev, next, current);
}

/* We don't currently use enter_lazy_tlb() for anything */
#ifdef CONFIG_PPC_BOOK3E_64
#define enter_lazy_tlb enter_lazy_tlb
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of the current PGD in the PACA */
	get_paca()->pgd = NULL;
}
#endif

extern void arch_exit_mmap(struct mm_struct *mm);

static inline void arch_unmap(struct mm_struct *mm,
			      unsigned long start, unsigned long end)
{
	unsigned long vdso_base = (unsigned long)mm->context.vdso;

	if (start <= vdso_base && vdso_base < end)
		mm->context.vdso = NULL;
}

#ifdef CONFIG_PPC_MEM_KEYS
bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
			       bool execute, bool foreign);
void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm);
#else /* CONFIG_PPC_MEM_KEYS */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
					     bool write, bool execute,
					     bool foreign)
{
	/* by default, allow everything */
	return true;
}

#define pkey_mm_init(mm)
#define arch_dup_pkeys(oldmm, mm)

static inline u64 pte_to_hpte_pkey_bits(u64 pteflags, unsigned long flags)
{
	return 0x0UL;
}

#endif /* CONFIG_PPC_MEM_KEYS */

static inline int arch_dup_mmap(struct mm_struct *oldmm,
				struct mm_struct *mm)
{
	arch_dup_pkeys(oldmm, mm);
	return 0;
}

#include <asm-generic/mmu_context.h>

#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */
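
/*
 * Usage sketch for the mm_iommu_* preregistration API declared above
 * (illustrative only; the 0-on-success return convention and the
 * TCE-table step are assumptions for the example). A VFIO SPAPR TCE
 * style caller registers memory once, translates per entry, and
 * releases it when the container goes away:
 *
 *	struct mm_iommu_table_group_mem_t *mem;
 *	unsigned long hpa;
 *
 *	if (mm_iommu_new(mm, ua, entries, &mem))		// pin + register
 *		goto err;
 *	if (mm_iommu_ua_to_hpa(mem, ua, PAGE_SHIFT, &hpa))	// translate
 *		goto put;
 *	... program 'hpa' into the TCE table ...
 * put:
 *	mm_iommu_put(mm, mem);					// unpin + release
 */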