/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
#define init_new_context init_new_context
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
#define destroy_context destroy_context
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

extern int isolate_lru_page(struct page *page);	/* from internal.h */
extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_new(struct mm_struct *mm,
		unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
		unsigned long entries, unsigned long dev_hpa,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_struct *mm,
		struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
		struct mm_struct *mm, unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua);
extern bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
		unsigned int pageshift, unsigned long *size);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#else
static inline bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
		unsigned int pageshift, unsigned long *size)
{
	return false;
}
static inline void mm_iommu_init(struct mm_struct *mm) { }
#endif
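
/*
 * Minimal usage sketch for the preregistration API above, assuming a
 * VFIO-style TCE caller; the variable names and error handling here are
 * hypothetical, not taken from this file:
 *
 *	struct mm_iommu_table_group_mem_t *mem;
 *	unsigned long hpa;
 *
 *	if (mm_iommu_new(mm, ua, entries, &mem))	// pin + preregister
 *		goto err;
 *	...
 *	mem = mm_iommu_lookup(mm, ua, size);		// find the region
 *	if (mem && !mm_iommu_ua_to_hpa(mem, ua, pageshift, &hpa))
 *		mm_iommu_mapped_inc(mem);		// hpa now sits in a TCE
 *	...
 *	mm_iommu_mapped_dec(mem);			// TCE cleared
 *	mm_iommu_put(mm, mem);				// unpin when done
 */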
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);

#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next);
static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	if (radix_enabled())
		return radix__switch_mmu_context(prev, next);
	return switch_slb(tsk, next);
}

extern int hash__alloc_context_id(void);
void __init hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }

#ifdef CONFIG_PPC_64S_HASH_MMU
static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	int context_id;

	int index = ea >> MAX_EA_BITS_PER_CONTEXT;

	context_id = hash__alloc_context_id();
	if (context_id < 0)
		return context_id;

	VM_WARN_ON(mm->context.extended_id[index]);
	mm->context.extended_id[index] = context_id;
	return context_id;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	int context_id;

	context_id = get_user_context(&mm->context, ea);
	if (!context_id)
		return true;
	return false;
}
#endif
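
/*
 * Minimal caller sketch for the two helpers above, assuming a hash
 * fault path that touches an address beyond the first context; the
 * error handling is hypothetical, not taken from this file:
 *
 *	if (need_extra_context(mm, ea)) {
 *		if (alloc_extended_context(mm, ea) < 0)
 *			return -EFAULT;
 *	}
 *
 * i.e. a context id is allocated for the MAX_EA_BITS_PER_CONTEXT sized
 * region containing 'ea' before the translation is installed.
 */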

#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	/* non-book3s_64 platforms should never call this */
	WARN_ON(1);
	return -ENOMEM;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	return false;
}
#endif

extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);

#ifdef CONFIG_PPC_BOOK3S_64
static inline void inc_mm_active_cpus(struct mm_struct *mm)
{
	atomic_inc(&mm->context.active_cpus);
}

static inline void dec_mm_active_cpus(struct mm_struct *mm)
{
	atomic_dec(&mm->context.active_cpus);
}

static inline void mm_context_add_copro(struct mm_struct *mm)
{
	/*
	 * If any copro is in use, increment the active CPU count
	 * in order to force TLB invalidations to be global so as to
	 * propagate to the Nest MMU.
	 */
	if (atomic_inc_return(&mm->context.copros) == 1)
		inc_mm_active_cpus(mm);
}

static inline void mm_context_remove_copro(struct mm_struct *mm)
{
	int c;

	/*
	 * When removing the last copro, we need to broadcast a global
	 * flush of the full mm, as the next TLBI may be local and the
	 * nMMU and/or PSL need to be cleaned up.
	 *
	 * Both the 'copros' and 'active_cpus' counts are looked at in
	 * flush_all_mm() to determine the scope (local/global) of the
	 * TLBIs, so we need to flush first before decrementing
	 * 'copros'. If this API is used by several callers for the
	 * same context, it can lead to over-flushing. It's hopefully
	 * not common enough to be a problem.
	 *
	 * Skip on hash, as we don't know how to do the proper flush
	 * for the time being. Invalidations will remain global if
	 * used on hash. Note that we can't drop 'copros' either, as
	 * it could make some invalidations local with no flush
	 * in-between.
	 */
	if (radix_enabled()) {
		flush_all_mm(mm);

		c = atomic_dec_if_positive(&mm->context.copros);
		/* Detect imbalance between add and remove */
		WARN_ON(c < 0);

		if (c == 0)
			dec_mm_active_cpus(mm);
	}
}

/*
 * The vas_windows counter shows the number of open windows in the mm
 * context. During context switch, use this counter to clear the
 * foreign real address mapping (CP_ABORT) for the thread / process
 * that intends to use COPY/PASTE. When a process closes all windows,
 * disable CP_ABORT, which is expensive to run.
 *
 * For user context, register a copro so that TLBIs are seen by the
 * nest MMU. mm_context_add/remove_vas_window() are used only for user
 * space windows.
 */
static inline void mm_context_add_vas_window(struct mm_struct *mm)
{
	atomic_inc(&mm->context.vas_windows);
	mm_context_add_copro(mm);
}

static inline void mm_context_remove_vas_window(struct mm_struct *mm)
{
	int v;

	mm_context_remove_copro(mm);
	v = atomic_dec_if_positive(&mm->context.vas_windows);

	/* Detect imbalance between add and remove */
	WARN_ON(v < 0);
}
#else
static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
static inline void dec_mm_active_cpus(struct mm_struct *mm) { }
static inline void mm_context_add_copro(struct mm_struct *mm) { }
static inline void mm_context_remove_copro(struct mm_struct *mm) { }
#endif

#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
void do_h_rpt_invalidate_prt(unsigned long pid, unsigned long lpid,
			     unsigned long type, unsigned long pg_sizes,
			     unsigned long start, unsigned long end);
#else
static inline void do_h_rpt_invalidate_prt(unsigned long pid,
					    unsigned long lpid,
					    unsigned long type,
					    unsigned long pg_sizes,
					    unsigned long start,
					    unsigned long end) { }
#endif

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
#define switch_mm_irqs_off switch_mm_irqs_off

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	switch_mm_irqs_off(prev, next, current);
}

/* We don't currently use enter_lazy_tlb() for anything */
#ifdef CONFIG_PPC_BOOK3E_64
#define enter_lazy_tlb enter_lazy_tlb
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
	get_paca()->pgd = NULL;
}
#endif

extern void arch_exit_mmap(struct mm_struct *mm);

static inline void arch_unmap(struct mm_struct *mm,
			      unsigned long start, unsigned long end)
{
	unsigned long vdso_base = (unsigned long)mm->context.vdso;

	if (start <= vdso_base && vdso_base < end)
		mm->context.vdso = NULL;
}

#ifdef CONFIG_PPC_MEM_KEYS
bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
			       bool execute, bool foreign);
void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm);
#else /* CONFIG_PPC_MEM_KEYS */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}

#define pkey_mm_init(mm)
#define arch_dup_pkeys(oldmm, mm)

static inline u64 pte_to_hpte_pkey_bits(u64 pteflags, unsigned long flags)
{
	return 0x0UL;
}

#endif /* CONFIG_PPC_MEM_KEYS */

static inline int arch_dup_mmap(struct mm_struct *oldmm,
				struct mm_struct *mm)
{
	arch_dup_pkeys(oldmm, mm);
	return 0;
}

#include <asm-generic/mmu_context.h>

#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */
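
/*
 * Minimal pairing sketch for the coprocessor/VAS helpers declared above,
 * assuming a driver that opens a user-space window; the call sites shown
 * are hypothetical, not taken from this file:
 *
 *	mm_context_add_vas_window(mm);		// also registers a copro
 *	... window is used for COPY/PASTE ...
 *	mm_context_remove_vas_window(mm);
 *
 * A nest MMU user without a window would instead bracket its use with
 * mm_context_add_copro()/mm_context_remove_copro().
 */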