/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

extern int isolate_lru_page(struct page *page);	/* from internal.h */
extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_struct *mm,
		struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
		struct mm_struct *mm, unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
		unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#endif
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);

#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next);
static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	if (radix_enabled())
		return radix__switch_mmu_context(prev, next);
	return switch_slb(tsk, next);
}

extern int hash__alloc_context_id(void);
extern void hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }

/*
 * Allocate and record a context id for the address-space slice containing
 * @ea (each slice spans MAX_EA_BITS_PER_CONTEXT bits of effective address).
 */
static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	int context_id;

	int index = ea >> MAX_EA_BITS_PER_CONTEXT;

	context_id = hash__alloc_context_id();
	if (context_id < 0)
		return context_id;

	VM_WARN_ON(mm->context.extended_id[index]);
	mm->context.extended_id[index] = context_id;
	return context_id;
}

/* Return true if no context id has been allocated yet for the slice of @ea */
static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	int context_id;

	context_id = get_ea_context(&mm->context, ea);
	if (!context_id)
		return true;
	return false;
}

#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	/* non book3s_64 should never find this called */
	WARN_ON(1);
	return -ENOMEM;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	return false;
}
#endif

#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
#else
static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
#endif

extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);

#ifdef CONFIG_PPC_BOOK3S_64
static inline void inc_mm_active_cpus(struct mm_struct *mm)
{
	atomic_inc(&mm->context.active_cpus);
}

static inline void dec_mm_active_cpus(struct mm_struct *mm)
{
	atomic_dec(&mm->context.active_cpus);
}

static inline void mm_context_add_copro(struct mm_struct *mm)
{
	/*
	 * If any copro is in use, increment the active CPU count
	 * in order to force TLB invalidations to be global, so that
	 * they propagate to the Nest MMU.
	 */
	if (atomic_inc_return(&mm->context.copros) == 1)
		inc_mm_active_cpus(mm);
}

static inline void mm_context_remove_copro(struct mm_struct *mm)
{
	int c;

	c = atomic_dec_if_positive(&mm->context.copros);

	/* Detect imbalance between add and remove */
	WARN_ON(c < 0);

	/*
	 * Need to broadcast a global flush of the full mm before
	 * decrementing active_cpus count, as the next TLBI may be
	 * local and the nMMU and/or PSL need to be cleaned up.
	 * Should be rare enough so that it's acceptable.
	 *
	 * Skip on hash, as we don't know how to do the proper flush
	 * for the time being. Invalidations will remain global if
	 * used on hash.
	 */
	if (c == 0 && radix_enabled()) {
		flush_all_mm(mm);
		dec_mm_active_cpus(mm);
	}
}
#else
static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
static inline void dec_mm_active_cpus(struct mm_struct *mm) { }
static inline void mm_context_add_copro(struct mm_struct *mm) { }
static inline void mm_context_remove_copro(struct mm_struct *mm) { }
#endif

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
#define switch_mm_irqs_off switch_mm_irqs_off

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	switch_mm(prev, next, current);
}

/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}

static inline int arch_dup_mmap(struct mm_struct *oldmm,
				struct mm_struct *mm)
{
	return 0;
}

#ifndef CONFIG_PPC_BOOK3S_64
static inline void arch_exit_mmap(struct mm_struct *mm)
{
}
#else
extern void arch_exit_mmap(struct mm_struct *mm);
#endif

static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
		mm->context.vdso_base = 0;
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}

#ifdef CONFIG_PPC_MEM_KEYS
bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
			       bool execute, bool foreign);
#else /* CONFIG_PPC_MEM_KEYS */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}

#define pkey_mm_init(mm)
#define thread_pkey_regs_save(thread)
#define thread_pkey_regs_restore(new_thread, old_thread)
#define thread_pkey_regs_init(thread)

static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
{
	return 0x0UL;
}

#endif /* CONFIG_PPC_MEM_KEYS */

#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */