/*
 * Switch an MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 */
#ifndef _ASM_MMU_CONTEXT_H
#define _ASM_MMU_CONTEXT_H

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/hazards.h>
#include <asm/tlbflush.h>
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#include <asm/smtc.h>
#endif /* SMTC */
#include <asm-generic/mm_hooks.h>

#ifdef CONFIG_MIPS_PGD_C0_CONTEXT

#define TLBMISS_HANDLER_SETUP_PGD(pgd)                                  \
do {                                                                    \
        extern void tlbmiss_handler_setup_pgd(unsigned long);          \
        tlbmiss_handler_setup_pgd((unsigned long)(pgd));                \
} while (0)

#define TLBMISS_HANDLER_SETUP()                                         \
do {                                                                    \
        TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);                      \
        write_c0_xcontext((unsigned long) smp_processor_id() <<        \
                          SMP_CPUID_REGSHIFT);                          \
} while (0)

#else /* !CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current */

/*
 * For the fast TLB miss handlers, we keep a per-CPU array of pointers
 * to the current pgd for each processor. The processor ID is also
 * stuffed into the context register.
 */
extern unsigned long pgd_current[];

#define TLBMISS_HANDLER_SETUP_PGD(pgd)                                  \
        pgd_current[smp_processor_id()] = (unsigned long)(pgd)

/* Multi-statement body: keep it inside do/while so any call site is safe. */
#define TLBMISS_HANDLER_SETUP()                                         \
do {                                                                    \
        write_c0_context((unsigned long) smp_processor_id() <<         \
                         SMP_CPUID_REGSHIFT);                           \
        back_to_back_c0_hazard();                                       \
        TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);                      \
} while (0)
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

#define ASID_INC        0x40
#define ASID_MASK       0xfc0

#elif defined(CONFIG_CPU_R8000)

#define ASID_INC        0x10
#define ASID_MASK       0xff0

#elif defined(CONFIG_MIPS_MT_SMTC)

#define ASID_INC        0x1
extern unsigned long smtc_asid_mask;
#define ASID_MASK       (smtc_asid_mask)
#define HW_ASID_MASK    0xff
/* End SMTC/34K debug hack */
#else /* FIXME: not correct for R6000 */

#define ASID_INC        0x1
#define ASID_MASK       0xff

#endif

#define cpu_context(cpu, mm)    ((mm)->context.asid[cpu])
#define cpu_asid(cpu, mm)       (cpu_context((cpu), (mm)) & ASID_MASK)
#define asid_cache(cpu)         (cpu_data[cpu].asid_cache)

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * All upper bits not used by the hardware ASID are treated as a
 * software ASID extension, i.e. a generation (version) number.
 */
#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
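/*
 * Worked example of the split above, assuming the common eight-bit
 * hardware ASID case (ASID_MASK == 0xff, ASID_INC == 0x1):
 *
 *   ASID_VERSION_MASK  == ~0xffUL   (every bit above the hw ASID)
 *   ASID_FIRST_VERSION == 0x100
 *
 * An asid_cache(cpu) value of 0x3a7 then means hardware ASID 0xa7 in
 * software generation 0x300. A context whose version bits disagree
 * with asid_cache(cpu) is stale and needs a fresh ASID before use.
 */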
#ifndef CONFIG_MIPS_MT_SMTC
/* Normal, classic MIPS get_new_mmu_context */
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
        extern void kvm_local_flush_tlb_all(void);
        unsigned long asid = asid_cache(cpu);

        if (!((asid += ASID_INC) & ASID_MASK)) {
                if (cpu_has_vtag_icache)
                        flush_icache_all();
#ifdef CONFIG_KVM
                kvm_local_flush_tlb_all();      /* start new asid cycle */
#else
                local_flush_tlb_all();          /* start new asid cycle */
#endif
                if (!asid)                      /* fix version if needed */
                        asid = ASID_FIRST_VERSION;
        }

        cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

#else /* CONFIG_MIPS_MT_SMTC */

#define get_new_mmu_context(mm, cpu) smtc_get_new_mmu_context((mm), (cpu))

#endif /* CONFIG_MIPS_MT_SMTC */
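/*
 * Rollover walkthrough for the classic get_new_mmu_context() above,
 * again assuming an eight-bit hardware ASID: if asid_cache(cpu) is
 * 0x2ff, adding ASID_INC gives 0x300 and the low ASID bits are zero,
 * meaning every hardware ASID of this generation has been handed out.
 * The TLB is flushed and 0x300 opens the next generation. Only when
 * the counter wraps the whole unsigned long around to zero is it
 * restarted at ASID_FIRST_VERSION.
 */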
/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        int i;

        for_each_possible_cpu(i)
                cpu_context(i, mm) = 0;

        return 0;
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        unsigned int cpu = smp_processor_id();
        unsigned long flags;
#ifdef CONFIG_MIPS_MT_SMTC
        unsigned long oldasid;
        unsigned long mtflags;
        int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
        local_irq_save(flags);
        mtflags = dvpe();
#else /* Not SMTC */
        local_irq_save(flags);
#endif /* CONFIG_MIPS_MT_SMTC */

        /* Check if our ASID is of an older version and thus invalid */
        if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
                get_new_mmu_context(next, cpu);
#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * If the EntryHi ASID being replaced happens to be
         * the value flagged at ASID recycling time as having
         * an extended life, clear the bit showing it being
         * in use by this "CPU", and if that's the last bit,
         * free up the ASID value for use and flush any old
         * instances of it from the TLB.
         */
        oldasid = read_c0_entryhi() & ASID_MASK;
        if (smtc_live_asid[mytlb][oldasid]) {
                smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
                if (smtc_live_asid[mytlb][oldasid] == 0)
                        smtc_flush_tlb_asid(oldasid);
        }
        /*
         * Tread softly on EntryHi, and so long as we support
         * having ASID_MASK smaller than the hardware maximum,
         * make sure no "soft" bits become "hard"...
         */
        write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
                         cpu_asid(cpu, next));
        ehb(); /* Make sure it propagates to TCStatus */
        evpe(mtflags);
#else
        write_c0_entryhi(cpu_asid(cpu, next));
#endif /* CONFIG_MIPS_MT_SMTC */
        TLBMISS_HANDLER_SETUP_PGD(next->pgd);

        /*
         * Mark current->active_mm as not "active" anymore.
         * We don't want to mislead possible IPI tlb flush routines.
         */
        cpumask_clear_cpu(cpu, mm_cpumask(prev));
        cpumask_set_cpu(cpu, mm_cpumask(next));

        local_irq_restore(flags);
}

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
}

#define deactivate_mm(tsk, mm) do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
        unsigned long flags;
        unsigned int cpu = smp_processor_id();

#ifdef CONFIG_MIPS_MT_SMTC
        unsigned long oldasid;
        unsigned long mtflags;
        int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
#endif /* CONFIG_MIPS_MT_SMTC */

        local_irq_save(flags);

        /* Unconditionally get a new ASID. */
        get_new_mmu_context(next, cpu);

#ifdef CONFIG_MIPS_MT_SMTC
        /* See comments for similar code above */
        mtflags = dvpe();
        oldasid = read_c0_entryhi() & ASID_MASK;
        if (smtc_live_asid[mytlb][oldasid]) {
                smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
                if (smtc_live_asid[mytlb][oldasid] == 0)
                        smtc_flush_tlb_asid(oldasid);
        }
        /* See comments for similar code above */
        write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
                         cpu_asid(cpu, next));
        ehb(); /* Make sure it propagates to TCStatus */
        evpe(mtflags);
#else
        write_c0_entryhi(cpu_asid(cpu, next));
#endif /* CONFIG_MIPS_MT_SMTC */
        TLBMISS_HANDLER_SETUP_PGD(next->pgd);

        /* Mark mmu ownership change */
        cpumask_clear_cpu(cpu, mm_cpumask(prev));
        cpumask_set_cpu(cpu, mm_cpumask(next));

        local_irq_restore(flags);
}

/*
 * If mm is currently active_mm, we can't really drop it. Instead,
 * we will get a new one for it.
 */
static inline void
drop_mmu_context(struct mm_struct *mm, unsigned cpu)
{
        unsigned long flags;
#ifdef CONFIG_MIPS_MT_SMTC
        unsigned long oldasid;
        /* Can't use spinlock because called from TLB flush within DVPE */
        unsigned int prevvpe;
        int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
#endif /* CONFIG_MIPS_MT_SMTC */

        local_irq_save(flags);

        if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
                get_new_mmu_context(mm, cpu);
#ifdef CONFIG_MIPS_MT_SMTC
                /* See comments for similar code above */
                prevvpe = dvpe();
                oldasid = read_c0_entryhi() & ASID_MASK;
                if (smtc_live_asid[mytlb][oldasid]) {
                        smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
                        if (smtc_live_asid[mytlb][oldasid] == 0)
                                smtc_flush_tlb_asid(oldasid);
                }
                /* See comments for similar code above */
                write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
                                 cpu_asid(cpu, mm));
                ehb(); /* Make sure it propagates to TCStatus */
                evpe(prevvpe);
#else /* not CONFIG_MIPS_MT_SMTC */
                write_c0_entryhi(cpu_asid(cpu, mm));
#endif /* CONFIG_MIPS_MT_SMTC */
        } else {
                /* will get a new context next time */
#ifndef CONFIG_MIPS_MT_SMTC
                cpu_context(cpu, mm) = 0;
#else /* SMTC */
                int i;

                /* SMTC shares the TLB (and ASIDs) across VPEs */
                for_each_online_cpu(i) {
                        if ((smtc_status & SMTC_TLB_SHARED) ||
                            (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
                                cpu_context(i, mm) = 0;
                }
#endif /* CONFIG_MIPS_MT_SMTC */
        }
        local_irq_restore(flags);
}

#endif /* _ASM_MMU_CONTEXT_H */
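/*
 * Usage sketch: in this kernel generation the scheduler's
 * context_switch() invokes switch_mm() when the incoming task owns a
 * different mm, exec_mmap() invokes activate_mm() for a freshly built
 * address space, and the MIPS local TLB flush paths invoke
 * drop_mmu_context() to retire an mm's translations by handing it a
 * new ASID instead of walking the TLB.
 */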