#ifndef __ALPHA_MMU_CONTEXT_H
#define __ALPHA_MMU_CONTEXT_H

/*
 * get a new mmu context..
 *
 * Copyright (C) 1996, Linus Torvalds
 */

#include <linux/mm_types.h>
#include <linux/sched.h>

#include <asm/machvec.h>
#include <asm/compiler.h>
#include <asm-generic/mm_hooks.h>

/*
 * Force a context reload. This is needed when we change the page
 * table pointer or when we update the ASN of the current process.
 */

/* Don't get into trouble with dueling __EXTERN_INLINEs.  */
#ifndef __EXTERN_INLINE
#include <asm/io.h>
#endif


static inline unsigned long
__reload_thread(struct pcb_struct *pcb)
{
	register unsigned long a0 __asm__("$16");
	register unsigned long v0 __asm__("$0");

	a0 = virt_to_phys(pcb);
	__asm__ __volatile__(
		"call_pal %2 #__reload_thread"
		: "=r"(v0), "=r"(a0)
		: "i"(PAL_swpctx), "r"(a0)
		: "$1", "$22", "$23", "$24", "$25");

	return v0;
}


/*
 * The maximum ASN's the processor supports.  On the EV4 this is 63
 * but the PAL-code doesn't actually use this information.  On the
 * EV5 this is 127, and EV6 has 255.
 *
 * On the EV4, the ASNs are more-or-less useless anyway, as they are
 * only used as an icache tag, not for TB entries.  On the EV5 and EV6,
 * ASN's also validate the TB entries, and thus make a lot more sense.
 *
 * The EV4 ASN's don't even match the architecture manual, ugh.  And
 * I quote: "If a processor implements address space numbers (ASNs),
 * and the old PTE has the Address Space Match (ASM) bit clear (ASNs
 * in use) and the Valid bit set, then entries can also effectively be
 * made coherent by assigning a new, unused ASN to the currently
 * running process and not reusing the previous ASN before calling the
 * appropriate PALcode routine to invalidate the translation buffer (TB)".
 *
 * In short, the EV4 has a "kind of" ASN capability, but it doesn't actually
 * work correctly and can thus not be used (explaining the lack of PAL-code
 * support).
 */
#define EV4_MAX_ASN 63
#define EV5_MAX_ASN 127
#define EV6_MAX_ASN 255

#ifdef CONFIG_ALPHA_GENERIC
# define MAX_ASN	(alpha_mv.max_asn)
#else
# ifdef CONFIG_ALPHA_EV4
#  define MAX_ASN	EV4_MAX_ASN
# elif defined(CONFIG_ALPHA_EV5)
#  define MAX_ASN	EV5_MAX_ASN
# else
#  define MAX_ASN	EV6_MAX_ASN
# endif
#endif

/*
 * cpu_last_asn(processor):
 * 63                                            0
 * +-------------+----------------+--------------+
 * | asn version | this processor | hardware asn |
 * +-------------+----------------+--------------+
 */

#include <asm/smp.h>
#ifdef CONFIG_SMP
#define cpu_last_asn(cpuid)	(cpu_data[cpuid].last_asn)
#else
extern unsigned long last_asn;
#define cpu_last_asn(cpuid)	last_asn
#endif /* CONFIG_SMP */

#define WIDTH_HARDWARE_ASN	8
#define ASN_FIRST_VERSION	(1UL << WIDTH_HARDWARE_ASN)
#define HARDWARE_ASN_MASK	((1UL << WIDTH_HARDWARE_ASN) - 1)

/*
 * NOTE! The way this is set up, the high bits of the "asn_cache" (and
 * the "mm->context") are the ASN _version_ code. A version of 0 is
 * always considered invalid, so to invalidate another process you only
 * need to do "p->mm->context = 0".
 *
 * If we need more ASN's than the processor has, we invalidate the old
 * user TLB's (tbiap()) and start a new ASN version. That will automatically
 * force a new asn for any other processes the next time they want to
 * run.
 */

#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
#define __MMU_EXTERN_INLINE
#endif

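/*
 * Hand out the next ASN for this CPU.  When the hardware ASN space is
 * exhausted, flush the user TLB entries and the icache and start a new
 * ASN version, which lazily invalidates every other context (see the
 * NOTE above).
 */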
extern inline unsigned long
__get_new_mm_context(struct mm_struct *mm, long cpu)
{
	unsigned long asn = cpu_last_asn(cpu);
	unsigned long next = asn + 1;

	if ((asn & HARDWARE_ASN_MASK) >= MAX_ASN) {
		tbiap();
		imb();
		next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
	}
	cpu_last_asn(cpu) = next;
	return next;
}

__EXTERN_INLINE void
ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
	      struct task_struct *next)
{
	/* Check if our ASN is of an older version, and thus invalid. */
	unsigned long asn;
	unsigned long mmc;
	long cpu = smp_processor_id();

#ifdef CONFIG_SMP
	cpu_data[cpu].asn_lock = 1;
	barrier();
#endif
	asn = cpu_last_asn(cpu);
	mmc = next_mm->context[cpu];
	if ((mmc ^ asn) & ~HARDWARE_ASN_MASK) {
		mmc = __get_new_mm_context(next_mm, cpu);
		next_mm->context[cpu] = mmc;
	}
#ifdef CONFIG_SMP
	else
		cpu_data[cpu].need_new_asn = 1;
#endif

	/* Always update the PCB ASN.  Another thread may have allocated
	   a new mm->context (via flush_tlb_mm) without the ASN serial
	   number wrapping.  We have no way to detect when this is needed.  */
	task_thread_info(next)->pcb.asn = mmc & HARDWARE_ASN_MASK;
}

__EXTERN_INLINE void
ev4_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
	      struct task_struct *next)
{
	/* As described, ASN's are broken for TLB usage.  But we can
	   optimize for switching between threads -- if the mm is
	   unchanged from current we needn't flush.  */
	/* ??? May not be needed because EV4 PALcode recognizes that
	   ASN's are broken and does a tbiap itself on swpctx, under
	   the "Must set ASN or flush" rule.  At least this is true
	   for a 1992 SRM, reports Joseph Martin (jmartin@hlo.dec.com).
	   I'm going to leave this here anyway, just to Be Sure.  -- r~  */
	if (prev_mm != next_mm)
		tbiap();

	/* Do continue to allocate ASNs, because we can still use them
	   to avoid flushing the icache.  */
	ev5_switch_mm(prev_mm, next_mm, next);
}

extern void __load_new_mm_context(struct mm_struct *);

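/*
 * Drop the asn_lock taken in ev5_switch_mm.  If another CPU invalidated
 * the new mm's context while the switch was in progress (need_new_asn
 * was set and mm->context[cpu] has since been cleared), load a fresh
 * context now.
 */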
#ifdef CONFIG_SMP
#define check_mmu_context()					\
do {								\
	int cpu = smp_processor_id();				\
	cpu_data[cpu].asn_lock = 0;				\
	barrier();						\
	if (cpu_data[cpu].need_new_asn) {			\
		struct mm_struct * mm = current->active_mm;	\
		cpu_data[cpu].need_new_asn = 0;			\
		if (!mm->context[cpu])				\
			__load_new_mm_context(mm);		\
	}							\
} while(0)
#else
#define check_mmu_context()  do { } while(0)
#endif

__EXTERN_INLINE void
ev5_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
{
	__load_new_mm_context(next_mm);
}

__EXTERN_INLINE void
ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
{
	__load_new_mm_context(next_mm);
	tbiap();
}

#define deactivate_mm(tsk,mm)	do { } while (0)

#ifdef CONFIG_ALPHA_GENERIC
# define switch_mm(a,b,c)	alpha_mv.mv_switch_mm((a),(b),(c))
# define activate_mm(x,y)	alpha_mv.mv_activate_mm((x),(y))
#else
# ifdef CONFIG_ALPHA_EV4
#  define switch_mm(a,b,c)	ev4_switch_mm((a),(b),(c))
#  define activate_mm(x,y)	ev4_activate_mm((x),(y))
# else
#  define switch_mm(a,b,c)	ev5_switch_mm((a),(b),(c))
#  define activate_mm(x,y)	ev5_activate_mm((x),(y))
# endif
#endif

static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int i;

	for_each_online_cpu(i)
		mm->context[i] = 0;
	if (tsk != current)
		task_thread_info(tsk)->pcb.ptbr
		  = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
	return 0;
}

extern inline void
destroy_context(struct mm_struct *mm)
{
	/* Nothing to do.  */
}

static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	task_thread_info(tsk)->pcb.ptbr
	  = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
}

#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif

#endif /* __ALPHA_MMU_CONTEXT_H */