#ifndef __ALPHA_MMU_CONTEXT_H
#define __ALPHA_MMU_CONTEXT_H

/*
 * get a new mmu context..
 *
 * Copyright (C) 1996, Linus Torvalds
 */

#include <linux/mm_types.h>

#include <asm/machvec.h>
#include <asm/compiler.h>
#include <asm-generic/mm_hooks.h>

/*
 * Force a context reload. This is needed when we change the page
 * table pointer or when we update the ASN of the current process.
 */

/* Don't get into trouble with dueling __EXTERN_INLINEs.  */
#ifndef __EXTERN_INLINE
#include <asm/io.h>
#endif


static inline unsigned long
__reload_thread(struct pcb_struct *pcb)
{
	register unsigned long a0 __asm__("$16");
	register unsigned long v0 __asm__("$0");

	a0 = virt_to_phys(pcb);
	__asm__ __volatile__(
		"call_pal %2 #__reload_thread"
		: "=r"(v0), "=r"(a0)
		: "i"(PAL_swpctx), "r"(a0)
		: "$1", "$22", "$23", "$24", "$25");

	return v0;
}


/*
 * The maximum number of ASNs the processor supports.  On the EV4 this
 * is 63, but the PALcode doesn't actually use this information.  On
 * the EV5 this is 127, and on the EV6 it is 255.
 *
 * On the EV4, the ASNs are more-or-less useless anyway, as they are
 * only used as an icache tag, not for TB entries.  On the EV5 and EV6,
 * ASNs also validate the TB entries, and thus make a lot more sense.
 *
 * The EV4 ASNs don't even match the architecture manual, ugh.  And
 * I quote: "If a processor implements address space numbers (ASNs),
 * and the old PTE has the Address Space Match (ASM) bit clear (ASNs
 * in use) and the Valid bit set, then entries can also effectively be
 * made coherent by assigning a new, unused ASN to the currently
 * running process and not reusing the previous ASN before calling the
 * appropriate PALcode routine to invalidate the translation buffer (TB)".
 *
 * In short, the EV4 has a "kind of" ASN capability, but it doesn't
 * actually work correctly and thus cannot be used (which explains the
 * lack of PALcode support).
 */
#define EV4_MAX_ASN 63
#define EV5_MAX_ASN 127
#define EV6_MAX_ASN 255

#ifdef CONFIG_ALPHA_GENERIC
# define MAX_ASN	(alpha_mv.max_asn)
#else
# ifdef CONFIG_ALPHA_EV4
#  define MAX_ASN	EV4_MAX_ASN
# elif defined(CONFIG_ALPHA_EV5)
#  define MAX_ASN	EV5_MAX_ASN
# else
#  define MAX_ASN	EV6_MAX_ASN
# endif
#endif

/*
 * cpu_last_asn(processor):
 * 63                                            0
 * +-------------+----------------+--------------+
 * | asn version | this processor | hardware asn |
 * +-------------+----------------+--------------+
 */

#include <asm/smp.h>
#ifdef CONFIG_SMP
#define cpu_last_asn(cpuid)	(cpu_data[cpuid].last_asn)
#else
extern unsigned long last_asn;
#define cpu_last_asn(cpuid)	last_asn
#endif /* CONFIG_SMP */

#define WIDTH_HARDWARE_ASN	8
#define ASN_FIRST_VERSION	(1UL << WIDTH_HARDWARE_ASN)
#define HARDWARE_ASN_MASK	((1UL << WIDTH_HARDWARE_ASN) - 1)

/*
 * NOTE! The way this is set up, the high bits of the "asn_cache" (and
 * the "mm->context") are the ASN _version_ code.  A version of 0 is
 * always considered invalid, so to invalidate another process you only
 * need to do "mm->context[cpu] = 0".
 *
 * If we need more ASNs than the processor has, we invalidate the old
 * user TLBs (tbiap()) and start a new ASN version.  That will
 * automatically force a new ASN for any other processes the next time
 * they want to run.
 */
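/*
 * A minimal sketch of how a context value decomposes under the scheme
 * above.  The two helpers are hypothetical -- nothing in the kernel
 * defines or uses them -- and are kept under "#if 0" so the header
 * itself is unchanged; they exist only to make the split concrete.
 */
#if 0
static inline unsigned long ctx_version(unsigned long ctx)
{
	/* High bits: the ASN generation.  Version 0 is always stale. */
	return ctx & ~HARDWARE_ASN_MASK;
}

static inline unsigned long ctx_hw_asn(unsigned long ctx)
{
	/* Low WIDTH_HARDWARE_ASN bits: the ASN the PCB actually loads. */
	return ctx & HARDWARE_ASN_MASK;
}

/* Example: ctx == 0x305 splits into version 0x300 and hardware ASN 5.
   The staleness test in ev5_switch_mm() below is then exactly
   ctx_version(mm->context[cpu]) != ctx_version(cpu_last_asn(cpu)).  */
#endif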

#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
#define __MMU_EXTERN_INLINE
#endif

extern inline unsigned long
__get_new_mm_context(struct mm_struct *mm, long cpu)
{
	unsigned long asn = cpu_last_asn(cpu);
	unsigned long next = asn + 1;

	if ((asn & HARDWARE_ASN_MASK) >= MAX_ASN) {
		tbiap();
		imb();
		next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
	}
	cpu_last_asn(cpu) = next;
	return next;
}

__EXTERN_INLINE void
ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
	      struct task_struct *next)
{
	/* Check if our ASN is of an older version, and thus invalid. */
	unsigned long asn;
	unsigned long mmc;
	long cpu = smp_processor_id();

#ifdef CONFIG_SMP
	cpu_data[cpu].asn_lock = 1;
	barrier();
#endif
	asn = cpu_last_asn(cpu);
	mmc = next_mm->context[cpu];
	if ((mmc ^ asn) & ~HARDWARE_ASN_MASK) {
		mmc = __get_new_mm_context(next_mm, cpu);
		next_mm->context[cpu] = mmc;
	}
#ifdef CONFIG_SMP
	else
		cpu_data[cpu].need_new_asn = 1;
#endif

	/* Always update the PCB ASN.  Another thread may have allocated
	   a new mm->context (via flush_tlb_mm) without the ASN serial
	   number wrapping.  We have no way to detect when this is needed.  */
	task_thread_info(next)->pcb.asn = mmc & HARDWARE_ASN_MASK;
}

__EXTERN_INLINE void
ev4_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
	      struct task_struct *next)
{
	/* As described, ASNs are broken for TLB usage.  But we can
	   optimize for switching between threads -- if the mm is
	   unchanged from current we needn't flush.  */
	/* ??? May not be needed because EV4 PALcode recognizes that
	   ASNs are broken and does a tbiap itself on swpctx, under
	   the "Must set ASN or flush" rule.  At least this is true
	   for a 1992 SRM, reports Joseph Martin (jmartin@hlo.dec.com).
	   I'm going to leave this here anyway, just to Be Sure.  -- r~  */
	if (prev_mm != next_mm)
		tbiap();

	/* Do continue to allocate ASNs, because we can still use them
	   to avoid flushing the icache.  */
	ev5_switch_mm(prev_mm, next_mm, next);
}
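/*
 * A minimal, standalone model of the allocation above, kept under
 * "#if 0": it is user-space code, not kernel code, and assumes the EV6
 * constants.  tbiap()/imb() are modeled as a flush counter; the wrap
 * arithmetic is the same as in __get_new_mm_context().
 */
#if 0
#include <assert.h>

#define SKETCH_WIDTH		8
#define SKETCH_FIRST_VERSION	(1UL << SKETCH_WIDTH)
#define SKETCH_ASN_MASK		((1UL << SKETCH_WIDTH) - 1)
#define SKETCH_MAX_ASN		255			/* EV6 */

static unsigned long sketch_last_asn = SKETCH_FIRST_VERSION;
static unsigned long sketch_flushes;

static unsigned long sketch_get_new_context(void)
{
	unsigned long asn = sketch_last_asn;
	unsigned long next = asn + 1;

	if ((asn & SKETCH_ASN_MASK) >= SKETCH_MAX_ASN) {
		sketch_flushes++;	/* stands in for tbiap(); imb(); */
		next = (asn & ~SKETCH_ASN_MASK) + SKETCH_FIRST_VERSION;
	}
	return sketch_last_asn = next;
}

int main(void)
{
	int i;

	/* Consume hardware ASNs 1..255 within version 0x100. */
	for (i = 0; i < 255; i++)
		sketch_get_new_context();
	assert(sketch_last_asn == 0x1ff && sketch_flushes == 0);

	/* The next allocation wraps: flush once, start version 0x200
	   with hardware ASN 0; every context still stamped with version
	   0x100 is now stale.  */
	assert(sketch_get_new_context() == 0x200);
	assert(sketch_flushes == 1);
	return 0;
}
#endif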

extern void __load_new_mm_context(struct mm_struct *);

#ifdef CONFIG_SMP
#define check_mmu_context()					\
do {								\
	int cpu = smp_processor_id();				\
	cpu_data[cpu].asn_lock = 0;				\
	barrier();						\
	if (cpu_data[cpu].need_new_asn) {			\
		struct mm_struct *mm = current->active_mm;	\
		cpu_data[cpu].need_new_asn = 0;			\
		if (!mm->context[cpu])				\
			__load_new_mm_context(mm);		\
	}							\
} while (0)
#else
#define check_mmu_context() do { } while (0)
#endif

__EXTERN_INLINE void
ev5_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
{
	__load_new_mm_context(next_mm);
}

__EXTERN_INLINE void
ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
{
	__load_new_mm_context(next_mm);
	tbiap();
}

#define deactivate_mm(tsk, mm)	do { } while (0)

#ifdef CONFIG_ALPHA_GENERIC
# define switch_mm(a, b, c)	alpha_mv.mv_switch_mm((a), (b), (c))
# define activate_mm(x, y)	alpha_mv.mv_activate_mm((x), (y))
#else
# ifdef CONFIG_ALPHA_EV4
#  define switch_mm(a, b, c)	ev4_switch_mm((a), (b), (c))
#  define activate_mm(x, y)	ev4_activate_mm((x), (y))
# else
#  define switch_mm(a, b, c)	ev5_switch_mm((a), (b), (c))
#  define activate_mm(x, y)	ev5_activate_mm((x), (y))
# endif
#endif

static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int i;

	for_each_online_cpu(i)
		mm->context[i] = 0;
	if (tsk != current)
		task_thread_info(tsk)->pcb.ptbr
		  = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
	return 0;
}

extern inline void
destroy_context(struct mm_struct *mm)
{
	/* Nothing to do.  */
}

static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	task_thread_info(tsk)->pcb.ptbr
	  = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
}

#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif

#endif /* __ALPHA_MMU_CONTEXT_H */
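/*
 * Worked example (values assumed, for illustration only): the pcb.ptbr
 * assignments in init_new_context() and enter_lazy_tlb() convert the
 * pgd's identity-mapped kernel virtual address into a physical page
 * frame number, which is what the PCB's page table base register
 * expects.  Assuming Alpha's IDENT_ADDR of 0xfffffc0000000000 and 8 KB
 * pages (PAGE_SHIFT == 13), a pgd allocated at 0xfffffc0000310000
 * yields ptbr == 0x310000 >> 13 == 0x188.
 */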