#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
#include <linux/debugfs.h>

/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway.)
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

struct flush_tlb_info {
	struct mm_struct *flush_mm;
	unsigned long flush_start;
	unsigned long flush_end;
};

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);

	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
		load_cr3(swapper_pg_dir);
		/*
		 * This gets called in the idle path where RCU
		 * functions differently.  Tracing normally
		 * uses RCU, so we have to call the tracepoint
		 * specially here.
		 */
		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 *  1a1) set cpu_tlbstate to TLBSTATE_OK
 *	Now the tlb flush IPI handler flush_tlb_func won't call leave_mm
 *	if cpu0 was in lazy tlb mode.
 *  1a2) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 *  1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 *  1a4) change cr3.
 *  1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but flush_tlb_func ignores flush ipis for the wrong
 *	mm, and in the worst case we perform a superfluous tlb flush.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles flush ipis.
 *  1b1) set cpu_tlbstate to TLBSTATE_OK
 *  1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 *  1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, ie current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, no
 * write/read ordering problems.
 */

/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
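 *
 * flush_tlb_func() runs from the CALL_FUNCTION IPI sent by
 * native_flush_tlb_others() below, with interrupts disabled.  It bails
 * out early when this cpu's active_mm is not the mm being flushed, so a
 * late or misdirected IPI only costs a superfluous interrupt.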
 */
static void flush_tlb_func(void *info)
{
	struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
		return;
	if (!f->flush_end)
		f->flush_end = f->flush_start + PAGE_SIZE;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
		if (f->flush_end == TLB_FLUSH_ALL) {
			local_flush_tlb();
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
		} else {
			unsigned long addr;
			unsigned long nr_pages =
				(f->flush_end - f->flush_start) / PAGE_SIZE;

			addr = f->flush_start;
			while (addr < f->flush_end) {
				__flush_tlb_single(addr);
				addr += PAGE_SIZE;
			}
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
		}
	} else
		leave_mm(smp_processor_id());
}

void native_flush_tlb_others(const struct cpumask *cpumask,
			     struct mm_struct *mm, unsigned long start,
			     unsigned long end)
{
	struct flush_tlb_info info;

	info.flush_mm = mm;
	info.flush_start = start;
	info.flush_end = end;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	trace_tlb_flush(TLB_REMOTE_SEND_IPI, end - start);
	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func,
					       &info, 1);
		return;
	}
	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}

void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	local_flush_tlb();
	trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,000 ns.
 *
 * This is in units of pages.
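 *
 * The ceiling can also be changed at run time through the debugfs file
 * created at the end of this file.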
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag)
{
	unsigned long addr;
	/* do a global flush by default */
	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

	preempt_disable();
	if (current->active_mm != mm)
		goto out;

	if (!current->mm) {
		leave_mm(smp_processor_id());
		goto out;
	}

	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
		base_pages_to_flush = (end - start) >> PAGE_SHIFT;

	if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
		base_pages_to_flush = TLB_FLUSH_ALL;
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		local_flush_tlb();
	} else {
		/* flush the range one page at a time with 'invlpg' */
		for (addr = start; addr < end; addr += PAGE_SIZE) {
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
			__flush_tlb_single(addr);
		}
	}
	trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
out:
	if (base_pages_to_flush == TLB_FLUSH_ALL) {
		start = 0UL;
		end = TLB_FLUSH_ALL;
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, end);
	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(start);
		else
			leave_mm(smp_processor_id());
	}

	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

	preempt_enable();
}

static void do_flush_tlb_all(void *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* flush the range one page at a time with 'invlpg' */
	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
		__flush_tlb_single(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* Use the same ceiling as for user space flushes; a bit conservative */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	} else {
		struct flush_tlb_info info;

		info.flush_start = start;
		info.flush_end = end;
		on_each_cpu(do_kernel_range_flush, &info, 1);
	}
}

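/*
 * Debugfs interface for tlb_single_page_flush_ceiling: the file created
 * below lives in the arch debugfs directory (typically
 * /sys/kernel/debug/x86/tlb_single_page_flush_ceiling when debugfs is
 * mounted in the usual place).  Reading returns the current ceiling;
 * writing accepts a new non-negative value.  Writing 0 makes
 * flush_tlb_mm_range() fall back to a full TLB flush every time, while a
 * very large value favors per-page invlpg flushes.  See
 * Documentation/x86/tlb.txt.
 */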
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%lu\n", tlb_single_page_flush_ceiling);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	int ceiling;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoint(buf, 0, &ceiling))
		return -EINVAL;

	if (ceiling < 0)
		return -EINVAL;

	tlb_single_page_flush_ceiling = ceiling;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
	return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);