1 #include <linux/init.h> 2 3 #include <linux/mm.h> 4 #include <linux/spinlock.h> 5 #include <linux/smp.h> 6 #include <linux/interrupt.h> 7 #include <linux/export.h> 8 #include <linux/cpu.h> 9 10 #include <asm/tlbflush.h> 11 #include <asm/mmu_context.h> 12 #include <asm/cache.h> 13 #include <asm/apic.h> 14 #include <asm/uv/uv.h> 15 #include <linux/debugfs.h> 16 17 /* 18 * TLB flushing, formerly SMP-only 19 * c/o Linus Torvalds. 20 * 21 * These mean you can really definitely utterly forget about 22 * writing to user space from interrupts. (Its not allowed anyway). 23 * 24 * Optimizations Manfred Spraul <manfred@colorfullife.com> 25 * 26 * More scalable flush, from Andi Kleen 27 * 28 * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi 29 */ 30 31 /* 32 * We get here when we do something requiring a TLB invalidation 33 * but could not go invalidate all of the contexts. We do the 34 * necessary invalidation by clearing out the 'ctx_id' which 35 * forces a TLB flush when the context is loaded. 36 */ 37 void clear_asid_other(void) 38 { 39 u16 asid; 40 41 /* 42 * This is only expected to be set if we have disabled 43 * kernel _PAGE_GLOBAL pages. 44 */ 45 if (!static_cpu_has(X86_FEATURE_PTI)) { 46 WARN_ON_ONCE(1); 47 return; 48 } 49 50 for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) { 51 /* Do not need to flush the current asid */ 52 if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid)) 53 continue; 54 /* 55 * Make sure the next time we go to switch to 56 * this asid, we do a flush: 57 */ 58 this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0); 59 } 60 this_cpu_write(cpu_tlbstate.invalidate_other, false); 61 } 62 63 atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1); 64 65 66 static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen, 67 u16 *new_asid, bool *need_flush) 68 { 69 u16 asid; 70 71 if (!static_cpu_has(X86_FEATURE_PCID)) { 72 *new_asid = 0; 73 *need_flush = true; 74 return; 75 } 76 77 if (this_cpu_read(cpu_tlbstate.invalidate_other)) 78 clear_asid_other(); 79 80 for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) { 81 if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) != 82 next->context.ctx_id) 83 continue; 84 85 *new_asid = asid; 86 *need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) < 87 next_tlb_gen); 88 return; 89 } 90 91 /* 92 * We don't currently own an ASID slot on this CPU. 93 * Allocate a slot. 94 */ 95 *new_asid = this_cpu_add_return(cpu_tlbstate.next_asid, 1) - 1; 96 if (*new_asid >= TLB_NR_DYN_ASIDS) { 97 *new_asid = 0; 98 this_cpu_write(cpu_tlbstate.next_asid, 1); 99 } 100 *need_flush = true; 101 } 102 103 static void load_new_mm_cr3(pgd_t *pgdir, u16 new_asid, bool need_flush) 104 { 105 unsigned long new_mm_cr3; 106 107 if (need_flush) { 108 invalidate_user_asid(new_asid); 109 new_mm_cr3 = build_cr3(pgdir, new_asid); 110 } else { 111 new_mm_cr3 = build_cr3_noflush(pgdir, new_asid); 112 } 113 114 /* 115 * Caution: many callers of this function expect 116 * that load_cr3() is serializing and orders TLB 117 * fills with respect to the mm_cpumask writes. 118 */ 119 write_cr3(new_mm_cr3); 120 } 121 122 void leave_mm(int cpu) 123 { 124 struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm); 125 126 /* 127 * It's plausible that we're in lazy TLB mode while our mm is init_mm. 128 * If so, our callers still expect us to flush the TLB, but there 129 * aren't any user TLB entries in init_mm to worry about. 130 * 131 * This needs to happen before any other sanity checks due to 132 * intel_idle's shenanigans. 133 */ 134 if (loaded_mm == &init_mm) 135 return; 136 137 /* Warn if we're not lazy. */ 138 WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy)); 139 140 switch_mm(NULL, &init_mm, NULL); 141 } 142 EXPORT_SYMBOL_GPL(leave_mm); 143 144 void switch_mm(struct mm_struct *prev, struct mm_struct *next, 145 struct task_struct *tsk) 146 { 147 unsigned long flags; 148 149 local_irq_save(flags); 150 switch_mm_irqs_off(prev, next, tsk); 151 local_irq_restore(flags); 152 } 153 154 void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, 155 struct task_struct *tsk) 156 { 157 struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm); 158 u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid); 159 unsigned cpu = smp_processor_id(); 160 u64 next_tlb_gen; 161 162 /* 163 * NB: The scheduler will call us with prev == next when switching 164 * from lazy TLB mode to normal mode if active_mm isn't changing. 165 * When this happens, we don't assume that CR3 (and hence 166 * cpu_tlbstate.loaded_mm) matches next. 167 * 168 * NB: leave_mm() calls us with prev == NULL and tsk == NULL. 169 */ 170 171 /* We don't want flush_tlb_func_* to run concurrently with us. */ 172 if (IS_ENABLED(CONFIG_PROVE_LOCKING)) 173 WARN_ON_ONCE(!irqs_disabled()); 174 175 /* 176 * Verify that CR3 is what we think it is. This will catch 177 * hypothetical buggy code that directly switches to swapper_pg_dir 178 * without going through leave_mm() / switch_mm_irqs_off() or that 179 * does something like write_cr3(read_cr3_pa()). 180 * 181 * Only do this check if CONFIG_DEBUG_VM=y because __read_cr3() 182 * isn't free. 183 */ 184 #ifdef CONFIG_DEBUG_VM 185 if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev->pgd, prev_asid))) { 186 /* 187 * If we were to BUG here, we'd be very likely to kill 188 * the system so hard that we don't see the call trace. 189 * Try to recover instead by ignoring the error and doing 190 * a global flush to minimize the chance of corruption. 191 * 192 * (This is far from being a fully correct recovery. 193 * Architecturally, the CPU could prefetch something 194 * back into an incorrect ASID slot and leave it there 195 * to cause trouble down the road. It's better than 196 * nothing, though.) 197 */ 198 __flush_tlb_all(); 199 } 200 #endif 201 this_cpu_write(cpu_tlbstate.is_lazy, false); 202 203 if (real_prev == next) { 204 VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) != 205 next->context.ctx_id); 206 207 /* 208 * We don't currently support having a real mm loaded without 209 * our cpu set in mm_cpumask(). We have all the bookkeeping 210 * in place to figure out whether we would need to flush 211 * if our cpu were cleared in mm_cpumask(), but we don't 212 * currently use it. 213 */ 214 if (WARN_ON_ONCE(real_prev != &init_mm && 215 !cpumask_test_cpu(cpu, mm_cpumask(next)))) 216 cpumask_set_cpu(cpu, mm_cpumask(next)); 217 218 return; 219 } else { 220 u16 new_asid; 221 bool need_flush; 222 223 if (IS_ENABLED(CONFIG_VMAP_STACK)) { 224 /* 225 * If our current stack is in vmalloc space and isn't 226 * mapped in the new pgd, we'll double-fault. Forcibly 227 * map it. 228 */ 229 unsigned int index = pgd_index(current_stack_pointer); 230 pgd_t *pgd = next->pgd + index; 231 232 if (unlikely(pgd_none(*pgd))) 233 set_pgd(pgd, init_mm.pgd[index]); 234 } 235 236 /* Stop remote flushes for the previous mm */ 237 VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) && 238 real_prev != &init_mm); 239 cpumask_clear_cpu(cpu, mm_cpumask(real_prev)); 240 241 /* 242 * Start remote flushes and then read tlb_gen. 243 */ 244 cpumask_set_cpu(cpu, mm_cpumask(next)); 245 next_tlb_gen = atomic64_read(&next->context.tlb_gen); 246 247 choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush); 248 249 if (need_flush) { 250 this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); 251 this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); 252 load_new_mm_cr3(next->pgd, new_asid, true); 253 254 /* 255 * NB: This gets called via leave_mm() in the idle path 256 * where RCU functions differently. Tracing normally 257 * uses RCU, so we need to use the _rcuidle variant. 258 * 259 * (There is no good reason for this. The idle code should 260 * be rearranged to call this before rcu_idle_enter().) 261 */ 262 trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); 263 } else { 264 /* The new ASID is already up to date. */ 265 load_new_mm_cr3(next->pgd, new_asid, false); 266 267 /* See above wrt _rcuidle. */ 268 trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0); 269 } 270 271 this_cpu_write(cpu_tlbstate.loaded_mm, next); 272 this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid); 273 } 274 275 load_mm_cr4(next); 276 switch_ldt(real_prev, next); 277 } 278 279 /* 280 * Please ignore the name of this function. It should be called 281 * switch_to_kernel_thread(). 282 * 283 * enter_lazy_tlb() is a hint from the scheduler that we are entering a 284 * kernel thread or other context without an mm. Acceptable implementations 285 * include doing nothing whatsoever, switching to init_mm, or various clever 286 * lazy tricks to try to minimize TLB flushes. 287 * 288 * The scheduler reserves the right to call enter_lazy_tlb() several times 289 * in a row. It will notify us that we're going back to a real mm by 290 * calling switch_mm_irqs_off(). 291 */ 292 void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 293 { 294 if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm) 295 return; 296 297 if (tlb_defer_switch_to_init_mm()) { 298 /* 299 * There's a significant optimization that may be possible 300 * here. We have accurate enough TLB flush tracking that we 301 * don't need to maintain coherence of TLB per se when we're 302 * lazy. We do, however, need to maintain coherence of 303 * paging-structure caches. We could, in principle, leave our 304 * old mm loaded and only switch to init_mm when 305 * tlb_remove_page() happens. 306 */ 307 this_cpu_write(cpu_tlbstate.is_lazy, true); 308 } else { 309 switch_mm(NULL, &init_mm, NULL); 310 } 311 } 312 313 /* 314 * Call this when reinitializing a CPU. It fixes the following potential 315 * problems: 316 * 317 * - The ASID changed from what cpu_tlbstate thinks it is (most likely 318 * because the CPU was taken down and came back up with CR3's PCID 319 * bits clear. CPU hotplug can do this. 320 * 321 * - The TLB contains junk in slots corresponding to inactive ASIDs. 322 * 323 * - The CPU went so far out to lunch that it may have missed a TLB 324 * flush. 325 */ 326 void initialize_tlbstate_and_flush(void) 327 { 328 int i; 329 struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm); 330 u64 tlb_gen = atomic64_read(&init_mm.context.tlb_gen); 331 unsigned long cr3 = __read_cr3(); 332 333 /* Assert that CR3 already references the right mm. */ 334 WARN_ON((cr3 & CR3_ADDR_MASK) != __pa(mm->pgd)); 335 336 /* 337 * Assert that CR4.PCIDE is set if needed. (CR4.PCIDE initialization 338 * doesn't work like other CR4 bits because it can only be set from 339 * long mode.) 340 */ 341 WARN_ON(boot_cpu_has(X86_FEATURE_PCID) && 342 !(cr4_read_shadow() & X86_CR4_PCIDE)); 343 344 /* Force ASID 0 and force a TLB flush. */ 345 write_cr3(build_cr3(mm->pgd, 0)); 346 347 /* Reinitialize tlbstate. */ 348 this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0); 349 this_cpu_write(cpu_tlbstate.next_asid, 1); 350 this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id); 351 this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, tlb_gen); 352 353 for (i = 1; i < TLB_NR_DYN_ASIDS; i++) 354 this_cpu_write(cpu_tlbstate.ctxs[i].ctx_id, 0); 355 } 356 357 /* 358 * flush_tlb_func_common()'s memory ordering requirement is that any 359 * TLB fills that happen after we flush the TLB are ordered after we 360 * read active_mm's tlb_gen. We don't need any explicit barriers 361 * because all x86 flush operations are serializing and the 362 * atomic64_read operation won't be reordered by the compiler. 363 */ 364 static void flush_tlb_func_common(const struct flush_tlb_info *f, 365 bool local, enum tlb_flush_reason reason) 366 { 367 /* 368 * We have three different tlb_gen values in here. They are: 369 * 370 * - mm_tlb_gen: the latest generation. 371 * - local_tlb_gen: the generation that this CPU has already caught 372 * up to. 373 * - f->new_tlb_gen: the generation that the requester of the flush 374 * wants us to catch up to. 375 */ 376 struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm); 377 u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid); 378 u64 mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen); 379 u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen); 380 381 /* This code cannot presently handle being reentered. */ 382 VM_WARN_ON(!irqs_disabled()); 383 384 if (unlikely(loaded_mm == &init_mm)) 385 return; 386 387 VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) != 388 loaded_mm->context.ctx_id); 389 390 if (this_cpu_read(cpu_tlbstate.is_lazy)) { 391 /* 392 * We're in lazy mode. We need to at least flush our 393 * paging-structure cache to avoid speculatively reading 394 * garbage into our TLB. Since switching to init_mm is barely 395 * slower than a minimal flush, just switch to init_mm. 396 */ 397 switch_mm_irqs_off(NULL, &init_mm, NULL); 398 return; 399 } 400 401 if (unlikely(local_tlb_gen == mm_tlb_gen)) { 402 /* 403 * There's nothing to do: we're already up to date. This can 404 * happen if two concurrent flushes happen -- the first flush to 405 * be handled can catch us all the way up, leaving no work for 406 * the second flush. 407 */ 408 trace_tlb_flush(reason, 0); 409 return; 410 } 411 412 WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen); 413 WARN_ON_ONCE(f->new_tlb_gen > mm_tlb_gen); 414 415 /* 416 * If we get to this point, we know that our TLB is out of date. 417 * This does not strictly imply that we need to flush (it's 418 * possible that f->new_tlb_gen <= local_tlb_gen), but we're 419 * going to need to flush in the very near future, so we might 420 * as well get it over with. 421 * 422 * The only question is whether to do a full or partial flush. 423 * 424 * We do a partial flush if requested and two extra conditions 425 * are met: 426 * 427 * 1. f->new_tlb_gen == local_tlb_gen + 1. We have an invariant that 428 * we've always done all needed flushes to catch up to 429 * local_tlb_gen. If, for example, local_tlb_gen == 2 and 430 * f->new_tlb_gen == 3, then we know that the flush needed to bring 431 * us up to date for tlb_gen 3 is the partial flush we're 432 * processing. 433 * 434 * As an example of why this check is needed, suppose that there 435 * are two concurrent flushes. The first is a full flush that 436 * changes context.tlb_gen from 1 to 2. The second is a partial 437 * flush that changes context.tlb_gen from 2 to 3. If they get 438 * processed on this CPU in reverse order, we'll see 439 * local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL. 440 * If we were to use __flush_tlb_single() and set local_tlb_gen to 441 * 3, we'd be break the invariant: we'd update local_tlb_gen above 442 * 1 without the full flush that's needed for tlb_gen 2. 443 * 444 * 2. f->new_tlb_gen == mm_tlb_gen. This is purely an optimiation. 445 * Partial TLB flushes are not all that much cheaper than full TLB 446 * flushes, so it seems unlikely that it would be a performance win 447 * to do a partial flush if that won't bring our TLB fully up to 448 * date. By doing a full flush instead, we can increase 449 * local_tlb_gen all the way to mm_tlb_gen and we can probably 450 * avoid another flush in the very near future. 451 */ 452 if (f->end != TLB_FLUSH_ALL && 453 f->new_tlb_gen == local_tlb_gen + 1 && 454 f->new_tlb_gen == mm_tlb_gen) { 455 /* Partial flush */ 456 unsigned long addr; 457 unsigned long nr_pages = (f->end - f->start) >> PAGE_SHIFT; 458 459 addr = f->start; 460 while (addr < f->end) { 461 __flush_tlb_single(addr); 462 addr += PAGE_SIZE; 463 } 464 if (local) 465 count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_pages); 466 trace_tlb_flush(reason, nr_pages); 467 } else { 468 /* Full flush. */ 469 local_flush_tlb(); 470 if (local) 471 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); 472 trace_tlb_flush(reason, TLB_FLUSH_ALL); 473 } 474 475 /* Both paths above update our state to mm_tlb_gen. */ 476 this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen); 477 } 478 479 static void flush_tlb_func_local(void *info, enum tlb_flush_reason reason) 480 { 481 const struct flush_tlb_info *f = info; 482 483 flush_tlb_func_common(f, true, reason); 484 } 485 486 static void flush_tlb_func_remote(void *info) 487 { 488 const struct flush_tlb_info *f = info; 489 490 inc_irq_stat(irq_tlb_count); 491 492 if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm)) 493 return; 494 495 count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); 496 flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN); 497 } 498 499 void native_flush_tlb_others(const struct cpumask *cpumask, 500 const struct flush_tlb_info *info) 501 { 502 count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); 503 if (info->end == TLB_FLUSH_ALL) 504 trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL); 505 else 506 trace_tlb_flush(TLB_REMOTE_SEND_IPI, 507 (info->end - info->start) >> PAGE_SHIFT); 508 509 if (is_uv_system()) { 510 /* 511 * This whole special case is confused. UV has a "Broadcast 512 * Assist Unit", which seems to be a fancy way to send IPIs. 513 * Back when x86 used an explicit TLB flush IPI, UV was 514 * optimized to use its own mechanism. These days, x86 uses 515 * smp_call_function_many(), but UV still uses a manual IPI, 516 * and that IPI's action is out of date -- it does a manual 517 * flush instead of calling flush_tlb_func_remote(). This 518 * means that the percpu tlb_gen variables won't be updated 519 * and we'll do pointless flushes on future context switches. 520 * 521 * Rather than hooking native_flush_tlb_others() here, I think 522 * that UV should be updated so that smp_call_function_many(), 523 * etc, are optimal on UV. 524 */ 525 unsigned int cpu; 526 527 cpu = smp_processor_id(); 528 cpumask = uv_flush_tlb_others(cpumask, info); 529 if (cpumask) 530 smp_call_function_many(cpumask, flush_tlb_func_remote, 531 (void *)info, 1); 532 return; 533 } 534 smp_call_function_many(cpumask, flush_tlb_func_remote, 535 (void *)info, 1); 536 } 537 538 /* 539 * See Documentation/x86/tlb.txt for details. We choose 33 540 * because it is large enough to cover the vast majority (at 541 * least 95%) of allocations, and is small enough that we are 542 * confident it will not cause too much overhead. Each single 543 * flush is about 100 ns, so this caps the maximum overhead at 544 * _about_ 3,000 ns. 545 * 546 * This is in units of pages. 547 */ 548 static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33; 549 550 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, 551 unsigned long end, unsigned long vmflag) 552 { 553 int cpu; 554 555 struct flush_tlb_info info = { 556 .mm = mm, 557 }; 558 559 cpu = get_cpu(); 560 561 /* This is also a barrier that synchronizes with switch_mm(). */ 562 info.new_tlb_gen = inc_mm_tlb_gen(mm); 563 564 /* Should we flush just the requested range? */ 565 if ((end != TLB_FLUSH_ALL) && 566 !(vmflag & VM_HUGETLB) && 567 ((end - start) >> PAGE_SHIFT) <= tlb_single_page_flush_ceiling) { 568 info.start = start; 569 info.end = end; 570 } else { 571 info.start = 0UL; 572 info.end = TLB_FLUSH_ALL; 573 } 574 575 if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) { 576 VM_WARN_ON(irqs_disabled()); 577 local_irq_disable(); 578 flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN); 579 local_irq_enable(); 580 } 581 582 if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) 583 flush_tlb_others(mm_cpumask(mm), &info); 584 585 put_cpu(); 586 } 587 588 589 static void do_flush_tlb_all(void *info) 590 { 591 count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); 592 __flush_tlb_all(); 593 } 594 595 void flush_tlb_all(void) 596 { 597 count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); 598 on_each_cpu(do_flush_tlb_all, NULL, 1); 599 } 600 601 static void do_kernel_range_flush(void *info) 602 { 603 struct flush_tlb_info *f = info; 604 unsigned long addr; 605 606 /* flush range by one by one 'invlpg' */ 607 for (addr = f->start; addr < f->end; addr += PAGE_SIZE) 608 __flush_tlb_one(addr); 609 } 610 611 void flush_tlb_kernel_range(unsigned long start, unsigned long end) 612 { 613 614 /* Balance as user space task's flush, a bit conservative */ 615 if (end == TLB_FLUSH_ALL || 616 (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) { 617 on_each_cpu(do_flush_tlb_all, NULL, 1); 618 } else { 619 struct flush_tlb_info info; 620 info.start = start; 621 info.end = end; 622 on_each_cpu(do_kernel_range_flush, &info, 1); 623 } 624 } 625 626 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch) 627 { 628 struct flush_tlb_info info = { 629 .mm = NULL, 630 .start = 0UL, 631 .end = TLB_FLUSH_ALL, 632 }; 633 634 int cpu = get_cpu(); 635 636 if (cpumask_test_cpu(cpu, &batch->cpumask)) { 637 VM_WARN_ON(irqs_disabled()); 638 local_irq_disable(); 639 flush_tlb_func_local(&info, TLB_LOCAL_SHOOTDOWN); 640 local_irq_enable(); 641 } 642 643 if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids) 644 flush_tlb_others(&batch->cpumask, &info); 645 646 cpumask_clear(&batch->cpumask); 647 648 put_cpu(); 649 } 650 651 static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf, 652 size_t count, loff_t *ppos) 653 { 654 char buf[32]; 655 unsigned int len; 656 657 len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling); 658 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 659 } 660 661 static ssize_t tlbflush_write_file(struct file *file, 662 const char __user *user_buf, size_t count, loff_t *ppos) 663 { 664 char buf[32]; 665 ssize_t len; 666 int ceiling; 667 668 len = min(count, sizeof(buf) - 1); 669 if (copy_from_user(buf, user_buf, len)) 670 return -EFAULT; 671 672 buf[len] = '\0'; 673 if (kstrtoint(buf, 0, &ceiling)) 674 return -EINVAL; 675 676 if (ceiling < 0) 677 return -EINVAL; 678 679 tlb_single_page_flush_ceiling = ceiling; 680 return count; 681 } 682 683 static const struct file_operations fops_tlbflush = { 684 .read = tlbflush_read_file, 685 .write = tlbflush_write_file, 686 .llseek = default_llseek, 687 }; 688 689 static int __init create_tlb_single_page_flush_ceiling(void) 690 { 691 debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR, 692 arch_debugfs_dir, NULL, &fops_tlbflush); 693 return 0; 694 } 695 late_initcall(create_tlb_single_page_flush_ceiling); 696