xref: /openbmc/linux/arch/x86/mm/tlb.c (revision e1e38ea1)
1 #include <linux/init.h>
2 
3 #include <linux/mm.h>
4 #include <linux/spinlock.h>
5 #include <linux/smp.h>
6 #include <linux/interrupt.h>
7 #include <linux/export.h>
8 #include <linux/cpu.h>
9 #include <linux/debugfs.h>
10 
11 #include <asm/tlbflush.h>
12 #include <asm/mmu_context.h>
13 #include <asm/nospec-branch.h>
14 #include <asm/cache.h>
15 #include <asm/apic.h>
16 #include <asm/uv/uv.h>
17 
18 /*
19  *	TLB flushing, formerly SMP-only
20  *		c/o Linus Torvalds.
21  *
22  *	These mean you can really definitely utterly forget about
23  *	writing to user space from interrupts. (It's not allowed anyway).
24  *
25  *	Optimizations Manfred Spraul <manfred@colorfullife.com>
26  *
27  *	More scalable flush, from Andi Kleen
28  *
29  *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
30  */
31 
32 /*
33  * We get here when we do something requiring a TLB invalidation
34  * but could not go invalidate all of the contexts.  We do the
35  * necessary invalidation by clearing out the 'ctx_id', which
36  * forces a TLB flush when the context is loaded.
37  */
38 static void clear_asid_other(void)
39 {
40 	u16 asid;
41 
42 	/*
43 	 * This is only expected to be set if we have disabled
44 	 * kernel _PAGE_GLOBAL pages.
45 	 */
46 	if (!static_cpu_has(X86_FEATURE_PTI)) {
47 		WARN_ON_ONCE(1);
48 		return;
49 	}
50 
51 	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
52 		/* No need to flush the currently loaded ASID. */
53 		if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid))
54 			continue;
55 		/*
56 		 * Make sure the next time we go to switch to
57 		 * this asid, we do a flush:
58 		 */
59 		this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0);
60 	}
61 	this_cpu_write(cpu_tlbstate.invalidate_other, false);
62 }
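/*
 * Editor's note (not part of the original source): a sketch of why zeroing
 * ctx_id above is sufficient.  Valid context ids are handed out from
 * last_mm_ctx_id (which starts at 1, see below), so a recorded ctx_id of 0
 * can never match a live mm in choose_new_asid():
 *
 *	if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) !=
 *	    next->context.ctx_id)
 *		continue;	// slot looks foreign, skip it
 *
 * Whichever mm eventually lands in the slot therefore takes the
 * "allocate a new slot" path and gets need_flush = true.
 */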
63 
64 atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
65 
66 
67 static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
68 			    u16 *new_asid, bool *need_flush)
69 {
70 	u16 asid;
71 
72 	if (!static_cpu_has(X86_FEATURE_PCID)) {
73 		*new_asid = 0;
74 		*need_flush = true;
75 		return;
76 	}
77 
78 	if (this_cpu_read(cpu_tlbstate.invalidate_other))
79 		clear_asid_other();
80 
81 	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
82 		if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) !=
83 		    next->context.ctx_id)
84 			continue;
85 
86 		*new_asid = asid;
87 		*need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) <
88 			       next_tlb_gen);
89 		return;
90 	}
91 
92 	/*
93 	 * We don't currently own an ASID slot on this CPU.
94 	 * Allocate a slot.
95 	 */
96 	*new_asid = this_cpu_add_return(cpu_tlbstate.next_asid, 1) - 1;
97 	if (*new_asid >= TLB_NR_DYN_ASIDS) {
98 		*new_asid = 0;
99 		this_cpu_write(cpu_tlbstate.next_asid, 1);
100 	}
101 	*need_flush = true;
102 }
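/*
 * Editor's note: the three possible outcomes of choose_new_asid(), as a
 * quick reference derived from the code above (not from the original
 * comments):
 *
 *	slot matches ctx_id, tlb_gen current  -> reuse slot, no flush
 *	slot matches ctx_id, tlb_gen stale    -> reuse slot, need_flush
 *	no slot matches                       -> round-robin slot from
 *						 next_asid, need_flush
 *
 * Without X86_FEATURE_PCID everything collapses to ASID 0 with a flush on
 * every switch.
 */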
103 
104 static void load_new_mm_cr3(pgd_t *pgdir, u16 new_asid, bool need_flush)
105 {
106 	unsigned long new_mm_cr3;
107 
108 	if (need_flush) {
109 		invalidate_user_asid(new_asid);
110 		new_mm_cr3 = build_cr3(pgdir, new_asid);
111 	} else {
112 		new_mm_cr3 = build_cr3_noflush(pgdir, new_asid);
113 	}
114 
115 	/*
116 	 * Caution: many callers of this function expect
117 	 * that load_cr3() is serializing and orders TLB
118 	 * fills with respect to the mm_cpumask writes.
119 	 */
120 	write_cr3(new_mm_cr3);
121 }
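/*
 * Editor's note: a rough sketch of what distinguishes the two CR3 values
 * built above.  The exact helper definitions live in asm/tlbflush.h and
 * asm/processor-flags.h; approximately:
 *
 *	build_cr3(pgdir, asid)          ~ __sme_pa(pgdir) | kern_pcid(asid)
 *	build_cr3_noflush(pgdir, asid)  ~ build_cr3(pgdir, asid) | CR3_NOFLUSH
 *
 * The no-flush bit is bit 63 of CR3; setting it asks the CPU to keep the
 * TLB entries tagged with this PCID, while leaving it clear makes the CR3
 * write flush them.
 */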
122 
123 void leave_mm(int cpu)
124 {
125 	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
126 
127 	/*
128 	 * It's plausible that we're in lazy TLB mode while our mm is init_mm.
129 	 * If so, our callers still expect us to flush the TLB, but there
130 	 * aren't any user TLB entries in init_mm to worry about.
131 	 *
132 	 * This needs to happen before any other sanity checks due to
133 	 * intel_idle's shenanigans.
134 	 */
135 	if (loaded_mm == &init_mm)
136 		return;
137 
138 	/* Warn if we're not lazy. */
139 	WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy));
140 
141 	switch_mm(NULL, &init_mm, NULL);
142 }
143 EXPORT_SYMBOL_GPL(leave_mm);
144 
145 void switch_mm(struct mm_struct *prev, struct mm_struct *next,
146 	       struct task_struct *tsk)
147 {
148 	unsigned long flags;
149 
150 	local_irq_save(flags);
151 	switch_mm_irqs_off(prev, next, tsk);
152 	local_irq_restore(flags);
153 }
154 
155 static void sync_current_stack_to_mm(struct mm_struct *mm)
156 {
157 	unsigned long sp = current_stack_pointer;
158 	pgd_t *pgd = pgd_offset(mm, sp);
159 
160 	if (pgtable_l5_enabled()) {
161 		if (unlikely(pgd_none(*pgd))) {
162 			pgd_t *pgd_ref = pgd_offset_k(sp);
163 
164 			set_pgd(pgd, *pgd_ref);
165 		}
166 	} else {
167 		/*
168 		 * "pgd" is faked.  The top level entries are "p4d"s, so sync
169 		 * the p4d.  This compiles to approximately the same code as
170 		 * the 5-level case.
171 		 */
172 		p4d_t *p4d = p4d_offset(pgd, sp);
173 
174 		if (unlikely(p4d_none(*p4d))) {
175 			pgd_t *pgd_ref = pgd_offset_k(sp);
176 			p4d_t *p4d_ref = p4d_offset(pgd_ref, sp);
177 
178 			set_p4d(p4d, *p4d_ref);
179 		}
180 	}
181 }
182 
183 void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
184 			struct task_struct *tsk)
185 {
186 	struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
187 	u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
188 	unsigned cpu = smp_processor_id();
189 	u64 next_tlb_gen;
190 
191 	/*
192 	 * NB: The scheduler will call us with prev == next when switching
193 	 * from lazy TLB mode to normal mode if active_mm isn't changing.
194 	 * When this happens, we don't assume that CR3 (and hence
195 	 * cpu_tlbstate.loaded_mm) matches next.
196 	 *
197 	 * NB: leave_mm() calls us with prev == NULL and tsk == NULL.
198 	 */
199 
200 	/* We don't want flush_tlb_func_* to run concurrently with us. */
201 	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
202 		WARN_ON_ONCE(!irqs_disabled());
203 
204 	/*
205 	 * Verify that CR3 is what we think it is.  This will catch
206 	 * hypothetical buggy code that directly switches to swapper_pg_dir
207 	 * without going through leave_mm() / switch_mm_irqs_off() or that
208 	 * does something like write_cr3(read_cr3_pa()).
209 	 *
210 	 * Only do this check if CONFIG_DEBUG_VM=y because __read_cr3()
211 	 * isn't free.
212 	 */
213 #ifdef CONFIG_DEBUG_VM
214 	if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev->pgd, prev_asid))) {
215 		/*
216 		 * If we were to BUG here, we'd be very likely to kill
217 		 * the system so hard that we don't see the call trace.
218 		 * Try to recover instead by ignoring the error and doing
219 		 * a global flush to minimize the chance of corruption.
220 		 *
221 		 * (This is far from being a fully correct recovery.
222 		 *  Architecturally, the CPU could prefetch something
223 		 *  back into an incorrect ASID slot and leave it there
224 		 *  to cause trouble down the road.  It's better than
225 		 *  nothing, though.)
226 		 */
227 		__flush_tlb_all();
228 	}
229 #endif
230 	this_cpu_write(cpu_tlbstate.is_lazy, false);
231 
232 	/*
233 	 * The membarrier system call requires a full memory barrier and
234 	 * core serialization before returning to user-space, after
235 	 * storing to rq->curr. Writing to CR3 provides that full
236 	 * memory barrier and core serializing instruction.
237 	 */
238 	if (real_prev == next) {
239 		VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
240 			   next->context.ctx_id);
241 
242 		/*
243 		 * We don't currently support having a real mm loaded without
244 		 * our cpu set in mm_cpumask().  We have all the bookkeeping
245 		 * in place to figure out whether we would need to flush
246 		 * if our cpu were cleared in mm_cpumask(), but we don't
247 		 * currently use it.
248 		 */
249 		if (WARN_ON_ONCE(real_prev != &init_mm &&
250 				 !cpumask_test_cpu(cpu, mm_cpumask(next))))
251 			cpumask_set_cpu(cpu, mm_cpumask(next));
252 
253 		return;
254 	} else {
255 		u16 new_asid;
256 		bool need_flush;
257 		u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);
258 
259 		/*
260 		 * Avoid user/user BTB poisoning by flushing the branch
261 		 * predictor when switching between processes. This stops
262 		 * one process from doing Spectre-v2 attacks on another.
263 		 *
264 		 * As an optimization, flush indirect branches only when
265 		 * switching into processes that disable dumping. This
266 		 * protects high value processes like gpg, without having
267 		 * too high performance overhead. IBPB is *expensive*!
268 		 *
269 		 * This will not flush branches when switching into kernel
270 		 * threads. It will also not flush if we switch to idle
271 		 * thread and back to the same process. It will flush if we
272 		 * switch to a different non-dumpable process.
273 		 */
274 		if (tsk && tsk->mm &&
275 		    tsk->mm->context.ctx_id != last_ctx_id &&
276 		    get_dumpable(tsk->mm) != SUID_DUMP_USER)
277 			indirect_branch_prediction_barrier();
278 
279 		if (IS_ENABLED(CONFIG_VMAP_STACK)) {
280 			/*
281 			 * If our current stack is in vmalloc space and isn't
282 			 * mapped in the new pgd, we'll double-fault.  Forcibly
283 			 * map it.
284 			 */
285 			sync_current_stack_to_mm(next);
286 		}
287 
288 		/*
289 		 * Stop remote flushes for the previous mm.
290 		 * Skip kernel threads; we never send init_mm TLB flushing IPIs,
291 		 * but the bitmap manipulation can cause cache line contention.
292 		 */
293 		if (real_prev != &init_mm) {
294 			VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu,
295 						mm_cpumask(real_prev)));
296 			cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
297 		}
298 
299 		/*
300 		 * Start remote flushes and then read tlb_gen.
301 		 */
302 		if (next != &init_mm)
303 			cpumask_set_cpu(cpu, mm_cpumask(next));
304 		next_tlb_gen = atomic64_read(&next->context.tlb_gen);
305 
306 		choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
307 
308 		if (need_flush) {
309 			this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
310 			this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
311 			load_new_mm_cr3(next->pgd, new_asid, true);
312 
313 			/*
314 			 * NB: This gets called via leave_mm() in the idle path
315 			 * where RCU functions differently.  Tracing normally
316 			 * uses RCU, so we need to use the _rcuidle variant.
317 			 *
318 			 * (There is no good reason for this.  The idle code should
319 			 *  be rearranged to call this before rcu_idle_enter().)
320 			 */
321 			trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
322 		} else {
323 			/* The new ASID is already up to date. */
324 			load_new_mm_cr3(next->pgd, new_asid, false);
325 
326 			/* See above wrt _rcuidle. */
327 			trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
328 		}
329 
330 		/*
331 		 * Record the last user mm's context id, so we can avoid
332 		 * flushing the branch predictor with IBPB if we switch
333 		 * back to the same user.
334 		 */
335 		if (next != &init_mm)
336 			this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
337 
338 		this_cpu_write(cpu_tlbstate.loaded_mm, next);
339 		this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
340 	}
341 
342 	load_mm_cr4(next);
343 	switch_ldt(real_prev, next);
344 }
345 
346 /*
347  * Please ignore the name of this function.  It should be called
348  * switch_to_kernel_thread().
349  *
350  * enter_lazy_tlb() is a hint from the scheduler that we are entering a
351  * kernel thread or other context without an mm.  Acceptable implementations
352  * include doing nothing whatsoever, switching to init_mm, or various clever
353  * lazy tricks to try to minimize TLB flushes.
354  *
355  * The scheduler reserves the right to call enter_lazy_tlb() several times
356  * in a row.  It will notify us that we're going back to a real mm by
357  * calling switch_mm_irqs_off().
358  */
359 void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
360 {
361 	if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
362 		return;
363 
364 	if (tlb_defer_switch_to_init_mm()) {
365 		/*
366 		 * There's a significant optimization that may be possible
367 		 * here.  We have accurate enough TLB flush tracking that we
368 		 * don't need to maintain coherence of TLB per se when we're
369 		 * lazy.  We do, however, need to maintain coherence of
370 		 * paging-structure caches.  We could, in principle, leave our
371 		 * old mm loaded and only switch to init_mm when
372 		 * tlb_remove_page() happens.
373 		 */
374 		this_cpu_write(cpu_tlbstate.is_lazy, true);
375 	} else {
376 		switch_mm(NULL, &init_mm, NULL);
377 	}
378 }
379 
380 /*
381  * Call this when reinitializing a CPU.  It fixes the following potential
382  * problems:
383  *
384  * - The ASID changed from what cpu_tlbstate thinks it is (most likely
385  *   because the CPU was taken down and came back up with CR3's PCID
386  *   bits clear.  CPU hotplug can do this.)
387  *
388  * - The TLB contains junk in slots corresponding to inactive ASIDs.
389  *
390  * - The CPU went so far out to lunch that it may have missed a TLB
391  *   flush.
392  */
393 void initialize_tlbstate_and_flush(void)
394 {
395 	int i;
396 	struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
397 	u64 tlb_gen = atomic64_read(&init_mm.context.tlb_gen);
398 	unsigned long cr3 = __read_cr3();
399 
400 	/* Assert that CR3 already references the right mm. */
401 	WARN_ON((cr3 & CR3_ADDR_MASK) != __pa(mm->pgd));
402 
403 	/*
404 	 * Assert that CR4.PCIDE is set if needed.  (CR4.PCIDE initialization
405 	 * doesn't work like other CR4 bits because it can only be set from
406 	 * long mode.)
407 	 */
408 	WARN_ON(boot_cpu_has(X86_FEATURE_PCID) &&
409 		!(cr4_read_shadow() & X86_CR4_PCIDE));
410 
411 	/* Force ASID 0 and force a TLB flush. */
412 	write_cr3(build_cr3(mm->pgd, 0));
413 
414 	/* Reinitialize tlbstate. */
415 	this_cpu_write(cpu_tlbstate.last_ctx_id, mm->context.ctx_id);
416 	this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
417 	this_cpu_write(cpu_tlbstate.next_asid, 1);
418 	this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
419 	this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, tlb_gen);
420 
421 	for (i = 1; i < TLB_NR_DYN_ASIDS; i++)
422 		this_cpu_write(cpu_tlbstate.ctxs[i].ctx_id, 0);
423 }
424 
425 /*
426  * flush_tlb_func_common()'s memory ordering requirement is that any
427  * TLB fills that happen after we flush the TLB are ordered after we
428  * read active_mm's tlb_gen.  We don't need any explicit barriers
429  * because all x86 flush operations are serializing and the
430  * atomic64_read operation won't be reordered by the compiler.
431  */
432 static void flush_tlb_func_common(const struct flush_tlb_info *f,
433 				  bool local, enum tlb_flush_reason reason)
434 {
435 	/*
436 	 * We have three different tlb_gen values in here.  They are:
437 	 *
438 	 * - mm_tlb_gen:     the latest generation.
439 	 * - local_tlb_gen:  the generation that this CPU has already caught
440 	 *                   up to.
441 	 * - f->new_tlb_gen: the generation that the requester of the flush
442 	 *                   wants us to catch up to.
443 	 */
444 	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
445 	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
446 	u64 mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
447 	u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
448 
449 	/* This code cannot presently handle being reentered. */
450 	VM_WARN_ON(!irqs_disabled());
451 
452 	if (unlikely(loaded_mm == &init_mm))
453 		return;
454 
455 	VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
456 		   loaded_mm->context.ctx_id);
457 
458 	if (this_cpu_read(cpu_tlbstate.is_lazy)) {
459 		/*
460 		 * We're in lazy mode.  We need to at least flush our
461 		 * paging-structure cache to avoid speculatively reading
462 		 * garbage into our TLB.  Since switching to init_mm is barely
463 		 * slower than a minimal flush, just switch to init_mm.
464 		 */
465 		switch_mm_irqs_off(NULL, &init_mm, NULL);
466 		return;
467 	}
468 
469 	if (unlikely(local_tlb_gen == mm_tlb_gen)) {
470 		/*
471 		 * There's nothing to do: we're already up to date.  This can
472 		 * happen if two concurrent flushes happen -- the first flush to
473 		 * be handled can catch us all the way up, leaving no work for
474 		 * the second flush.
475 		 */
476 		trace_tlb_flush(reason, 0);
477 		return;
478 	}
479 
480 	WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen);
481 	WARN_ON_ONCE(f->new_tlb_gen > mm_tlb_gen);
482 
483 	/*
484 	 * If we get to this point, we know that our TLB is out of date.
485 	 * This does not strictly imply that we need to flush (it's
486 	 * possible that f->new_tlb_gen <= local_tlb_gen), but we're
487 	 * going to need to flush in the very near future, so we might
488 	 * as well get it over with.
489 	 *
490 	 * The only question is whether to do a full or partial flush.
491 	 *
492 	 * We do a partial flush if requested and two extra conditions
493 	 * are met:
494 	 *
495 	 * 1. f->new_tlb_gen == local_tlb_gen + 1.  We have an invariant that
496 	 *    we've always done all needed flushes to catch up to
497 	 *    local_tlb_gen.  If, for example, local_tlb_gen == 2 and
498 	 *    f->new_tlb_gen == 3, then we know that the flush needed to bring
499 	 *    us up to date for tlb_gen 3 is the partial flush we're
500 	 *    processing.
501 	 *
502 	 *    As an example of why this check is needed, suppose that there
503 	 *    are two concurrent flushes.  The first is a full flush that
504 	 *    changes context.tlb_gen from 1 to 2.  The second is a partial
505 	 *    flush that changes context.tlb_gen from 2 to 3.  If they get
506 	 *    processed on this CPU in reverse order, we'll see
507 	 *     local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL.
508 	 *    If we were to use __flush_tlb_one_user() and set local_tlb_gen to
509 	 *    3, we'd break the invariant: we'd update local_tlb_gen above
510 	 *    1 without the full flush that's needed for tlb_gen 2.
511 	 *
512 	 * 2. f->new_tlb_gen == mm_tlb_gen.  This is purely an optimization.
513 	 *    Partial TLB flushes are not all that much cheaper than full TLB
514 	 *    flushes, so it seems unlikely that it would be a performance win
515 	 *    to do a partial flush if that won't bring our TLB fully up to
516 	 *    date.  By doing a full flush instead, we can increase
517 	 *    local_tlb_gen all the way to mm_tlb_gen and we can probably
518 	 *    avoid another flush in the very near future.
519 	 */
520 	if (f->end != TLB_FLUSH_ALL &&
521 	    f->new_tlb_gen == local_tlb_gen + 1 &&
522 	    f->new_tlb_gen == mm_tlb_gen) {
523 		/* Partial flush */
524 		unsigned long addr;
525 		unsigned long nr_pages = (f->end - f->start) >> PAGE_SHIFT;
526 
527 		addr = f->start;
528 		while (addr < f->end) {
529 			__flush_tlb_one_user(addr);
530 			addr += PAGE_SIZE;
531 		}
532 		if (local)
533 			count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_pages);
534 		trace_tlb_flush(reason, nr_pages);
535 	} else {
536 		/* Full flush. */
537 		local_flush_tlb();
538 		if (local)
539 			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
540 		trace_tlb_flush(reason, TLB_FLUSH_ALL);
541 	}
542 
543 	/* Both paths above update our state to mm_tlb_gen. */
544 	this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
545 }
546 
547 static void flush_tlb_func_local(void *info, enum tlb_flush_reason reason)
548 {
549 	const struct flush_tlb_info *f = info;
550 
551 	flush_tlb_func_common(f, true, reason);
552 }
553 
554 static void flush_tlb_func_remote(void *info)
555 {
556 	const struct flush_tlb_info *f = info;
557 
558 	inc_irq_stat(irq_tlb_count);
559 
560 	if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
561 		return;
562 
563 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
564 	flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
565 }
566 
567 void native_flush_tlb_others(const struct cpumask *cpumask,
568 			     const struct flush_tlb_info *info)
569 {
570 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
571 	if (info->end == TLB_FLUSH_ALL)
572 		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
573 	else
574 		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
575 				(info->end - info->start) >> PAGE_SHIFT);
576 
577 	if (is_uv_system()) {
578 		/*
579 		 * This whole special case is confused.  UV has a "Broadcast
580 		 * Assist Unit", which seems to be a fancy way to send IPIs.
581 		 * Back when x86 used an explicit TLB flush IPI, UV was
582 		 * optimized to use its own mechanism.  These days, x86 uses
583 		 * smp_call_function_many(), but UV still uses a manual IPI,
584 		 * and that IPI's action is out of date -- it does a manual
585 		 * flush instead of calling flush_tlb_func_remote().  This
586 		 * means that the percpu tlb_gen variables won't be updated
587 		 * and we'll do pointless flushes on future context switches.
588 		 *
589 		 * Rather than hooking native_flush_tlb_others() here, I think
590 		 * that UV should be updated so that smp_call_function_many(),
591 		 * etc, are optimal on UV.
592 		 */
593 		unsigned int cpu;
594 
595 		cpu = smp_processor_id();
596 		cpumask = uv_flush_tlb_others(cpumask, info);
597 		if (cpumask)
598 			smp_call_function_many(cpumask, flush_tlb_func_remote,
599 					       (void *)info, 1);
600 		return;
601 	}
602 	smp_call_function_many(cpumask, flush_tlb_func_remote,
603 			       (void *)info, 1);
604 }
605 
606 /*
607  * See Documentation/x86/tlb.txt for details.  We choose 33
608  * because it is large enough to cover the vast majority (at
609  * least 95%) of allocations, and is small enough that we are
610  * confident it will not cause too much overhead.  Each single
611  * flush is about 100 ns, so this caps the maximum overhead at
612  * _about_ 3,000 ns.
613  *
614  * This is in units of pages.
615  */
616 static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
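/*
 * Editor's note: a worked example of the ceiling above.  At ~100 ns per
 * 'invlpg', a 16-page range costs roughly 1.6 us and is flushed page by
 * page, while a 64-page range (256 KiB with 4 KiB pages) exceeds the
 * 33-page ceiling and flush_tlb_mm_range() below falls back to a full
 * flush instead.
 */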
617 
618 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
619 				unsigned long end, unsigned long vmflag)
620 {
621 	int cpu;
622 
623 	struct flush_tlb_info info __aligned(SMP_CACHE_BYTES) = {
624 		.mm = mm,
625 	};
626 
627 	cpu = get_cpu();
628 
629 	/* This is also a barrier that synchronizes with switch_mm(). */
630 	info.new_tlb_gen = inc_mm_tlb_gen(mm);
631 
632 	/* Should we flush just the requested range? */
633 	if ((end != TLB_FLUSH_ALL) &&
634 	    !(vmflag & VM_HUGETLB) &&
635 	    ((end - start) >> PAGE_SHIFT) <= tlb_single_page_flush_ceiling) {
636 		info.start = start;
637 		info.end = end;
638 	} else {
639 		info.start = 0UL;
640 		info.end = TLB_FLUSH_ALL;
641 	}
642 
643 	if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
644 		VM_WARN_ON(irqs_disabled());
645 		local_irq_disable();
646 		flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN);
647 		local_irq_enable();
648 	}
649 
650 	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
651 		flush_tlb_others(mm_cpumask(mm), &info);
652 
653 	put_cpu();
654 }
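/*
 * Editor's note: a hypothetical caller's view of flush_tlb_mm_range().
 * Generic mm code normally reaches it via the flush_tlb_range() wrapper
 * in asm/tlbflush.h, which is roughly:
 *
 *	#define flush_tlb_range(vma, start, end)			\
 *		flush_tlb_mm_range((vma)->vm_mm, start, end, (vma)->vm_flags)
 *
 * so a page-permission change on a VMA ends up as something like:
 *
 *	flush_tlb_mm_range(vma->vm_mm, vma->vm_start, vma->vm_end,
 *			   vma->vm_flags);
 */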
655 
656 
657 static void do_flush_tlb_all(void *info)
658 {
659 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
660 	__flush_tlb_all();
661 }
662 
663 void flush_tlb_all(void)
664 {
665 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
666 	on_each_cpu(do_flush_tlb_all, NULL, 1);
667 }
668 
669 static void do_kernel_range_flush(void *info)
670 {
671 	struct flush_tlb_info *f = info;
672 	unsigned long addr;
673 
674 	/* Flush the range one page at a time with 'invlpg'. */
675 	for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
676 		__flush_tlb_one_kernel(addr);
677 }
678 
679 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
680 {
681 
682 	/* Apply the same ceiling as a user-space task's flush; a bit conservative. */
683 	if (end == TLB_FLUSH_ALL ||
684 	    (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
685 		on_each_cpu(do_flush_tlb_all, NULL, 1);
686 	} else {
687 		struct flush_tlb_info info;
688 		info.start = start;
689 		info.end = end;
690 		on_each_cpu(do_kernel_range_flush, &info, 1);
691 	}
692 }
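/*
 * Editor's note: a sketch of typical usage of flush_tlb_kernel_range().
 * Code that tears down kernel mappings (the vmalloc purge path, for
 * example) clears the page tables first and then flushes the stale
 * translations:
 *
 *	unmap_kernel_range_noflush(addr, size);
 *	flush_tlb_kernel_range(addr, addr + size);
 */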
693 
694 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
695 {
696 	struct flush_tlb_info info = {
697 		.mm = NULL,
698 		.start = 0UL,
699 		.end = TLB_FLUSH_ALL,
700 	};
701 
702 	int cpu = get_cpu();
703 
704 	if (cpumask_test_cpu(cpu, &batch->cpumask)) {
705 		VM_WARN_ON(irqs_disabled());
706 		local_irq_disable();
707 		flush_tlb_func_local(&info, TLB_LOCAL_SHOOTDOWN);
708 		local_irq_enable();
709 	}
710 
711 	if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
712 		flush_tlb_others(&batch->cpumask, &info);
713 
714 	cpumask_clear(&batch->cpumask);
715 
716 	put_cpu();
717 }
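/*
 * Editor's note: arch_tlbbatch_flush() above is the x86 back end of the
 * batched unmap flushing done by page reclaim.  mm/rmap.c collects CPUs
 * into batch->cpumask with arch_tlbbatch_add_mm() while unmapping and
 * later issues one combined flush, roughly:
 *
 *	try_to_unmap_flush()
 *		-> arch_tlbbatch_flush(&current->tlb_ubc.arch);
 */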
718 
719 static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
720 			     size_t count, loff_t *ppos)
721 {
722 	char buf[32];
723 	unsigned int len;
724 
725 	len = sprintf(buf, "%lu\n", tlb_single_page_flush_ceiling);
726 	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
727 }
728 
729 static ssize_t tlbflush_write_file(struct file *file,
730 		 const char __user *user_buf, size_t count, loff_t *ppos)
731 {
732 	char buf[32];
733 	ssize_t len;
734 	int ceiling;
735 
736 	len = min(count, sizeof(buf) - 1);
737 	if (copy_from_user(buf, user_buf, len))
738 		return -EFAULT;
739 
740 	buf[len] = '\0';
741 	if (kstrtoint(buf, 0, &ceiling))
742 		return -EINVAL;
743 
744 	if (ceiling < 0)
745 		return -EINVAL;
746 
747 	tlb_single_page_flush_ceiling = ceiling;
748 	return count;
749 }
750 
751 static const struct file_operations fops_tlbflush = {
752 	.read = tlbflush_read_file,
753 	.write = tlbflush_write_file,
754 	.llseek = default_llseek,
755 };
756 
757 static int __init create_tlb_single_page_flush_ceiling(void)
758 {
759 	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
760 			    arch_debugfs_dir, NULL, &fops_tlbflush);
761 	return 0;
762 }
763 late_initcall(create_tlb_single_page_flush_ceiling);
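/*
 * Editor's note: with arch_debugfs_dir, the knob created above normally
 * appears as /sys/kernel/debug/x86/tlb_single_page_flush_ceiling (see
 * Documentation/x86/tlb.txt).  Assuming debugfs is mounted in the usual
 * place:
 *
 *	# cat /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *	33
 *	# echo 50 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 */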
764