xref: /openbmc/linux/arch/x86/mm/tlb.c (revision 036b9e7c)
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/nospec-branch.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

/*
 *	TLB flushing, formerly SMP-only
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway.)
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

/*
 * Use bit 0 to mangle the TIF_SPEC_IB state into the mm pointer which is
 * stored in cpu_tlbstate.last_user_mm_ibpb.
 */
#define LAST_USER_MM_IBPB	0x1UL

/*
 * We get here when we do something requiring a TLB invalidation
 * but could not go invalidate all of the contexts.  We do the
 * necessary invalidation by clearing out the 'ctx_id' which
 * forces a TLB flush when the context is loaded.
 */
static void clear_asid_other(void)
{
	u16 asid;

	/*
	 * This is only expected to be set if we have disabled
	 * kernel _PAGE_GLOBAL pages.
	 */
	if (!static_cpu_has(X86_FEATURE_PTI)) {
		WARN_ON_ONCE(1);
		return;
	}

	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
		/* No need to flush the current ASID. */
		if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid))
			continue;
		/*
		 * Make sure the next time we go to switch to
		 * this asid, we do a flush:
		 */
		this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0);
	}
	this_cpu_write(cpu_tlbstate.invalidate_other, false);
}

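/*
 * Source of mm ctx_id values.  It starts at 1 so that 0 can serve as an
 * "invalid" value; clear_asid_other() above relies on that by writing 0
 * into a slot to force a mismatch (and thus a flush) on next use.
 */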
atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);

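/*
 * Pick an ASID slot on this CPU for @next.  If @next already owns one of
 * the TLB_NR_DYN_ASIDS slots, reuse it and flush only if that slot's
 * tlb_gen is stale.  Otherwise evict the next slot round-robin.
 *
 * Illustrative example, assuming TLB_NR_DYN_ASIDS == 6: once slots 0..5
 * have been handed out, the seventh mm wraps next_asid and evicts slot 0;
 * the evicted mm simply reallocates a slot (and takes a full flush) the
 * next time it is switched in on this CPU.
 */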
static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
			    u16 *new_asid, bool *need_flush)
{
	u16 asid;

	if (!static_cpu_has(X86_FEATURE_PCID)) {
		*new_asid = 0;
		*need_flush = true;
		return;
	}

	if (this_cpu_read(cpu_tlbstate.invalidate_other))
		clear_asid_other();

	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
		if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) !=
		    next->context.ctx_id)
			continue;

		*new_asid = asid;
		*need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) <
			       next_tlb_gen);
		return;
	}

	/*
	 * We don't currently own an ASID slot on this CPU.
	 * Allocate a slot.
	 */
	*new_asid = this_cpu_add_return(cpu_tlbstate.next_asid, 1) - 1;
	if (*new_asid >= TLB_NR_DYN_ASIDS) {
		*new_asid = 0;
		this_cpu_write(cpu_tlbstate.next_asid, 1);
	}
	*need_flush = true;
}

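/*
 * Note: build_cr3_noflush() sets the CR3 "no flush" bit (bit 63, valid
 * when CR4.PCIDE is set) so the write below switches page tables without
 * discarding TLB entries tagged with the target PCID, while build_cr3()
 * leaves it clear and the write also flushes that PCID.  Both helpers
 * live in asm/tlbflush.h.
 */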
static void load_new_mm_cr3(pgd_t *pgdir, u16 new_asid, bool need_flush)
{
	unsigned long new_mm_cr3;

	if (need_flush) {
		invalidate_user_asid(new_asid);
		new_mm_cr3 = build_cr3(pgdir, new_asid);
	} else {
		new_mm_cr3 = build_cr3_noflush(pgdir, new_asid);
	}

	/*
	 * Caution: many callers of this function expect
	 * that load_cr3() is serializing and orders TLB
	 * fills with respect to the mm_cpumask writes.
	 */
	write_cr3(new_mm_cr3);
}

void leave_mm(int cpu)
{
	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);

	/*
	 * It's plausible that we're in lazy TLB mode while our mm is init_mm.
	 * If so, our callers still expect us to flush the TLB, but there
	 * aren't any user TLB entries in init_mm to worry about.
	 *
	 * This needs to happen before any other sanity checks due to
	 * intel_idle's shenanigans.
	 */
	if (loaded_mm == &init_mm)
		return;

	/* Warn if we're not lazy. */
	WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy));

	switch_mm(NULL, &init_mm, NULL);
}
EXPORT_SYMBOL_GPL(leave_mm);

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}

static void sync_current_stack_to_mm(struct mm_struct *mm)
{
	unsigned long sp = current_stack_pointer;
	pgd_t *pgd = pgd_offset(mm, sp);

	if (pgtable_l5_enabled()) {
		if (unlikely(pgd_none(*pgd))) {
			pgd_t *pgd_ref = pgd_offset_k(sp);

			set_pgd(pgd, *pgd_ref);
		}
	} else {
		/*
		 * "pgd" is faked.  The top level entries are "p4d"s, so sync
		 * the p4d.  This compiles to approximately the same code as
		 * the 5-level case.
		 */
		p4d_t *p4d = p4d_offset(pgd, sp);

		if (unlikely(p4d_none(*p4d))) {
			pgd_t *pgd_ref = pgd_offset_k(sp);
			p4d_t *p4d_ref = p4d_offset(pgd_ref, sp);

			set_p4d(p4d, *p4d_ref);
		}
	}
}

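/*
 * mm_struct pointers are at least word aligned, so bit 0 of the pointer
 * is always free and TIF_SPEC_IB can be shifted down into it.  E.g.
 * (illustrative) an mm at 0xffff888100000000 whose next task has
 * TIF_SPEC_IB set mangles to 0xffff888100000001.
 */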
static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
{
	unsigned long next_tif = task_thread_info(next)->flags;
	unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB;

	return (unsigned long)next->mm | ibpb;
}

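/*
 * Summary of the conditional mode below: an IBPB is issued only when the
 * mangled mm changes AND at least one side has the IBPB bit set:
 *
 *	transition					IBPB?
 *	same process, TIF_SPEC_IB unchanged		no
 *	different process, neither has TIF_SPEC_IB	no
 *	different process, either has TIF_SPEC_IB	yes
 *
 * The always mode ignores TIF_SPEC_IB and compares only the mm pointer.
 */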
static void cond_ibpb(struct task_struct *next)
{
	if (!next || !next->mm)
		return;

	/*
	 * Both the conditional and the always IBPB mode use the mm
	 * pointer to avoid the IBPB when switching between tasks of the
	 * same process. Using the mm pointer instead of mm->context.ctx_id
	 * opens a hypothetical hole vs. mm_struct reuse, which is more or
	 * less impossible to control by an attacker. Aside from that it
	 * would only affect the first schedule, so the theoretically
	 * exposed data is not really interesting.
	 */
	if (static_branch_likely(&switch_mm_cond_ibpb)) {
		unsigned long prev_mm, next_mm;

		/*
		 * This is a bit more complex than the always mode because
		 * it has to handle two cases:
		 *
		 * 1) Switch from a user space task (potential attacker)
		 *    which has TIF_SPEC_IB set to a user space task
		 *    (potential victim) which has TIF_SPEC_IB not set.
		 *
		 * 2) Switch from a user space task (potential attacker)
		 *    which has TIF_SPEC_IB not set to a user space task
		 *    (potential victim) which has TIF_SPEC_IB set.
		 *
		 * This could be done by unconditionally issuing IBPB when
		 * a task which has TIF_SPEC_IB set is either scheduled in
		 * or out. Though that results in two flushes when:
		 *
		 * - the same user space task is scheduled out and later
		 *   scheduled in again and only a kernel thread ran in
		 *   between.
		 *
		 * - a user space task belonging to the same process is
		 *   scheduled in after a kernel thread ran in between.
		 *
		 * - a user space task belonging to the same process is
		 *   scheduled in immediately.
		 *
		 * Optimize this with reasonably small overhead for the
		 * above cases. Mangle the TIF_SPEC_IB bit into the mm
		 * pointer of the incoming task, which is stored in
		 * cpu_tlbstate.last_user_mm_ibpb for comparison.
		 */
		next_mm = mm_mangle_tif_spec_ib(next);
		prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_ibpb);

		/*
		 * Issue IBPB only if the mm pointers differ and one or
		 * both have the IBPB bit set.
		 */
		if (next_mm != prev_mm &&
		    (next_mm | prev_mm) & LAST_USER_MM_IBPB)
			indirect_branch_prediction_barrier();

		this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, next_mm);
	}

	if (static_branch_unlikely(&switch_mm_always_ibpb)) {
		/*
		 * Only flush when switching to a user space task with a
		 * different context than the user space task which ran
		 * last on this CPU.
		 */
		if (this_cpu_read(cpu_tlbstate.last_user_mm) != next->mm) {
			indirect_branch_prediction_barrier();
			this_cpu_write(cpu_tlbstate.last_user_mm, next->mm);
		}
	}
}

void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
	u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
	bool was_lazy = this_cpu_read(cpu_tlbstate.is_lazy);
	unsigned cpu = smp_processor_id();
	u64 next_tlb_gen;
	bool need_flush;
	u16 new_asid;

	/*
	 * NB: The scheduler will call us with prev == next when switching
	 * from lazy TLB mode to normal mode if active_mm isn't changing.
	 * When this happens, we don't assume that CR3 (and hence
	 * cpu_tlbstate.loaded_mm) matches next.
	 *
	 * NB: leave_mm() calls us with prev == NULL and tsk == NULL.
	 */

	/* We don't want flush_tlb_func_* to run concurrently with us. */
	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
		WARN_ON_ONCE(!irqs_disabled());

	/*
	 * Verify that CR3 is what we think it is.  This will catch
	 * hypothetical buggy code that directly switches to swapper_pg_dir
	 * without going through leave_mm() / switch_mm_irqs_off() or that
	 * does something like write_cr3(read_cr3_pa()).
	 *
	 * Only do this check if CONFIG_DEBUG_VM=y because __read_cr3()
	 * isn't free.
	 */
#ifdef CONFIG_DEBUG_VM
	if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev->pgd, prev_asid))) {
		/*
		 * If we were to BUG here, we'd be very likely to kill
		 * the system so hard that we don't see the call trace.
		 * Try to recover instead by ignoring the error and doing
		 * a global flush to minimize the chance of corruption.
		 *
		 * (This is far from being a fully correct recovery.
		 *  Architecturally, the CPU could prefetch something
		 *  back into an incorrect ASID slot and leave it there
		 *  to cause trouble down the road.  It's better than
		 *  nothing, though.)
		 */
		__flush_tlb_all();
	}
#endif
	this_cpu_write(cpu_tlbstate.is_lazy, false);

	/*
	 * The membarrier system call requires a full memory barrier and
	 * core serialization before returning to user-space, after
	 * storing to rq->curr. Writing to CR3 provides that full
	 * memory barrier and core serializing instruction.
	 */
	if (real_prev == next) {
		VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
			   next->context.ctx_id);

		/*
		 * Even in lazy TLB mode, the CPU should stay set in the
		 * mm_cpumask. The TLB shootdown code can figure out from
		 * cpu_tlbstate.is_lazy whether or not to send an IPI.
		 */
		if (WARN_ON_ONCE(real_prev != &init_mm &&
				 !cpumask_test_cpu(cpu, mm_cpumask(next))))
			cpumask_set_cpu(cpu, mm_cpumask(next));

		/*
		 * If the CPU is not in lazy TLB mode, we are just switching
		 * from one thread in a process to another thread in the same
		 * process. No TLB flush required.
		 */
		if (!was_lazy)
			return;

		/*
		 * Read the tlb_gen to check whether a flush is needed.
		 * If the TLB is up to date, just use it.
		 * The barrier synchronizes with the tlb_gen increment in
		 * the TLB shootdown code.
		 */
		smp_mb();
		next_tlb_gen = atomic64_read(&next->context.tlb_gen);
		if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) ==
				next_tlb_gen)
			return;

		/*
		 * TLB contents went out of date while we were in lazy
		 * mode. Fall through to the TLB switching code below.
		 */
		new_asid = prev_asid;
		need_flush = true;
	} else {
		/*
		 * Avoid user/user BTB poisoning by flushing the branch
		 * predictor when switching between processes. This stops
		 * one process from doing Spectre-v2 attacks on another.
		 */
		cond_ibpb(tsk);

		if (IS_ENABLED(CONFIG_VMAP_STACK)) {
			/*
			 * If our current stack is in vmalloc space and isn't
			 * mapped in the new pgd, we'll double-fault.  Forcibly
			 * map it.
			 */
			sync_current_stack_to_mm(next);
		}

		/*
		 * Stop remote flushes for the previous mm.
		 * Skip kernel threads; we never send init_mm TLB flushing IPIs,
		 * but the bitmap manipulation can cause cache line contention.
		 */
		if (real_prev != &init_mm) {
			VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu,
						mm_cpumask(real_prev)));
			cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
		}

		/*
		 * Start remote flushes and then read tlb_gen.
		 */
		if (next != &init_mm)
			cpumask_set_cpu(cpu, mm_cpumask(next));
		next_tlb_gen = atomic64_read(&next->context.tlb_gen);

		choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);

		/* Let nmi_uaccess_okay() know that we're changing CR3. */
		this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
		barrier();
	}

	if (need_flush) {
		this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
		this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
		load_new_mm_cr3(next->pgd, new_asid, true);

		/*
		 * NB: This gets called via leave_mm() in the idle path
		 * where RCU functions differently.  Tracing normally
		 * uses RCU, so we need to use the _rcuidle variant.
		 *
		 * (There is no good reason for this.  The idle code should
		 *  be rearranged to call this before rcu_idle_enter().)
		 */
		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
	} else {
		/* The new ASID is already up to date. */
		load_new_mm_cr3(next->pgd, new_asid, false);

		/* See above wrt _rcuidle. */
		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
	}

	/* Make sure we write CR3 before loaded_mm. */
	barrier();

	this_cpu_write(cpu_tlbstate.loaded_mm, next);
	this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);

	if (next != real_prev) {
		load_mm_cr4(next);
		switch_ldt(real_prev, next);
	}
}

/*
 * Please ignore the name of this function.  It should be called
 * switch_to_kernel_thread().
 *
 * enter_lazy_tlb() is a hint from the scheduler that we are entering a
 * kernel thread or other context without an mm.  Acceptable implementations
 * include doing nothing whatsoever, switching to init_mm, or various clever
 * lazy tricks to try to minimize TLB flushes.
 *
 * The scheduler reserves the right to call enter_lazy_tlb() several times
 * in a row.  It will notify us that we're going back to a real mm by
 * calling switch_mm_irqs_off().
 */
void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
		return;

	this_cpu_write(cpu_tlbstate.is_lazy, true);
}

/*
 * Call this when reinitializing a CPU.  It fixes the following potential
 * problems:
 *
 * - The ASID changed from what cpu_tlbstate thinks it is (most likely
 *   because the CPU was taken down and came back up with CR3's PCID
 *   bits clear).  CPU hotplug can do this.
 *
 * - The TLB contains junk in slots corresponding to inactive ASIDs.
 *
 * - The CPU went so far out to lunch that it may have missed a TLB
 *   flush.
 */
void initialize_tlbstate_and_flush(void)
{
	int i;
	struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	u64 tlb_gen = atomic64_read(&init_mm.context.tlb_gen);
	unsigned long cr3 = __read_cr3();

	/* Assert that CR3 already references the right mm. */
	WARN_ON((cr3 & CR3_ADDR_MASK) != __pa(mm->pgd));

	/*
	 * Assert that CR4.PCIDE is set if needed.  (CR4.PCIDE initialization
	 * doesn't work like other CR4 bits because it can only be set from
	 * long mode.)
	 */
	WARN_ON(boot_cpu_has(X86_FEATURE_PCID) &&
		!(cr4_read_shadow() & X86_CR4_PCIDE));

	/* Force ASID 0 and force a TLB flush. */
	write_cr3(build_cr3(mm->pgd, 0));

	/* Reinitialize tlbstate. */
	this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, LAST_USER_MM_IBPB);
	this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
	this_cpu_write(cpu_tlbstate.next_asid, 1);
	this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
	this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, tlb_gen);

	for (i = 1; i < TLB_NR_DYN_ASIDS; i++)
		this_cpu_write(cpu_tlbstate.ctxs[i].ctx_id, 0);
}

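/*
 * Illustrative walk through the generation counters: suppose an mm's
 * tlb_gen is currently 1.  A call to flush_tlb_mm_range() bumps it to 2
 * and asks every CPU in mm_cpumask to catch up to new_tlb_gen == 2.  A
 * CPU whose local tlb_gen is already 2 has nothing to do; one still at 1
 * flushes and then records 2.
 */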
/*
 * flush_tlb_func_common()'s memory ordering requirement is that any
 * TLB fills that happen after we flush the TLB are ordered after we
 * read active_mm's tlb_gen.  We don't need any explicit barriers
 * because all x86 flush operations are serializing and the
 * atomic64_read operation won't be reordered by the compiler.
 */
static void flush_tlb_func_common(const struct flush_tlb_info *f,
				  bool local, enum tlb_flush_reason reason)
{
	/*
	 * We have three different tlb_gen values in here.  They are:
	 *
	 * - mm_tlb_gen:     the latest generation.
	 * - local_tlb_gen:  the generation that this CPU has already caught
	 *                   up to.
	 * - f->new_tlb_gen: the generation that the requester of the flush
	 *                   wants us to catch up to.
	 */
	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
	u64 mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
	u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);

	/* This code cannot presently handle being reentered. */
	VM_WARN_ON(!irqs_disabled());

	if (unlikely(loaded_mm == &init_mm))
		return;

	VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
		   loaded_mm->context.ctx_id);

	if (this_cpu_read(cpu_tlbstate.is_lazy)) {
		/*
		 * We're in lazy mode.  We need to at least flush our
		 * paging-structure cache to avoid speculatively reading
		 * garbage into our TLB.  Since switching to init_mm is barely
		 * slower than a minimal flush, just switch to init_mm.
		 *
		 * This should be rare, with native_flush_tlb_others()
		 * skipping IPIs to lazy TLB mode CPUs.
		 */
		switch_mm_irqs_off(NULL, &init_mm, NULL);
		return;
	}

	if (unlikely(local_tlb_gen == mm_tlb_gen)) {
		/*
		 * There's nothing to do: we're already up to date.  This can
		 * happen if two concurrent flushes happen -- the first flush to
		 * be handled can catch us all the way up, leaving no work for
		 * the second flush.
		 */
		trace_tlb_flush(reason, 0);
		return;
	}

	WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen);
	WARN_ON_ONCE(f->new_tlb_gen > mm_tlb_gen);

	/*
	 * If we get to this point, we know that our TLB is out of date.
	 * This does not strictly imply that we need to flush (it's
	 * possible that f->new_tlb_gen <= local_tlb_gen), but we're
	 * going to need to flush in the very near future, so we might
	 * as well get it over with.
	 *
	 * The only question is whether to do a full or partial flush.
	 *
	 * We do a partial flush if requested and two extra conditions
	 * are met:
	 *
	 * 1. f->new_tlb_gen == local_tlb_gen + 1.  We have an invariant that
	 *    we've always done all needed flushes to catch up to
	 *    local_tlb_gen.  If, for example, local_tlb_gen == 2 and
	 *    f->new_tlb_gen == 3, then we know that the flush needed to bring
	 *    us up to date for tlb_gen 3 is the partial flush we're
	 *    processing.
	 *
	 *    As an example of why this check is needed, suppose that there
	 *    are two concurrent flushes.  The first is a full flush that
	 *    changes context.tlb_gen from 1 to 2.  The second is a partial
	 *    flush that changes context.tlb_gen from 2 to 3.  If they get
	 *    processed on this CPU in reverse order, we'll see
	 *    local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL.
	 *    If we were to use __flush_tlb_one_user() and set local_tlb_gen
	 *    to 3, we'd break the invariant: we'd update local_tlb_gen above
	 *    1 without the full flush that's needed for tlb_gen 2.
	 *
	 * 2. f->new_tlb_gen == mm_tlb_gen.  This is purely an optimization.
	 *    Partial TLB flushes are not all that much cheaper than full TLB
	 *    flushes, so it seems unlikely that it would be a performance win
	 *    to do a partial flush if that won't bring our TLB fully up to
	 *    date.  By doing a full flush instead, we can increase
	 *    local_tlb_gen all the way to mm_tlb_gen and we can probably
	 *    avoid another flush in the very near future.
	 */
	if (f->end != TLB_FLUSH_ALL &&
	    f->new_tlb_gen == local_tlb_gen + 1 &&
	    f->new_tlb_gen == mm_tlb_gen) {
		/* Partial flush */
		unsigned long nr_invalidate = (f->end - f->start) >> f->stride_shift;
		unsigned long addr = f->start;

		while (addr < f->end) {
			__flush_tlb_one_user(addr);
			addr += 1UL << f->stride_shift;
		}
		if (local)
			count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_invalidate);
		trace_tlb_flush(reason, nr_invalidate);
	} else {
		/* Full flush. */
		local_flush_tlb();
		if (local)
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		trace_tlb_flush(reason, TLB_FLUSH_ALL);
	}

	/* Both paths above update our state to mm_tlb_gen. */
	this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
}

static void flush_tlb_func_local(void *info, enum tlb_flush_reason reason)
{
	const struct flush_tlb_info *f = info;

	flush_tlb_func_common(f, true, reason);
}

static void flush_tlb_func_remote(void *info)
{
	const struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
		return;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
}

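/*
 * Condition callback for on_each_cpu_cond_mask() below: skip CPUs that
 * are in lazy TLB mode; they will catch up via switch_mm_irqs_off() at
 * their next context switch.
 */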
static bool tlb_is_not_lazy(int cpu, void *data)
{
	return !per_cpu(cpu_tlbstate.is_lazy, cpu);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	if (info->end == TLB_FLUSH_ALL)
		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
	else
		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
				(info->end - info->start) >> PAGE_SHIFT);

	if (is_uv_system()) {
		/*
		 * This whole special case is confused.  UV has a "Broadcast
		 * Assist Unit", which seems to be a fancy way to send IPIs.
		 * Back when x86 used an explicit TLB flush IPI, UV was
		 * optimized to use its own mechanism.  These days, x86 uses
		 * smp_call_function_many(), but UV still uses a manual IPI,
		 * and that IPI's action is out of date -- it does a manual
		 * flush instead of calling flush_tlb_func_remote().  This
		 * means that the percpu tlb_gen variables won't be updated
		 * and we'll do pointless flushes on future context switches.
		 *
		 * Rather than hooking native_flush_tlb_others() here, I think
		 * that UV should be updated so that smp_call_function_many(),
		 * etc, are optimal on UV.
		 */
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, info);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func_remote,
					       (void *)info, 1);
		return;
	}

	/*
	 * If no page tables were freed, we can skip sending IPIs to
	 * CPUs in lazy TLB mode. They will flush their TLB themselves
	 * at the next context switch.
	 *
	 * However, if page tables are getting freed, we need to send the
	 * IPI everywhere, to prevent CPUs in lazy TLB mode from tripping
	 * up on the new contents of what used to be page tables, while
	 * doing a speculative memory access.
	 */
	if (info->freed_tables)
		smp_call_function_many(cpumask, flush_tlb_func_remote,
			       (void *)info, 1);
	else
		on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func_remote,
				(void *)info, 1, GFP_ATOMIC, cpumask);
}

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,000 ns.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

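/*
 * Flush the user address range [start, end) of @mm, at @stride_shift
 * granularity, on every CPU that might be caching it.  Illustrative
 * call (hypothetical): after changing a single PTE, a caller would do
 * flush_tlb_mm_range(mm, addr, addr + PAGE_SIZE, PAGE_SHIFT, false).
 */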
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned int stride_shift,
				bool freed_tables)
{
	int cpu;

	struct flush_tlb_info info __aligned(SMP_CACHE_BYTES) = {
		.mm = mm,
		.stride_shift = stride_shift,
		.freed_tables = freed_tables,
	};

	cpu = get_cpu();

	/* This is also a barrier that synchronizes with switch_mm(). */
	info.new_tlb_gen = inc_mm_tlb_gen(mm);

	/* Should we flush just the requested range? */
	if ((end != TLB_FLUSH_ALL) &&
	    ((end - start) >> stride_shift) <= tlb_single_page_flush_ceiling) {
		info.start = start;
		info.end = end;
	} else {
		info.start = 0UL;
		info.end = TLB_FLUSH_ALL;
	}

	if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
		VM_WARN_ON(irqs_disabled());
		local_irq_disable();
		flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN);
		local_irq_enable();
	}

	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), &info);

	put_cpu();
}

static void do_flush_tlb_all(void *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
}

void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* Flush the range one 'invlpg' at a time. */
	for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
		__flush_tlb_one_kernel(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* Use the same cutoff as user space flushes; a bit conservative. */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	} else {
		struct flush_tlb_info info;

		info.start = start;
		info.end = end;
		on_each_cpu(do_kernel_range_flush, &info, 1);
	}
}

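/*
 * Flush everything queued in an arch_tlbflush_unmap_batch; called (via
 * try_to_unmap_flush()) once reclaim has finished batching unmaps, so
 * one flush covers many PTE clears.
 */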
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
	struct flush_tlb_info info = {
		.mm = NULL,
		.start = 0UL,
		.end = TLB_FLUSH_ALL,
	};

	int cpu = get_cpu();

	if (cpumask_test_cpu(cpu, &batch->cpumask)) {
		VM_WARN_ON(irqs_disabled());
		local_irq_disable();
		flush_tlb_func_local(&info, TLB_LOCAL_SHOOTDOWN);
		local_irq_enable();
	}

	if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
		flush_tlb_others(&batch->cpumask, &info);

	cpumask_clear(&batch->cpumask);

	put_cpu();
}

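/*
 * debugfs knob for tlb_single_page_flush_ceiling, created below in
 * create_tlb_single_page_flush_ceiling().  With debugfs mounted in the
 * usual place it appears as
 * /sys/kernel/debug/x86/tlb_single_page_flush_ceiling, e.g.:
 *
 *	echo 50 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 */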
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%lu\n", tlb_single_page_flush_ceiling);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	int ceiling;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoint(buf, 0, &ceiling))
		return -EINVAL;

	if (ceiling < 0)
		return -EINVAL;

	tlb_single_page_flush_ceiling = ceiling;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
	return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);