xref: /openbmc/linux/arch/x86/kernel/tsc_sync.c (revision b1c2d09a)
// SPDX-License-Identifier: GPL-2.0
/*
 * Check TSC synchronization.
 *
 * Copyright (C) 2006, Red Hat, Inc., Ingo Molnar
 *
 * We check whether all boot CPUs have their TSCs synchronized,
 * print a warning if not and turn off the TSC clock-source.
 *
 * The warp-check is point-to-point between two CPUs, the CPU
 * initiating the bootup is the 'source CPU', the freshly booting
 * CPU is the 'target CPU'.
 *
 * Only two CPUs may participate - they can enter in any order.
 * ( The serial nature of the boot logic and the CPU hotplug lock
 *   protects against more than 2 CPUs entering this code. )
 */
#include <linux/workqueue.h>
#include <linux/topology.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <asm/tsc.h>

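/*
 * Per-CPU bookkeeping for the TSC_ADJUST MSR, as used below: bootval is
 * the value read when the CPU came up, adjusted is the sanitized value
 * the CPU should be running with, nextcheck is the jiffies stamp for
 * the next rate-limited verification, and warned suppresses repeated
 * warnings per CPU.
 */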
struct tsc_adjust {
	s64		bootval;
	s64		adjusted;
	unsigned long	nextcheck;
	bool		warned;
};

static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
static struct timer_list tsc_sync_check_timer;

/*
 * TSCs on different sockets may be reset asynchronously.
 * This may cause the TSC ADJUST value on socket 0 to be NOT 0.
 */
bool __read_mostly tsc_async_resets;

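/*
 * Intended for platform code that knows its sockets are reset
 * independently of each other; the reason string is only used for the
 * log message below.
 */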
void mark_tsc_async_resets(char *reason)
{
	if (tsc_async_resets)
		return;
	tsc_async_resets = true;
	pr_info("tsc: Marking TSC async resets true due to %s\n", reason);
}

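/*
 * Verify that the TSC_ADJUST MSR still holds the value we last wrote
 * and restore it if something changed it behind our back. When @resume
 * is true, the rate limit and the one-shot warning suppression are
 * bypassed.
 */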
void tsc_verify_tsc_adjust(bool resume)
{
	struct tsc_adjust *adj = this_cpu_ptr(&tsc_adjust);
	s64 curval;

	if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
		return;

	/* Skip unnecessary error messages if TSC already unstable */
	if (check_tsc_unstable())
		return;

	/* Rate limit the MSR check */
	if (!resume && time_before(jiffies, adj->nextcheck))
		return;

	adj->nextcheck = jiffies + HZ;

	rdmsrl(MSR_IA32_TSC_ADJUST, curval);
	if (adj->adjusted == curval)
		return;

	/* Restore the original value */
	wrmsrl(MSR_IA32_TSC_ADJUST, adj->adjusted);

	if (!adj->warned || resume) {
		pr_warn(FW_BUG "TSC ADJUST differs: CPU%u %lld --> %lld. Restoring\n",
			smp_processor_id(), adj->adjusted, curval);
		adj->warned = true;
	}
}

/*
 * Normally tsc_sync is checked every time the system enters idle, but
 * there is still the caveat that a system may never enter idle, either
 * because it's too busy or purposely configured not to.
 *
 * So set up a periodic timer (every 10 minutes) to make sure the check
 * is always on.
 */

#define SYNC_CHECK_INTERVAL		(HZ * 600)

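/*
 * Timer callback: verify TSC_ADJUST on the current CPU, then move the
 * timer to the next online CPU so that over time every CPU gets
 * checked. SYNC_CHECK_INTERVAL is in jiffies (HZ * 600 == 600 seconds).
 */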
static void tsc_sync_check_timer_fn(struct timer_list *unused)
{
	int next_cpu;

	tsc_verify_tsc_adjust(false);

	/* Run the check for all onlined CPUs in turn */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);

	tsc_sync_check_timer.expires += SYNC_CHECK_INTERVAL;
	add_timer_on(&tsc_sync_check_timer, next_cpu);
}

static int __init start_sync_check_timer(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_TSC_ADJUST) || tsc_clocksource_reliable)
		return 0;

	timer_setup(&tsc_sync_check_timer, tsc_sync_check_timer_fn, 0);
	tsc_sync_check_timer.expires = jiffies + SYNC_CHECK_INTERVAL;
	add_timer(&tsc_sync_check_timer);

	return 0;
}
late_initcall(start_sync_check_timer);

static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
				   unsigned int cpu, bool bootcpu)
{
	/*
	 * The first online CPU in a package stores the boot value in the
	 * adjustment value. This value might change later via the sync
	 * mechanism. If that fails we still can yell about boot values not
	 * being consistent.
	 *
	 * On the boot CPU we just force-set the ADJUST value to 0 if it's
	 * non-zero. We don't do that on non-boot CPUs because physical
	 * hotplug should have set the ADJUST register to a value > 0 so
	 * the TSC is in sync with the already running CPUs.
	 *
	 * Also don't force the ADJUST value to zero if that is a valid value
	 * for socket 0 as determined by the system arch. This is required
	 * when multiple sockets are reset asynchronously with each other
	 * and socket 0 may not have a TSC ADJUST value of 0.
	 */
	if (bootcpu && bootval != 0) {
		if (likely(!tsc_async_resets)) {
			pr_warn(FW_BUG "TSC ADJUST: CPU%u: %lld force to 0\n",
				cpu, bootval);
			wrmsrl(MSR_IA32_TSC_ADJUST, 0);
			bootval = 0;
		} else {
			pr_info("TSC ADJUST: CPU%u: %lld NOT forced to 0\n",
				cpu, bootval);
		}
	}
	cur->adjusted = bootval;
}

#ifndef CONFIG_SMP
bool __init tsc_store_and_check_tsc_adjust(bool bootcpu)
{
	struct tsc_adjust *cur = this_cpu_ptr(&tsc_adjust);
	s64 bootval;

	if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
		return false;

	/* Skip unnecessary error messages if TSC already unstable */
	if (check_tsc_unstable())
		return false;

	rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
	cur->bootval = bootval;
	cur->nextcheck = jiffies + HZ;
	tsc_sanitize_first_cpu(cur, bootval, smp_processor_id(), bootcpu);
	return false;
}

#else /* !CONFIG_SMP */

/*
 * Store and check the TSC ADJUST MSR if available
 */
bool tsc_store_and_check_tsc_adjust(bool bootcpu)
{
	struct tsc_adjust *ref, *cur = this_cpu_ptr(&tsc_adjust);
	unsigned int refcpu, cpu = smp_processor_id();
	struct cpumask *mask;
	s64 bootval;

	if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
		return false;

	rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
	cur->bootval = bootval;
	cur->nextcheck = jiffies + HZ;
	cur->warned = false;

	/*
	 * The default adjust value cannot be assumed to be zero on any socket.
	 */
	cur->adjusted = bootval;

	/*
	 * Check whether this CPU is the first in a package to come up. In
	 * this case do not check the boot value against another package
	 * because the new package might have been physically hotplugged,
	 * where TSC_ADJUST is expected to be different. When called on the
	 * boot CPU topology_core_cpumask() might not be available yet.
	 */
	mask = topology_core_cpumask(cpu);
	refcpu = mask ? cpumask_any_but(mask, cpu) : nr_cpu_ids;

	if (refcpu >= nr_cpu_ids) {
		/* No other online CPU in this package to compare against. */
		tsc_sanitize_first_cpu(cur, bootval, smp_processor_id(),
				       bootcpu);
		return false;
	}

	ref = per_cpu_ptr(&tsc_adjust, refcpu);
	/*
	 * Compare the boot value and complain if it differs in the
	 * package.
	 */
	if (bootval != ref->bootval)
		printk_once(FW_BUG "TSC ADJUST differs within socket(s), fixing all errors\n");

	/*
	 * The TSC_ADJUST values in a package must be the same. If the boot
	 * value on this newly upcoming CPU differs from the adjustment
	 * value of the already online CPU in this package, set it to that
	 * adjusted value.
	 */
	if (bootval != ref->adjusted) {
		cur->adjusted = ref->adjusted;
		wrmsrl(MSR_IA32_TSC_ADJUST, ref->adjusted);
	}
	/*
	 * We have the TSCs forced to be in sync on this package. Skip sync
	 * test:
	 */
	return true;
}

/*
 * Entry/exit counters that make sure that both CPUs
 * run the measurement code at once:
 */
static atomic_t start_count;
static atomic_t stop_count;
static atomic_t test_runs;
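/*
 * Rendezvous protocol: the target CPU bumps start_count and spins until
 * it reads 2, while the source spins until it reads 1 and then bumps it
 * to 2, so both CPUs enter check_tsc_warp() together. stop_count is
 * used the same way to leave the measurement in lockstep, and test_runs
 * bounds the number of retries.
 */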

/*
 * We use a raw spinlock in this exceptional case, because
 * we want to have the fastest, inlined, non-debug version
 * of a critical section, to be able to prove TSC time-warps:
 */
static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static cycles_t last_tsc;
static cycles_t max_warp;
static int nr_warps;
static int random_warps;

/*
 * TSC-warp measurement loop running on both CPUs.  This is not called
 * if there is no TSC.
 */
static cycles_t check_tsc_warp(unsigned int timeout)
{
	cycles_t start, now, prev, end, cur_max_warp = 0;
	int i, cur_warps = 0;

	start = rdtsc_ordered();
	/*
	 * The measurement runs for 'timeout' msecs; tsc_khz is the number
	 * of cycles per msec, so this yields the end time in cycles:
	 */
	end = start + (cycles_t) tsc_khz * timeout;

	for (i = 0; ; i++) {
		/*
		 * We take the global lock, measure TSC, save the
		 * previous TSC that was measured (possibly on
		 * another CPU) and update the previous TSC timestamp.
		 */
		arch_spin_lock(&sync_lock);
		prev = last_tsc;
		now = rdtsc_ordered();
		last_tsc = now;
		arch_spin_unlock(&sync_lock);

		/*
		 * Be nice every now and then (and also check whether
		 * measurement is done [we also insert a 10 million
		 * loops safety exit, so we don't lock up in case the
		 * TSC readout is totally broken]):
		 */
		if (unlikely(!(i & 7))) {
			if (now > end || i > 10000000)
				break;
			cpu_relax();
			touch_nmi_watchdog();
		}
		/*
		 * Outside the critical section we can now see whether
		 * we saw a time-warp of the TSC going backwards:
		 */
		if (unlikely(prev > now)) {
			arch_spin_lock(&sync_lock);
			max_warp = max(max_warp, prev - now);
			cur_max_warp = max_warp;
			/*
			 * Check whether this bounces back and forth. Only
			 * one CPU should observe time going backwards.
			 */
			if (cur_warps != nr_warps)
				random_warps++;
			nr_warps++;
			cur_warps = nr_warps;
			arch_spin_unlock(&sync_lock);
		}
	}
	WARN(!(now-start),
		"Warning: zero tsc calibration delta: %Ld [max: %Ld]\n",
			now-start, end-start);
	return cur_max_warp;
}

/*
 * If the target CPU coming online doesn't have any of its core-siblings
 * online, a timeout of 20msec will be used for the TSC-warp measurement
 * loop. Otherwise a smaller timeout of 2msec will be used, as we have some
 * information about this socket already (and this information grows as we
 * have more and more logical-siblings in that socket).
 *
 * Ideally we should be able to skip the TSC sync check on the other
 * core-siblings, if the first logical CPU in a socket passed the sync test.
 * But as the TSC is per-logical CPU and can potentially be modified wrongly
 * by the BIOS, a TSC sync test of smaller duration should still be able
 * to catch such errors. Also this will catch the condition where all the
 * cores in the socket don't get reset at the same time.
 */
static inline unsigned int loop_timeout(int cpu)
{
	/* 2 msec once a core-sibling is already online, else 20 msec */
	return (cpumask_weight(topology_core_cpumask(cpu)) > 1) ? 2 : 20;
}

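/*
 * check_tsc_sync_source() runs from an async SMP function call (see
 * below), so marking the TSC unstable is deferred to process context
 * via a workqueue.
 */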
static void tsc_sync_mark_tsc_unstable(struct work_struct *work)
{
	mark_tsc_unstable("check_tsc_sync_source failed");
}

static DECLARE_WORK(tsc_sync_work, tsc_sync_mark_tsc_unstable);

/*
 * The freshly booted CPU initiates this via an async SMP function call.
 */
static void check_tsc_sync_source(void *__cpu)
{
	unsigned int cpu = (unsigned long)__cpu;
	int cpus = 2;

	/*
	 * Set the maximum number of test runs to
	 *  1 if the CPU does not provide the TSC_ADJUST MSR
	 *  3 if the MSR is available, so the target can try to adjust
	 */
	if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
		atomic_set(&test_runs, 1);
	else
		atomic_set(&test_runs, 3);
retry:
	/* Wait for the target to start. */
	while (atomic_read(&start_count) != cpus - 1)
		cpu_relax();

	/*
	 * Trigger the target to continue into the measurement too:
	 */
	atomic_inc(&start_count);

	check_tsc_warp(loop_timeout(cpu));

	while (atomic_read(&stop_count) != cpus-1)
		cpu_relax();

	/*
	 * If the test was successful, set the number of runs to zero and
	 * stop. If not, decrement the number of runs and check if we can
	 * retry. In case of random warps no retry is attempted.
	 */
	if (!nr_warps) {
		atomic_set(&test_runs, 0);

		pr_debug("TSC synchronization [CPU#%d -> CPU#%u]: passed\n",
			smp_processor_id(), cpu);

	} else if (atomic_dec_and_test(&test_runs) || random_warps) {
		/* Force it to 0 if random warps brought us here */
		atomic_set(&test_runs, 0);

		pr_warn("TSC synchronization [CPU#%d -> CPU#%u]:\n",
			smp_processor_id(), cpu);
		pr_warn("Measured %Ld cycles TSC warp between CPUs, "
			"turning off TSC clock.\n", max_warp);
		if (random_warps)
			pr_warn("TSC warped randomly between CPUs\n");
		schedule_work(&tsc_sync_work);
	}

	/*
	 * Reset it - just in case we boot another CPU later:
	 */
	atomic_set(&start_count, 0);
	random_warps = 0;
	nr_warps = 0;
	max_warp = 0;
	last_tsc = 0;

	/*
	 * Let the target continue with the bootup:
	 */
	atomic_inc(&stop_count);

	/*
	 * Retry, if there is a chance to do so.
	 */
	if (atomic_read(&test_runs) > 0)
		goto retry;
}

/*
 * Freshly booted CPUs call into this:
 */
void check_tsc_sync_target(void)
{
	struct tsc_adjust *cur = this_cpu_ptr(&tsc_adjust);
	unsigned int cpu = smp_processor_id();
	cycles_t cur_max_warp, gbl_max_warp;
	int cpus = 2;

	/* Also aborts if there is no TSC. */
	if (unsynchronized_tsc())
		return;

	/*
	 * Store, verify and sanitize the TSC adjust register. If
	 * successful skip the test.
	 *
	 * The test is also skipped when the TSC is marked reliable. This
	 * is true for SoCs which have no fallback clocksource. On these
	 * SoCs the TSC is frequency synchronized, but still the TSC ADJUST
	 * register might have been wrecked by the BIOS.
	 */
	if (tsc_store_and_check_tsc_adjust(false) || tsc_clocksource_reliable)
		return;

	/* Kick the control CPU into the TSC synchronization function */
	smp_call_function_single(cpumask_first(cpu_online_mask), check_tsc_sync_source,
				 (unsigned long *)(unsigned long)cpu, 0);
retry:
	/*
	 * Register this CPU's participation and wait for the
	 * source CPU to start the measurement:
	 */
	atomic_inc(&start_count);
	while (atomic_read(&start_count) != cpus)
		cpu_relax();

	cur_max_warp = check_tsc_warp(loop_timeout(cpu));

	/*
	 * Store the maximum observed warp value for a potential retry:
	 */
	gbl_max_warp = max_warp;

	/*
	 * Ok, we are done:
	 */
	atomic_inc(&stop_count);

	/*
	 * Wait for the source CPU to print stuff:
	 */
	while (atomic_read(&stop_count) != cpus)
		cpu_relax();

	/*
	 * Reset it for the next sync test:
	 */
	atomic_set(&stop_count, 0);

	/*
	 * Check the number of remaining test runs. If not zero, the test
	 * failed and a retry with adjusted TSC is possible. If zero the
	 * test was either successful or failed terminally.
	 */
	if (!atomic_read(&test_runs))
		return;

	/*
	 * If the warp value of this CPU is 0, then the other CPU
	 * observed time going backwards so this TSC was ahead and
	 * needs to move backwards.
	 */
	if (!cur_max_warp)
		cur_max_warp = -gbl_max_warp;

	/*
	 * Add the result to the previous adjustment value.
	 *
	 * The adjustment value is slightly off by the overhead of the
	 * sync mechanism (observed values are ~200 TSC cycles), but this
	 * really depends on CPU, node distance and frequency. So
	 * compensating for this is hard to get right. Experiments show
	 * that the warp is no longer detectable when the observed warp
	 * value is used. In the worst case the adjustment needs to go
	 * through a 3rd run for fine tuning.
	 */
	cur->adjusted += cur_max_warp;
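	/*
	 * Example (illustrative): if the source observed this TSC running
	 * 200 cycles ahead (gbl_max_warp == 200 while cur_max_warp == 0
	 * here), cur_max_warp becomes -200 and the adjustment moves this
	 * TSC back by 200 cycles before the retry.
	 */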

	pr_warn("TSC ADJUST compensate: CPU%u observed %lld warp. Adjust: %lld\n",
		cpu, cur_max_warp, cur->adjusted);

	wrmsrl(MSR_IA32_TSC_ADJUST, cur->adjusted);
	goto retry;

}

#endif /* CONFIG_SMP */