// SPDX-License-Identifier: GPL-2.0
/*
 * Check TSC synchronization.
 *
 * Copyright (C) 2006, Red Hat, Inc., Ingo Molnar
 *
 * We check whether all boot CPUs have their TSCs synchronized,
 * print a warning if not and turn off the TSC clock-source.
 *
 * The warp-check is point-to-point between two CPUs, the CPU
 * initiating the bootup is the 'source CPU', the freshly booting
 * CPU is the 'target CPU'.
 *
 * Only two CPUs may participate - they can enter in any order.
 * ( The serial nature of the boot logic and the CPU hotplug lock
 *   protects against more than 2 CPUs entering this code. )
 */
#include <linux/workqueue.h>
#include <linux/topology.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <asm/tsc.h>

struct tsc_adjust {
	s64		bootval;
	s64		adjusted;
	unsigned long	nextcheck;
	bool		warned;
};

static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
static struct timer_list tsc_sync_check_timer;
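
/*
 * Background: per the Intel SDM, the IA32_TSC_ADJUST MSR holds a signed
 * offset which the CPU adds into the TSC. Writing a delta to the MSR
 * changes subsequent RDTSC readings by the same delta, which is what
 * allows the code below to shift a CPU's TSC without stopping or
 * restarting it.
 */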

/*
 * TSCs on different sockets may be reset asynchronously.
 * This may cause the TSC ADJUST value on socket 0 to be NOT 0.
 */
bool __read_mostly tsc_async_resets;

void mark_tsc_async_resets(char *reason)
{
	if (tsc_async_resets)
		return;
	tsc_async_resets = true;
	pr_info("tsc: Marking TSC async resets true due to %s\n", reason);
}
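
/*
 * Illustration: platform code which knows that its sockets come out of
 * reset at different times is expected to call this once during early
 * boot, before the secondary CPUs are brought up, e.g.:
 *
 *	mark_tsc_async_resets("my-async-reset-platform");
 *
 * The reason string is purely informational; the effect is that the
 * sanitizing code below no longer insists on TSC_ADJUST being 0 on
 * socket 0.
 */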

void tsc_verify_tsc_adjust(bool resume)
{
	struct tsc_adjust *adj = this_cpu_ptr(&tsc_adjust);
	s64 curval;

	if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
		return;

	/* Skip unnecessary error messages if TSC already unstable */
	if (check_tsc_unstable())
		return;

	/* Rate limit the MSR check */
	if (!resume && time_before(jiffies, adj->nextcheck))
		return;

	adj->nextcheck = jiffies + HZ;

	rdmsrl(MSR_IA32_TSC_ADJUST, curval);
	if (adj->adjusted == curval)
		return;

	/* Restore the original value */
	wrmsrl(MSR_IA32_TSC_ADJUST, adj->adjusted);

	if (!adj->warned || resume) {
		pr_warn(FW_BUG "TSC ADJUST differs: CPU%u %lld --> %lld. Restoring\n",
			smp_processor_id(), adj->adjusted, curval);
		adj->warned = true;
	}
}
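
/*
 * Example of what the check above catches: firmware handling an SMI
 * writes IA32_TSC_ADJUST behind the kernel's back, say from 0 to 1000.
 * On the next (rate limited) verification curval is 1000 while
 * adj->adjusted is still 0, so the MSR is rewritten to 0 and a one-time
 * FW_BUG warning names the offending CPU.
 */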

/*
 * Normally the tsc_sync will be checked every time the system enters
 * idle, but there is still a caveat: a system may never enter idle,
 * either because it is too busy or because it is purposely configured
 * not to.
 *
 * So set up a periodic timer (every 10 minutes) to make sure the check
 * is always on.
 */

#define SYNC_CHECK_INTERVAL		(HZ * 600)

static void tsc_sync_check_timer_fn(struct timer_list *unused)
{
	int next_cpu;

	tsc_verify_tsc_adjust(false);

	/* Run the check for all onlined CPUs in turn */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);

	tsc_sync_check_timer.expires += SYNC_CHECK_INTERVAL;
	add_timer_on(&tsc_sync_check_timer, next_cpu);
}
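
/*
 * Example of the rotation: with CPUs 0, 1 and 3 online, a timer firing
 * on CPU1 is re-armed on CPU3, and one firing on CPU3 wraps around to
 * CPU0. Each online CPU thus gets its TSC_ADJUST verified roughly once
 * per num_online_cpus() * SYNC_CHECK_INTERVAL.
 */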

static int __init start_sync_check_timer(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_TSC_ADJUST) || tsc_clocksource_reliable)
		return 0;

	timer_setup(&tsc_sync_check_timer, tsc_sync_check_timer_fn, 0);
	tsc_sync_check_timer.expires = jiffies + SYNC_CHECK_INTERVAL;
	add_timer(&tsc_sync_check_timer);

	return 0;
}
late_initcall(start_sync_check_timer);

static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
				   unsigned int cpu, bool bootcpu)
{
	/*
	 * First online CPU in a package stores the boot value in the
	 * adjustment value. This value might change later via the sync
	 * mechanism. If that fails we can still yell about boot values
	 * not being consistent.
	 *
	 * On the boot CPU we just force-set the ADJUST value to 0 if it's
	 * non-zero. We don't do that on non-boot CPUs because physical
	 * hotplug should have set the ADJUST register to a value > 0 so
	 * the TSC is in sync with the already running CPUs.
	 *
	 * Also don't force the ADJUST value to zero if that is a valid
	 * value for socket 0 as determined by the system arch. This is
	 * required when multiple sockets are reset asynchronously with
	 * each other and socket 0 may not have a TSC ADJUST value of 0.
	 */
	if (bootcpu && bootval != 0) {
		if (likely(!tsc_async_resets)) {
			pr_warn(FW_BUG "TSC ADJUST: CPU%u: %lld force to 0\n",
				cpu, bootval);
			wrmsrl(MSR_IA32_TSC_ADJUST, 0);
			bootval = 0;
		} else {
			pr_info("TSC ADJUST: CPU%u: %lld NOT forced to 0\n",
				cpu, bootval);
		}
	}
	cur->adjusted = bootval;
}
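
/*
 * The cases above by example:
 *
 *  1) Boot CPU, bootval 500, no async resets: the MSR is forced to 0,
 *     cur->adjusted becomes 0 and the firmware is blamed (FW_BUG).
 *  2) Boot CPU, bootval 500, async resets marked: the value is trusted
 *     and cur->adjusted becomes 500.
 *  3) First CPU of a physically hotplugged package: whatever value the
 *     hotplug path programmed becomes the reference for the package.
 */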

#ifndef CONFIG_SMP
bool __init tsc_store_and_check_tsc_adjust(bool bootcpu)
{
	struct tsc_adjust *cur = this_cpu_ptr(&tsc_adjust);
	s64 bootval;

	if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
		return false;

	/* Skip unnecessary error messages if TSC already unstable */
	if (check_tsc_unstable())
		return false;

	rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
	cur->bootval = bootval;
	cur->nextcheck = jiffies + HZ;
	tsc_sanitize_first_cpu(cur, bootval, smp_processor_id(), bootcpu);
	return false;
}

#else /* !CONFIG_SMP */

/*
 * Store and check the TSC ADJUST MSR if available
 */
bool tsc_store_and_check_tsc_adjust(bool bootcpu)
{
	struct tsc_adjust *ref, *cur = this_cpu_ptr(&tsc_adjust);
	unsigned int refcpu, cpu = smp_processor_id();
	struct cpumask *mask;
	s64 bootval;

	if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
		return false;

	rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
	cur->bootval = bootval;
	cur->nextcheck = jiffies + HZ;
	cur->warned = false;

	/*
	 * If a non-zero TSC value for socket 0 may be valid then the
	 * default adjusted value cannot be assumed to be zero either.
	 */
	if (tsc_async_resets)
		cur->adjusted = bootval;

	/*
	 * Check whether this CPU is the first in a package to come up. In
	 * this case do not check the boot value against another package
	 * because the new package might have been physically hotplugged,
	 * where TSC_ADJUST is expected to be different. When called on the
	 * boot CPU topology_core_cpumask() might not be available yet.
	 */
	mask = topology_core_cpumask(cpu);
	refcpu = mask ? cpumask_any_but(mask, cpu) : nr_cpu_ids;

	if (refcpu >= nr_cpu_ids) {
		tsc_sanitize_first_cpu(cur, bootval, smp_processor_id(),
				       bootcpu);
		return false;
	}

	ref = per_cpu_ptr(&tsc_adjust, refcpu);

	/*
	 * Compare the boot value and complain if it differs in the
	 * package.
	 */
	if (bootval != ref->bootval)
		printk_once(FW_BUG "TSC ADJUST differs within socket(s), fixing all errors\n");

	/*
	 * The TSC_ADJUST values in a package must be the same. If the boot
	 * value on this newly upcoming CPU differs from the adjustment
	 * value of the already online CPU in this package, set it to that
	 * adjusted value.
	 */
	if (bootval != ref->adjusted) {
		cur->adjusted = ref->adjusted;
		wrmsrl(MSR_IA32_TSC_ADJUST, ref->adjusted);
	}

	/*
	 * We have the TSCs forced to be in sync on this package. Skip the
	 * sync test:
	 */
	return true;
}
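
/*
 * Example: CPU2 comes online in a package where CPU0 is already up
 * with ref->adjusted == -300. If CPU2 boots with TSC_ADJUST == 0, its
 * MSR is rewritten to -300 so both TSCs in the package read the same,
 * and the expensive warp test below is skipped entirely.
 */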

/*
 * Entry/exit counters that make sure that both CPUs
 * run the measurement code at once:
 */
static atomic_t start_count;
static atomic_t stop_count;
static atomic_t test_runs;

/*
 * We use a raw spinlock in this exceptional case, because
 * we want to have the fastest, inlined, non-debug version
 * of a critical section, to be able to prove TSC time-warps:
 */
static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static cycles_t last_tsc;
static cycles_t max_warp;
static int nr_warps;
static int random_warps;

/*
 * TSC-warp measurement loop running on both CPUs. This is not called
 * if there is no TSC.
 */
static cycles_t check_tsc_warp(unsigned int timeout)
{
	cycles_t start, now, prev, end, cur_max_warp = 0;
	int i, cur_warps = 0;

	start = rdtsc_ordered();
	/*
	 * The measurement runs for 'timeout' msecs:
	 */
	end = start + (cycles_t) tsc_khz * timeout;

	for (i = 0; ; i++) {
		/*
		 * We take the global lock, measure TSC, save the
		 * previous TSC that was measured (possibly on
		 * another CPU) and update the previous TSC timestamp.
		 */
		arch_spin_lock(&sync_lock);
		prev = last_tsc;
		now = rdtsc_ordered();
		last_tsc = now;
		arch_spin_unlock(&sync_lock);

		/*
		 * Be nice every now and then (and also check whether
		 * the measurement is done [we also insert a 10 million
		 * loops safety exit, so we don't lock up in case the
		 * TSC readout is totally broken]):
		 */
		if (unlikely(!(i & 7))) {
			if (now > end || i > 10000000)
				break;
			cpu_relax();
			touch_nmi_watchdog();
		}
		/*
		 * Outside the critical section we can now see whether
		 * we saw a time-warp of the TSC going backwards:
		 */
		if (unlikely(prev > now)) {
			arch_spin_lock(&sync_lock);
			max_warp = max(max_warp, prev - now);
			cur_max_warp = max_warp;
			/*
			 * Check whether this bounces back and forth. Only
			 * one CPU should observe time going backwards.
			 */
			if (cur_warps != nr_warps)
				random_warps++;
			nr_warps++;
			cur_warps = nr_warps;
			arch_spin_unlock(&sync_lock);
		}
	}
	WARN(!(now-start),
	     "Warning: zero tsc calibration delta: %Ld [max: %Ld]\n",
	     now-start, end-start);
	return cur_max_warp;
}
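
/*
 * Note on the loop above: tsc_khz is in kHz, i.e. cycles per
 * millisecond, so 'end' lands 'timeout' msecs after 'start' (a 20 msec
 * run on a 2.5 GHz TSC spans 50,000,000 cycles). A non-zero return
 * value means this CPU read a TSC value at least that many cycles
 * behind a reading taken earlier (usually on the other CPU), i.e. this
 * CPU's TSC lags.
 */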

/*
 * If the target CPU coming online doesn't have any of its core-siblings
 * online, a timeout of 20msec will be used for the TSC-warp measurement
 * loop. Otherwise a smaller timeout of 2msec will be used, as we have
 * some information about this socket already (and this information
 * grows as we have more and more logical-siblings in that socket).
 *
 * Ideally we should be able to skip the TSC sync check on the other
 * core-siblings, if the first logical CPU in a socket passed the sync
 * test. But as the TSC is per-logical CPU and can potentially be
 * modified wrongly by the BIOS, a TSC sync test of smaller duration
 * should still be able to catch such errors. Also this will catch the
 * condition where all the cores in the socket don't get reset at the
 * same time.
 */
static inline unsigned int loop_timeout(int cpu)
{
	return (cpumask_weight(topology_core_cpumask(cpu)) > 1) ? 2 : 20;
}

static void tsc_sync_mark_tsc_unstable(struct work_struct *work)
{
	mark_tsc_unstable("check_tsc_sync_source failed");
}

static DECLARE_WORK(tsc_sync_work, tsc_sync_mark_tsc_unstable);
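
/*
 * The failure path is deferred to a work queue because the source side
 * of the sync check runs in an async SMP function call, i.e. in hard
 * interrupt context, where calling mark_tsc_unstable() directly is not
 * safe.
 */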

/*
 * The freshly booted CPU initiates this via an async SMP function call.
 */
static void check_tsc_sync_source(void *__cpu)
{
	unsigned int cpu = (unsigned long)__cpu;
	int cpus = 2;

	/*
	 * Set the maximum number of test runs to
	 *  1 if the CPU does not provide the TSC_ADJUST MSR
	 *  3 if the MSR is available, so the target can try to adjust
	 */
	if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
		atomic_set(&test_runs, 1);
	else
		atomic_set(&test_runs, 3);
retry:
	/* Wait for the target to start. */
	while (atomic_read(&start_count) != cpus - 1)
		cpu_relax();

	/*
	 * Trigger the target to continue into the measurement too:
	 */
	atomic_inc(&start_count);

	check_tsc_warp(loop_timeout(cpu));

	while (atomic_read(&stop_count) != cpus - 1)
		cpu_relax();

	/*
	 * If the test was successful set the number of runs to zero and
	 * stop. If not, decrement the number of runs and check if we can
	 * retry. In case of random warps no retry is attempted.
	 */
	if (!nr_warps) {
		atomic_set(&test_runs, 0);

		pr_debug("TSC synchronization [CPU#%d -> CPU#%u]: passed\n",
			 smp_processor_id(), cpu);

	} else if (atomic_dec_and_test(&test_runs) || random_warps) {
		/* Force it to 0 if random warps brought us here */
		atomic_set(&test_runs, 0);

		pr_warn("TSC synchronization [CPU#%d -> CPU#%u]:\n",
			smp_processor_id(), cpu);
		pr_warn("Measured %Ld cycles TSC warp between CPUs, turning off TSC clock.\n",
			max_warp);
		if (random_warps)
			pr_warn("TSC warped randomly between CPUs\n");
		schedule_work(&tsc_sync_work);
	}

	/*
	 * Reset it - just in case we boot another CPU later:
	 */
	atomic_set(&start_count, 0);
	random_warps = 0;
	nr_warps = 0;
	max_warp = 0;
	last_tsc = 0;

	/*
	 * Let the target continue with the bootup:
	 */
	atomic_inc(&stop_count);

	/*
	 * Retry, if there is a chance to do so.
	 */
	if (atomic_read(&test_runs) > 0)
		goto retry;
}
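
/*
 * Sketch of the start/stop handshake between the two CPUs (cpus is
 * always 2):
 *
 *	target: inc(start_count)                        -> 1
 *	source: sees start_count == 1, inc(start_count) -> 2
 *	target: sees start_count == 2
 *	both:   check_tsc_warp()
 *	target: inc(stop_count)                         -> 1
 *	source: sees stop_count == 1, evaluates the result, resets the
 *	        warp state and start_count, inc(stop_count) -> 2
 *	target: sees stop_count == 2, resets stop_count to 0
 */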

/*
 * Freshly booted CPUs call into this:
 */
void check_tsc_sync_target(void)
{
	struct tsc_adjust *cur = this_cpu_ptr(&tsc_adjust);
	unsigned int cpu = smp_processor_id();
	cycles_t cur_max_warp, gbl_max_warp;
	int cpus = 2;

	/* Also aborts if there is no TSC. */
	if (unsynchronized_tsc())
		return;

	/*
	 * Store, verify and sanitize the TSC adjust register. If
	 * successful skip the test.
	 *
	 * The test is also skipped when the TSC is marked reliable. This
	 * is true for SoCs which have no fallback clocksource. On these
	 * SoCs the TSC is frequency synchronized, but still the TSC ADJUST
	 * register might have been wrecked by the BIOS.
	 */
	if (tsc_store_and_check_tsc_adjust(false) || tsc_clocksource_reliable)
		return;

	/* Kick the control CPU into the TSC synchronization function */
	smp_call_function_single(cpumask_first(cpu_online_mask), check_tsc_sync_source,
				 (unsigned long *)(unsigned long)cpu, 0);
retry:
	/*
	 * Register this CPU's participation and wait for the
	 * source CPU to start the measurement:
	 */
	atomic_inc(&start_count);
	while (atomic_read(&start_count) != cpus)
		cpu_relax();

	cur_max_warp = check_tsc_warp(loop_timeout(cpu));

	/*
	 * Store the maximum observed warp value for a potential retry:
	 */
	gbl_max_warp = max_warp;

	/*
	 * Ok, we are done:
	 */
	atomic_inc(&stop_count);

	/*
	 * Wait for the source CPU to print stuff:
	 */
	while (atomic_read(&stop_count) != cpus)
		cpu_relax();

	/*
	 * Reset it for the next sync test:
	 */
	atomic_set(&stop_count, 0);

	/*
	 * Check the number of remaining test runs. If not zero, the test
	 * failed and a retry with adjusted TSC is possible. If zero the
	 * test was either successful or failed terminally.
	 */
	if (!atomic_read(&test_runs))
		return;

	/*
	 * If the warp value of this CPU is 0, then the other CPU
	 * observed time going backwards so this TSC was ahead and
	 * needs to move backwards.
	 */
	if (!cur_max_warp)
		cur_max_warp = -gbl_max_warp;

	/*
	 * Add the result to the previous adjustment value.
	 *
	 * The adjustment value is slightly off by the overhead of the
	 * sync mechanism (observed values are ~200 TSC cycles), but this
	 * really depends on CPU, node distance and frequency. So
	 * compensating for this is hard to get right. Experiments show
	 * that the warp is no longer detectable when the observed warp
	 * value is used. In the worst case the adjustment needs to go
	 * through a 3rd run for fine tuning.
	 */
	cur->adjusted += cur_max_warp;

	pr_warn("TSC ADJUST compensate: CPU%u observed %lld warp. Adjust: %lld\n",
		cpu, cur_max_warp, cur->adjusted);

	wrmsrl(MSR_IA32_TSC_ADJUST, cur->adjusted);
	goto retry;
}

#endif /* CONFIG_SMP */