// SPDX-License-Identifier: GPL-2.0
/*
 * check TSC synchronization.
 *
 * Copyright (C) 2006, Red Hat, Inc., Ingo Molnar
 *
 * We check whether all boot CPUs have their TSCs synchronized,
 * print a warning if not and turn off the TSC clock-source.
 *
 * The warp-check is point-to-point between two CPUs, the CPU
 * initiating the bootup is the 'source CPU', the freshly booting
 * CPU is the 'target CPU'.
 *
 * Only two CPUs may participate - they can enter in any order.
 * ( The serial nature of the boot logic and the CPU hotplug lock
 *   protects against more than 2 CPUs entering this code. )
 */
#include <linux/topology.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <asm/tsc.h>

struct tsc_adjust {
	s64		bootval;
	s64		adjusted;
	unsigned long	nextcheck;
	bool		warned;
};

static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
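/*
 * Background, summarized from the SDM: on CPUs with
 * X86_FEATURE_TSC_ADJUST, the value returned by RDTSC is the sum of
 * the free-running internal counter and the IA32_TSC_ADJUST MSR,
 * roughly:
 *
 *	RDTSC == internal counter + MSR_IA32_TSC_ADJUST
 *
 * Writing the MSR therefore shifts the TSC visible to software
 * without stopping or disturbing the counter itself, which is what
 * the verify/sanitize/compensate code below relies on.
 */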
/*
 * TSCs on different sockets may be reset asynchronously.
 * This may cause the TSC ADJUST value on socket 0 to be NOT 0.
 */
bool __read_mostly tsc_async_resets;

void mark_tsc_async_resets(char *reason)
{
	if (tsc_async_resets)
		return;
	tsc_async_resets = true;
	pr_info("tsc: Marking TSC async resets true due to %s\n", reason);
}
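/*
 * Runtime guard for the cached ADJUST value: if TSC_ADJUST changes
 * behind the kernel's back - the FW_BUG tag below suggests firmware,
 * e.g. SMM code, as the usual suspect - detect the mismatch on the
 * next (rate limited) check and write the cached value back.
 */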
void tsc_verify_tsc_adjust(bool resume)
{
	struct tsc_adjust *adj = this_cpu_ptr(&tsc_adjust);
	s64 curval;

	if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
		return;

	/* Skip unnecessary error messages if TSC already unstable */
	if (check_tsc_unstable())
		return;

	/* Rate limit the MSR check */
	if (!resume && time_before(jiffies, adj->nextcheck))
		return;

	adj->nextcheck = jiffies + HZ;

	rdmsrl(MSR_IA32_TSC_ADJUST, curval);
	if (adj->adjusted == curval)
		return;

	/* Restore the original value */
	wrmsrl(MSR_IA32_TSC_ADJUST, adj->adjusted);

	if (!adj->warned || resume) {
		pr_warn(FW_BUG "TSC ADJUST differs: CPU%u %lld --> %lld. Restoring\n",
			smp_processor_id(), adj->adjusted, curval);
		adj->warned = true;
	}
}

static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
				   unsigned int cpu, bool bootcpu)
{
	/*
	 * First online CPU in a package stores the boot value in the
	 * adjustment value. This value might change later via the sync
	 * mechanism. If that fails we still can yell about boot values not
	 * being consistent.
	 *
	 * On the boot cpu we just force set the ADJUST value to 0 if it's
	 * non zero. We don't do that on non boot cpus because physical
	 * hotplug should have set the ADJUST register to a value > 0 so
	 * the TSC is in sync with the already running cpus.
	 *
	 * Also don't force the ADJUST value to zero if that is a valid value
	 * for socket 0 as determined by the system arch. This is required
	 * when multiple sockets are reset asynchronously with each other
	 * and socket 0 may not have a TSC ADJUST value of 0.
	 */
	if (bootcpu && bootval != 0) {
		if (likely(!tsc_async_resets)) {
			pr_warn(FW_BUG "TSC ADJUST: CPU%u: %lld force to 0\n",
				cpu, bootval);
			wrmsrl(MSR_IA32_TSC_ADJUST, 0);
			bootval = 0;
		} else {
			pr_info("TSC ADJUST: CPU%u: %lld NOT forced to 0\n",
				cpu, bootval);
		}
	}
	cur->adjusted = bootval;
}
#ifndef CONFIG_SMP
bool __init tsc_store_and_check_tsc_adjust(bool bootcpu)
{
	struct tsc_adjust *cur = this_cpu_ptr(&tsc_adjust);
	s64 bootval;

	if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
		return false;

	/* Skip unnecessary error messages if TSC already unstable */
	if (check_tsc_unstable())
		return false;

	rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
	cur->bootval = bootval;
	cur->nextcheck = jiffies + HZ;
	tsc_sanitize_first_cpu(cur, bootval, smp_processor_id(), bootcpu);
	return false;
}

#else /* !CONFIG_SMP */

/*
 * Store and check the TSC ADJUST MSR if available
 */
bool tsc_store_and_check_tsc_adjust(bool bootcpu)
{
	struct tsc_adjust *ref, *cur = this_cpu_ptr(&tsc_adjust);
	unsigned int refcpu, cpu = smp_processor_id();
	struct cpumask *mask;
	s64 bootval;

	if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
		return false;

	rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
	cur->bootval = bootval;
	cur->nextcheck = jiffies + HZ;
	cur->warned = false;

	/*
	 * If a non-zero TSC value for socket 0 may be valid then the default
	 * adjusted value cannot be assumed to be zero either.
	 */
	if (tsc_async_resets)
		cur->adjusted = bootval;

	/*
	 * Check whether this CPU is the first in a package to come up. In
	 * this case do not check the boot value against another package
	 * because the new package might have been physically hotplugged,
	 * where TSC_ADJUST is expected to be different. When called on the
	 * boot CPU topology_core_cpumask() might not be available yet.
	 */
	mask = topology_core_cpumask(cpu);
	refcpu = mask ? cpumask_any_but(mask, cpu) : nr_cpu_ids;

	if (refcpu >= nr_cpu_ids) {
		tsc_sanitize_first_cpu(cur, bootval, smp_processor_id(),
				       bootcpu);
		return false;
	}

	ref = per_cpu_ptr(&tsc_adjust, refcpu);
	/*
	 * Compare the boot value and complain if it differs in the
	 * package.
	 */
	if (bootval != ref->bootval)
		printk_once(FW_BUG "TSC ADJUST differs within socket(s), fixing all errors\n");

	/*
	 * The TSC_ADJUST values in a package must be the same. If the boot
	 * value on this newly upcoming CPU differs from the adjustment
	 * value of the already online CPU in this package, set it to that
	 * adjusted value.
	 */
	if (bootval != ref->adjusted) {
		cur->adjusted = ref->adjusted;
		wrmsrl(MSR_IA32_TSC_ADJUST, ref->adjusted);
	}
	/*
	 * We have the TSCs forced to be in sync on this package. Skip sync
	 * test:
	 */
	return true;
}

/*
 * Entry/exit counters that make sure that both CPUs
 * run the measurement code at once:
 */
static atomic_t start_count;
static atomic_t stop_count;
static atomic_t skip_test;
static atomic_t test_runs;

/*
 * We use a raw spinlock in this exceptional case, because
 * we want to have the fastest, inlined, non-debug version
 * of a critical section, to be able to prove TSC time-warps:
 */
static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static cycles_t last_tsc;
static cycles_t max_warp;
static int nr_warps;
static int random_warps;
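/*
 * How the warp check below works, in outline: both CPUs hammer the
 * same (sync_lock, last_tsc) pair. Each iteration takes the lock,
 * snapshots the previous reading (possibly taken by the other CPU)
 * and stores its own. If a CPU ever sees
 *
 *	prev > now
 *
 * then its TSC is behind the other CPU's, i.e. time warped backwards
 * across CPUs by (prev - now) cycles. With synchronized TSCs this can
 * never happen, because the reads are serialized by the lock.
 */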
/*
 * TSC-warp measurement loop running on both CPUs. This is not called
 * if there is no TSC.
 */
static cycles_t check_tsc_warp(unsigned int timeout)
{
	cycles_t start, now, prev, end, cur_max_warp = 0;
	int i, cur_warps = 0;

	start = rdtsc_ordered();
	/*
	 * The measurement runs for 'timeout' msecs:
	 */
	end = start + (cycles_t) tsc_khz * timeout;

	for (i = 0; ; i++) {
		/*
		 * We take the global lock, measure TSC, save the
		 * previous TSC that was measured (possibly on
		 * another CPU) and update the previous TSC timestamp.
		 */
		arch_spin_lock(&sync_lock);
		prev = last_tsc;
		now = rdtsc_ordered();
		last_tsc = now;
		arch_spin_unlock(&sync_lock);

		/*
		 * Be nice every now and then (and also check whether
		 * measurement is done [we also insert a 10 million
		 * loops safety exit, so we don't lock up in case the
		 * TSC readout is totally broken]):
		 */
		if (unlikely(!(i & 7))) {
			if (now > end || i > 10000000)
				break;
			cpu_relax();
			touch_nmi_watchdog();
		}
		/*
		 * Outside the critical section we can now see whether
		 * we saw a time-warp of the TSC going backwards:
		 */
		if (unlikely(prev > now)) {
			arch_spin_lock(&sync_lock);
			max_warp = max(max_warp, prev - now);
			cur_max_warp = max_warp;
			/*
			 * Check whether this bounces back and forth. Only
			 * one CPU should observe time going backwards.
			 */
			if (cur_warps != nr_warps)
				random_warps++;
			nr_warps++;
			cur_warps = nr_warps;
			arch_spin_unlock(&sync_lock);
		}
	}
	WARN(!(now-start),
	     "Warning: zero tsc calibration delta: %Ld [max: %Ld]\n",
	     now-start, end-start);
	return cur_max_warp;
}
/*
 * If the target CPU coming online doesn't have any of its core-siblings
 * online, a timeout of 20msec will be used for the TSC-warp measurement
 * loop. Otherwise a smaller timeout of 2msec will be used, as we have some
 * information about this socket already (and this information grows as we
 * have more and more logical-siblings in that socket).
 *
 * Ideally we should be able to skip the TSC sync check on the other
 * core-siblings, if the first logical CPU in a socket passed the sync test.
 * But as the TSC is per-logical CPU and can potentially be modified wrongly
 * by the BIOS, TSC sync test for smaller duration should be able
 * to catch such errors. Also this will catch the condition where all the
 * cores in the socket don't get reset at the same time.
 */
static inline unsigned int loop_timeout(int cpu)
{
	return (cpumask_weight(topology_core_cpumask(cpu)) > 1) ? 2 : 20;
}
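/*
 * Rendezvous between the two functions below, in outline (all steps
 * use the atomic counters above, so no locks are needed):
 *
 *  source CPU				target CPU
 *  ----------				----------
 *  wait for start_count == 1		atomic_inc(&start_count) [-> 1]
 *  atomic_inc(&start_count) [-> 2]	wait for start_count == 2
 *  check_tsc_warp()			check_tsc_warp()
 *  wait for stop_count == 1		atomic_inc(&stop_count)  [-> 1]
 *  evaluate results, reset state	wait for stop_count == 2
 *  atomic_inc(&stop_count)  [-> 2]	reset stop_count to 0
 *
 * If TSC_ADJUST is available, the whole dance can be repeated up to
 * test_runs times with a compensated ADJUST value.
 */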
/*
 * Source CPU calls into this - it waits for the freshly booted
 * target CPU to arrive and then starts the measurement:
 */
void check_tsc_sync_source(int cpu)
{
	int cpus = 2;

	/*
	 * No need to check if we already know that the TSC is not
	 * synchronized or if we have no TSC.
	 */
	if (unsynchronized_tsc())
		return;

	/*
	 * Set the maximum number of test runs to
	 *  1 if the CPU does not provide the TSC_ADJUST MSR
	 *  3 if the MSR is available, so the target can try to adjust
	 */
	if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
		atomic_set(&test_runs, 1);
	else
		atomic_set(&test_runs, 3);
retry:
	/*
	 * Wait for the target to start or to skip the test:
	 */
	while (atomic_read(&start_count) != cpus - 1) {
		if (atomic_read(&skip_test) > 0) {
			atomic_set(&skip_test, 0);
			return;
		}
		cpu_relax();
	}

	/*
	 * Trigger the target to continue into the measurement too:
	 */
	atomic_inc(&start_count);

	check_tsc_warp(loop_timeout(cpu));

	while (atomic_read(&stop_count) != cpus-1)
		cpu_relax();

	/*
	 * If the test was successful set the number of runs to zero and
	 * stop. If not, decrement the number of runs and check if we can
	 * retry. In case of random warps no retry is attempted.
	 */
	if (!nr_warps) {
		atomic_set(&test_runs, 0);

		pr_debug("TSC synchronization [CPU#%d -> CPU#%d]: passed\n",
			smp_processor_id(), cpu);

	} else if (atomic_dec_and_test(&test_runs) || random_warps) {
		/* Force it to 0 if random warps brought us here */
		atomic_set(&test_runs, 0);

		pr_warn("TSC synchronization [CPU#%d -> CPU#%d]:\n",
			smp_processor_id(), cpu);
		pr_warn("Measured %Ld cycles TSC warp between CPUs, "
			"turning off TSC clock.\n", max_warp);
		if (random_warps)
			pr_warn("TSC warped randomly between CPUs\n");
		mark_tsc_unstable("check_tsc_sync_source failed");
	}

	/*
	 * Reset it - just in case we boot another CPU later:
	 */
	atomic_set(&start_count, 0);
	random_warps = 0;
	nr_warps = 0;
	max_warp = 0;
	last_tsc = 0;

	/*
	 * Let the target continue with the bootup:
	 */
	atomic_inc(&stop_count);

	/*
	 * Retry, if there is a chance to do so.
	 */
	if (atomic_read(&test_runs) > 0)
		goto retry;
}
/*
 * Freshly booted CPUs call into this:
 */
void check_tsc_sync_target(void)
{
	struct tsc_adjust *cur = this_cpu_ptr(&tsc_adjust);
	unsigned int cpu = smp_processor_id();
	cycles_t cur_max_warp, gbl_max_warp;
	int cpus = 2;

	/* Also aborts if there is no TSC. */
	if (unsynchronized_tsc())
		return;

	/*
	 * Store, verify and sanitize the TSC adjust register. If
	 * successful skip the test.
	 *
	 * The test is also skipped when the TSC is marked reliable. This
	 * is true for SoCs which have no fallback clocksource. On these
	 * SoCs the TSC is frequency synchronized, but still the TSC ADJUST
	 * register might have been wrecked by the BIOS.
	 */
	if (tsc_store_and_check_tsc_adjust(false) || tsc_clocksource_reliable) {
		atomic_inc(&skip_test);
		return;
	}

retry:
	/*
	 * Register this CPU's participation and wait for the
	 * source CPU to start the measurement:
	 */
	atomic_inc(&start_count);
	while (atomic_read(&start_count) != cpus)
		cpu_relax();

	cur_max_warp = check_tsc_warp(loop_timeout(cpu));

	/*
	 * Store the maximum observed warp value for a potential retry:
	 */
	gbl_max_warp = max_warp;

	/*
	 * Ok, we are done:
	 */
	atomic_inc(&stop_count);

	/*
	 * Wait for the source CPU to print stuff:
	 */
	while (atomic_read(&stop_count) != cpus)
		cpu_relax();

	/*
	 * Reset it for the next sync test:
	 */
	atomic_set(&stop_count, 0);

	/*
	 * Check the number of remaining test runs. If not zero, the test
	 * failed and a retry with adjusted TSC is possible. If zero the
	 * test was either successful or failed terminally.
	 */
	if (!atomic_read(&test_runs))
		return;

	/*
	 * If the warp value of this CPU is 0, then the other CPU
	 * observed time going backwards so this TSC was ahead and
	 * needs to move backwards.
	 */
	if (!cur_max_warp)
		cur_max_warp = -gbl_max_warp;

	/*
	 * Add the result to the previous adjustment value.
	 *
	 * The adjustment value is slightly off by the overhead of the
	 * sync mechanism (observed values are ~200 TSC cycles), but this
	 * really depends on CPU, node distance and frequency. So
	 * compensating for this is hard to get right. Experiments show
	 * that the warp is no longer detectable when the observed warp
	 * value is used. In the worst case the adjustment needs to go
	 * through a 3rd run for fine tuning.
	 */
	cur->adjusted += cur_max_warp;
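	/*
	 * Worked example with made up numbers: if this CPU observed no
	 * warp but the source measured gbl_max_warp == 500 cycles, this
	 * TSC is ~500 cycles ahead; cur_max_warp became -500 above, and
	 * the MSR write below moves TSC_ADJUST (and thus the visible
	 * TSC) back by 500 cycles.
	 */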
	pr_warn("TSC ADJUST compensate: CPU%u observed %lld warp. Adjust: %lld\n",
		cpu, cur_max_warp, cur->adjusted);

	wrmsrl(MSR_IA32_TSC_ADJUST, cur->adjusted);
	goto retry;
}

#endif /* CONFIG_SMP */