/*
 * check TSC synchronization.
 *
 * Copyright (C) 2006, Red Hat, Inc., Ingo Molnar
 *
 * We check whether all boot CPUs have their TSC's synchronized,
 * print a warning if not and turn off the TSC clock-source.
 *
 * The warp-check is point-to-point between two CPUs, the CPU
 * initiating the bootup is the 'source CPU', the freshly booting
 * CPU is the 'target CPU'.
 *
 * Only two CPUs may participate - they can enter in any order.
 * ( The serial nature of the boot logic and the CPU hotplug lock
 *   protects against more than 2 CPUs entering this code. )
 */
#include <linux/topology.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <asm/tsc.h>

struct tsc_adjust {
	s64		bootval;
	s64		adjusted;
	unsigned long	nextcheck;
	bool		warned;
};

static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);

void tsc_verify_tsc_adjust(void)
{
	struct tsc_adjust *adj = this_cpu_ptr(&tsc_adjust);
	s64 curval;

	if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
		return;

	/* Rate limit the MSR check */
	if (time_before(jiffies, adj->nextcheck))
		return;

	adj->nextcheck = jiffies + HZ;

	rdmsrl(MSR_IA32_TSC_ADJUST, curval);
	if (adj->adjusted == curval)
		return;

	/* Restore the original value */
	wrmsrl(MSR_IA32_TSC_ADJUST, adj->adjusted);

	if (!adj->warned) {
		pr_warn(FW_BUG "TSC ADJUST differs: CPU%u %lld --> %lld. Restoring\n",
			smp_processor_id(), adj->adjusted, curval);
		adj->warned = true;
	}
}
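
/*
 * Illustrative sketch (not compiled): the architectural contract that
 * makes the restore above safe.  Per the Intel SDM, RDTSC returns the
 * CPU's internal, monotonically increasing clock plus IA32_TSC_ADJUST,
 * and a WRMSR that changes IA32_TSC_ADJUST by some delta moves the
 * visible TSC by exactly that delta.  The helper below is hypothetical
 * and only spells out that invariant.
 */
#if 0
static u64 visible_tsc(u64 internal_clock, s64 tsc_adjust)
{
	/* RDTSC == internal clock + IA32_TSC_ADJUST */
	return internal_clock + tsc_adjust;
}
#endif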

#ifndef CONFIG_SMP
bool __init tsc_store_and_check_tsc_adjust(void)
{
	struct tsc_adjust *cur = this_cpu_ptr(&tsc_adjust);
	s64 bootval;

	if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
		return false;

	rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
	cur->bootval = bootval;
	cur->adjusted = bootval;
	cur->nextcheck = jiffies + HZ;
	pr_info("TSC ADJUST: Boot CPU0: %lld\n", bootval);
	return false;
}

#else /* !CONFIG_SMP */

/*
 * Store and check the TSC ADJUST MSR if available
 */
bool tsc_store_and_check_tsc_adjust(void)
{
	struct tsc_adjust *ref, *cur = this_cpu_ptr(&tsc_adjust);
	unsigned int refcpu, cpu = smp_processor_id();
	s64 bootval;

	if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
		return false;

	rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
	cur->bootval = bootval;
	cur->nextcheck = jiffies + HZ;
	cur->warned = false;

	/*
	 * Check whether this CPU is the first in a package to come up. In
	 * this case do not check the boot value against another package
	 * because the package might have been physically hotplugged, where
	 * TSC_ADJUST is expected to be different.
	 */
	refcpu = cpumask_any_but(topology_core_cpumask(cpu), cpu);

	if (refcpu >= nr_cpu_ids) {
		/*
		 * First online CPU in a package stores the boot value in
		 * the adjustment value. This value might change later via
		 * the sync mechanism. If that fails we still can yell
		 * about boot values not being consistent.
		 */
		cur->adjusted = bootval;
		pr_info_once("TSC ADJUST: Boot CPU%u: %lld\n", cpu, bootval);
		return false;
	}

	ref = per_cpu_ptr(&tsc_adjust, refcpu);
	/*
	 * Compare the boot value and complain if it differs in the
	 * package.
	 */
	if (bootval != ref->bootval) {
		pr_warn("TSC ADJUST differs: Reference CPU%u: %lld CPU%u: %lld\n",
			refcpu, ref->bootval, cpu, bootval);
	}
	/*
	 * The TSC_ADJUST values in a package must be the same. If the boot
	 * value on this newly upcoming CPU differs from the adjustment
	 * value of the already online CPU in this package, set it to that
	 * adjusted value.
	 */
	if (bootval != ref->adjusted) {
		pr_warn("TSC ADJUST synchronize: Reference CPU%u: %lld CPU%u: %lld\n",
			refcpu, ref->adjusted, cpu, bootval);
		cur->adjusted = ref->adjusted;
		wrmsrl(MSR_IA32_TSC_ADJUST, ref->adjusted);
	}
	/*
	 * We have the TSCs forced to be in sync on this package. Skip sync
	 * test:
	 */
	return true;
}

/*
 * Entry/exit counters that make sure that both CPUs
 * run the measurement code at once:
 */
static atomic_t start_count;
static atomic_t stop_count;
static atomic_t skip_test;

/*
 * We use a raw spinlock in this exceptional case, because
 * we want to have the fastest, inlined, non-debug version
 * of a critical section, to be able to prove TSC time-warps:
 */
static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static cycles_t last_tsc;
static cycles_t max_warp;
static int nr_warps;
static int random_warps;

/*
 * TSC-warp measurement loop running on both CPUs. This is not called
 * if there is no TSC.
 */
static void check_tsc_warp(unsigned int timeout)
{
	cycles_t start, now, prev, end;
	int i, cur_warps = 0;

	start = rdtsc_ordered();
	/*
	 * The measurement runs for 'timeout' msecs:
	 */
	end = start + (cycles_t) tsc_khz * timeout;
	now = start;

	for (i = 0; ; i++) {
		/*
		 * We take the global lock, measure TSC, save the
		 * previous TSC that was measured (possibly on
		 * another CPU) and update the previous TSC timestamp.
		 */
		arch_spin_lock(&sync_lock);
		prev = last_tsc;
		now = rdtsc_ordered();
		last_tsc = now;
		arch_spin_unlock(&sync_lock);

		/*
		 * Be nice every now and then (and also check whether
		 * measurement is done [we also insert a 10 million
		 * loops safety exit, so we don't lock up in case the
		 * TSC readout is totally broken]):
		 */
		if (unlikely(!(i & 7))) {
			if (now > end || i > 10000000)
				break;
			cpu_relax();
			touch_nmi_watchdog();
		}
		/*
		 * Outside the critical section we can now see whether
		 * we saw a time-warp of the TSC going backwards:
		 */
		if (unlikely(prev > now)) {
			arch_spin_lock(&sync_lock);
			max_warp = max(max_warp, prev - now);
			/*
			 * Check whether this bounces back and forth. Only
			 * one CPU should observe time going backwards.
			 */
			if (cur_warps != nr_warps)
				random_warps++;
			nr_warps++;
			cur_warps = nr_warps;
			arch_spin_unlock(&sync_lock);
		}
	}
	WARN(!(now-start),
	     "Warning: zero tsc calibration delta: %Ld [max: %Ld]\n",
	     now-start, end-start);
}
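
/*
 * Standalone user-space analogue of the loop above (illustrative only,
 * not part of the kernel build): two threads hand the last TSC value to
 * each other under a lock; observing prev > now across the handoff
 * proves a warp.  A pthread spinlock stands in for sync_lock and an
 * LFENCE-fronted __rdtsc() approximates rdtsc_ordered().  For a real
 * cross-CPU test each thread would additionally be pinned to its own
 * CPU (e.g. via pthread_setaffinity_np()).  Build with:
 * gcc -O2 -pthread warp_demo.c
 */
#if 0
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>

static pthread_spinlock_t lock;
static uint64_t last_tsc;
static uint64_t warps;

static inline uint64_t rdtsc_ordered_us(void)
{
	_mm_lfence();		/* keep the TSC read from moving earlier */
	return __rdtsc();
}

static void *warp_check(void *arg)
{
	for (int i = 0; i < 10000000; i++) {
		pthread_spin_lock(&lock);
		uint64_t prev = last_tsc;
		uint64_t now = rdtsc_ordered_us();
		last_tsc = now;
		pthread_spin_unlock(&lock);

		if (prev > now)	/* time went backwards across the handoff */
			__atomic_add_fetch(&warps, 1, __ATOMIC_RELAXED);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	pthread_create(&a, NULL, warp_check, NULL);
	pthread_create(&b, NULL, warp_check, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("warps observed: %lu\n", (unsigned long)warps);
	return 0;
}
#endif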

/*
 * If the target CPU coming online doesn't have any of its core-siblings
 * online, a timeout of 20msec will be used for the TSC-warp measurement
 * loop. Otherwise a smaller timeout of 2msec will be used, as we have some
 * information about this socket already (and this information grows as we
 * have more and more logical-siblings in that socket).
 *
 * Ideally we should be able to skip the TSC sync check on the other
 * core-siblings, if the first logical CPU in a socket passed the sync test.
 * But as the TSC is per-logical CPU and can potentially be modified wrongly
 * by the BIOS, TSC sync test for smaller duration should be able
 * to catch such errors. Also this will catch the condition where all the
 * cores in the socket don't get reset at the same time.
 */
static inline unsigned int loop_timeout(int cpu)
{
	return (cpumask_weight(topology_core_cpumask(cpu)) > 1) ? 2 : 20;
}

/*
 * Source CPU calls into this - it waits for the freshly booted
 * target CPU to arrive and then starts the measurement:
 */
void check_tsc_sync_source(int cpu)
{
	int cpus = 2;

	/*
	 * No need to check if we already know that the TSC is not
	 * synchronized or if we have no TSC.
	 */
	if (unsynchronized_tsc())
		return;

	if (tsc_clocksource_reliable) {
		if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING)
			pr_info(
			"Skipped synchronization checks as TSC is reliable.\n");
		return;
	}

	/*
	 * Reset it - in case this is a second bootup:
	 */
	atomic_set(&stop_count, 0);

	/*
	 * Wait for the target to start or to skip the test:
	 */
	while (atomic_read(&start_count) != cpus - 1) {
		if (atomic_read(&skip_test) > 0) {
			atomic_set(&skip_test, 0);
			return;
		}
		cpu_relax();
	}

	/*
	 * Trigger the target to continue into the measurement too:
	 */
	atomic_inc(&start_count);

	check_tsc_warp(loop_timeout(cpu));

	while (atomic_read(&stop_count) != cpus-1)
		cpu_relax();

	if (nr_warps) {
		pr_warn("TSC synchronization [CPU#%d -> CPU#%d]:\n",
			smp_processor_id(), cpu);
		pr_warn("Measured %Ld cycles TSC warp between CPUs, turning off TSC clock.\n",
			max_warp);
		if (random_warps)
			pr_warn("TSC warped randomly between CPUs\n");
		mark_tsc_unstable("check_tsc_sync_source failed");
	} else {
		pr_debug("TSC synchronization [CPU#%d -> CPU#%d]: passed\n",
			smp_processor_id(), cpu);
	}

	/*
	 * Reset it - just in case we boot another CPU later:
	 */
	atomic_set(&start_count, 0);
	random_warps = 0;
	nr_warps = 0;
	max_warp = 0;
	last_tsc = 0;

	/*
	 * Let the target continue with the bootup:
	 */
	atomic_inc(&stop_count);
}

/*
 * Freshly booted CPUs call into this:
 */
void check_tsc_sync_target(void)
{
	int cpus = 2;

	/* Also aborts if there is no TSC. */
	if (unsynchronized_tsc() || tsc_clocksource_reliable)
		return;

	/*
	 * Store, verify and sanitize the TSC adjust register. If
	 * successful skip the test.
	 */
	if (tsc_store_and_check_tsc_adjust()) {
		atomic_inc(&skip_test);
		return;
	}

	/*
	 * Register this CPU's participation and wait for the
	 * source CPU to start the measurement:
	 */
	atomic_inc(&start_count);
	while (atomic_read(&start_count) != cpus)
		cpu_relax();

	check_tsc_warp(loop_timeout(smp_processor_id()));

	/*
	 * Ok, we are done:
	 */
	atomic_inc(&stop_count);

	/*
	 * Wait for the source CPU to print stuff:
	 */
	while (atomic_read(&stop_count) != cpus)
		cpu_relax();
}

#endif /* CONFIG_SMP */
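
/*
 * Usage sketch (illustrative, not compiled): how the two sides above are
 * driven from the x86 SMP bringup code in arch/x86/kernel/smpboot.c.
 * The boot CPU runs the source side with interrupts disabled right after
 * the secondary CPU has been kicked, while the secondary runs the target
 * side from its early startup path.
 */
#if 0
/* Boot (source) CPU, at the end of native_cpu_up(): */
local_irq_save(flags);
check_tsc_sync_source(cpu);
local_irq_restore(flags);

/* Freshly booted (target) CPU, in start_secondary(): */
check_tsc_sync_target();
#endif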