/*
 * check TSC synchronization.
 *
 * Copyright (C) 2006, Red Hat, Inc., Ingo Molnar
 *
 * We check whether all boot CPUs have their TSCs synchronized,
 * print a warning if not and turn off the TSC clock-source.
 *
 * The warp-check is point-to-point between two CPUs, the CPU
 * initiating the bootup is the 'source CPU', the freshly booting
 * CPU is the 'target CPU'.
 *
 * Only two CPUs may participate - they can enter in any order.
 * ( The serial nature of the boot logic and the CPU hotplug lock
 *   protects against more than 2 CPUs entering this code. )
 */
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <asm/tsc.h>

/*
 * Entry/exit counters that make sure that both CPUs
 * run the measurement code at once:
 */
static atomic_t start_count;
static atomic_t stop_count;

/*
 * We use a raw spinlock in this exceptional case, because
 * we want to have the fastest, inlined, non-debug version
 * of a critical section, to be able to prove TSC time-warps:
 */
static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static cycles_t last_tsc;
static cycles_t max_warp;
static int nr_warps;

/*
 * TSC-warp measurement loop running on both CPUs:
 */
static void check_tsc_warp(unsigned int timeout)
{
        cycles_t start, now, prev, end;
        int i;

        rdtsc_barrier();
        start = get_cycles();
        rdtsc_barrier();
        /*
         * The measurement runs for 'timeout' msecs:
         */
        end = start + (cycles_t) tsc_khz * timeout;
        now = start;

        for (i = 0; ; i++) {
                /*
                 * We take the global lock, measure TSC, save the
                 * previous TSC that was measured (possibly on
                 * another CPU) and update the previous TSC timestamp.
                 */
                arch_spin_lock(&sync_lock);
                prev = last_tsc;
                rdtsc_barrier();
                now = get_cycles();
                rdtsc_barrier();
                last_tsc = now;
                arch_spin_unlock(&sync_lock);

                /*
                 * Be nice every now and then (and also check whether
                 * measurement is done [we also insert a 10 million
                 * loops safety exit, so we don't lock up in case the
                 * TSC readout is totally broken]):
                 */
                if (unlikely(!(i & 7))) {
                        if (now > end || i > 10000000)
                                break;
                        cpu_relax();
                        touch_nmi_watchdog();
                }
                /*
                 * Outside the critical section we can now see whether
                 * we saw a time-warp of the TSC going backwards:
                 */
                if (unlikely(prev > now)) {
                        arch_spin_lock(&sync_lock);
                        max_warp = max(max_warp, prev - now);
                        nr_warps++;
                        arch_spin_unlock(&sync_lock);
                }
        }
        WARN(!(now-start),
             "Warning: zero tsc calibration delta: %Ld [max: %Ld]\n",
             now-start, end-start);
}

/*
 * If the target CPU coming online doesn't have any of its core-siblings
 * online, a timeout of 20msec will be used for the TSC-warp measurement
 * loop. Otherwise a smaller timeout of 2msec will be used, as we have some
 * information about this socket already (and this information grows as we
 * have more and more logical-siblings in that socket).
 *
 * Ideally we should be able to skip the TSC sync check on the other
 * core-siblings, if the first logical CPU in a socket passed the sync test.
 * But as the TSC is per-logical CPU and can potentially be modified wrongly
 * by the BIOS, a TSC sync test of smaller duration should still be able
 * to catch such errors. Also this will catch the condition where all the
 * cores in the socket don't get reset at the same time.
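 *
 * ( Example arithmetic, for illustration: check_tsc_warp() converts its
 *   msec timeout into cycles as 'end = start + tsc_khz * timeout', so on
 *   a 2 GHz part (tsc_khz == 2000000) the 20 msec warp loop spans about
 *   40,000,000 TSC cycles. )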
 */
static inline unsigned int loop_timeout(int cpu)
{
        return (cpumask_weight(cpu_core_mask(cpu)) > 1) ? 2 : 20;
}

/*
 * Source CPU calls into this - it waits for the freshly booted
 * target CPU to arrive and then starts the measurement:
 */
void check_tsc_sync_source(int cpu)
{
        int cpus = 2;

        /*
         * No need to check if we already know that the TSC is not
         * synchronized:
         */
        if (unsynchronized_tsc())
                return;

        if (tsc_clocksource_reliable) {
                if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING)
                        pr_info(
                        "Skipped synchronization checks as TSC is reliable.\n");
                return;
        }

        /*
         * Reset it - in case this is a second bootup:
         */
        atomic_set(&stop_count, 0);

        /*
         * Wait for the target to arrive:
         */
        while (atomic_read(&start_count) != cpus-1)
                cpu_relax();
        /*
         * Trigger the target to continue into the measurement too:
         */
        atomic_inc(&start_count);

        check_tsc_warp(loop_timeout(cpu));

        while (atomic_read(&stop_count) != cpus-1)
                cpu_relax();

        if (nr_warps) {
                pr_warning("TSC synchronization [CPU#%d -> CPU#%d]:\n",
                           smp_processor_id(), cpu);
                pr_warning("Measured %Ld cycles TSC warp between CPUs, "
                           "turning off TSC clock.\n", max_warp);
                mark_tsc_unstable("check_tsc_sync_source failed");
        } else {
                pr_debug("TSC synchronization [CPU#%d -> CPU#%d]: passed\n",
                         smp_processor_id(), cpu);
        }

        /*
         * Reset it - just in case we boot another CPU later:
         */
        atomic_set(&start_count, 0);
        nr_warps = 0;
        max_warp = 0;
        last_tsc = 0;

        /*
         * Let the target continue with the bootup:
         */
        atomic_inc(&stop_count);
}

/*
 * Freshly booted CPUs call into this:
 */
void check_tsc_sync_target(void)
{
        int cpus = 2;

        if (unsynchronized_tsc() || tsc_clocksource_reliable)
                return;

        /*
         * Register this CPU's participation and wait for the
         * source CPU to start the measurement:
         */
        atomic_inc(&start_count);
        while (atomic_read(&start_count) != cpus)
                cpu_relax();

        check_tsc_warp(loop_timeout(smp_processor_id()));

        /*
         * Ok, we are done:
         */
        atomic_inc(&stop_count);

        /*
         * Wait for the source CPU to print stuff:
         */
        while (atomic_read(&stop_count) != cpus)
                cpu_relax();
}
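
/*
 * Usage sketch, for illustration only (the real call sites live in
 * arch/x86/kernel/smpboot.c; the pairing shown below is an assumption
 * about the usual bringup flow, not verbatim kernel code):
 *
 *	// Boot (source) CPU, after kicking the AP:
 *	check_tsc_sync_source(cpu);
 *
 *	// Freshly booted AP (target CPU), e.g. from start_secondary():
 *	check_tsc_sync_target();
 *
 * Either CPU may reach its call first: the start_count/stop_count
 * rendezvous above makes check_tsc_warp() run on both CPUs
 * concurrently before either side continues with the bootup.
 */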