// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2012 The Chromium OS Authors.
 *
 * TSC calibration codes are adapted from Linux kernel
 * arch/x86/kernel/tsc_msr.c and arch/x86/kernel/tsc.c
 */

#include <common.h>
#include <dm.h>
#include <malloc.h>
#include <timer.h>
#include <asm/cpu.h>
#include <asm/io.h>
#include <asm/i8254.h>
#include <asm/ibmpc.h>
#include <asm/msr.h>
#include <asm/u-boot-x86.h>

#define MAX_NUM_FREQS	9

DECLARE_GLOBAL_DATA_PTR;

/* Get the CPU base frequency in MHz from CPUID leaf 0x16, if supported */
static unsigned long cpu_mhz_from_cpuid(void)
{
	if (gd->arch.x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (cpuid_eax(0) < 0x16)
		return 0;

	return cpuid_eax(0x16);
}

/*
 * According to the Intel 64 and IA-32 System Programming Guide,
 * if MSR_PERF_STAT[31] is set, the maximum resolved bus ratio can be
 * read from MSR_PLATFORM_ID[12:8], otherwise from MSR_PERF_STAT[44:40].
 * Unfortunately some Intel Atom SoCs aren't quite compliant with this,
 * so we need to manually differentiate SoC families. This is what the
 * msr_plat field does.
 */
struct freq_desc {
	u8 x86_family;	/* CPU family */
	u8 x86_model;	/* model */
	/* 2: use 100MHz, 1: use MSR_PLATFORM_INFO, 0: MSR_IA32_PERF_STATUS */
	u8 msr_plat;
	u32 freqs[MAX_NUM_FREQS];
};

static struct freq_desc freq_desc_tables[] = {
	/* PNW */
	{ 6, 0x27, 0, { 0, 0, 0, 0, 0, 99840, 0, 83200, 0 } },
	/* CLV+ */
	{ 6, 0x35, 0, { 0, 133200, 0, 0, 0, 99840, 0, 83200, 0 } },
	/* TNG - Intel Atom processor Z3400 series */
	{ 6, 0x4a, 1, { 0, 100000, 133300, 0, 0, 0, 0, 0, 0 } },
	/* VLV2 - Intel Atom processor E3000, Z3600, Z3700 series */
	{ 6, 0x37, 1, { 83300, 100000, 133300, 116700, 80000, 0, 0, 0, 0 } },
	/* ANN - Intel Atom processor Z3500 series */
	{ 6, 0x5a, 1, { 83300, 100000, 133300, 100000, 0, 0, 0, 0, 0 } },
	/* AMT - Intel Atom processor X7-Z8000 and X5-Z8000 series */
	{ 6, 0x4c, 1, { 83300, 100000, 133300, 116700,
			80000, 93300, 90000, 88900, 87500 } },
	/* Ivybridge */
	{ 6, 0x3a, 2, { 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
};

static int match_cpu(u8 family, u8 model)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(freq_desc_tables); i++) {
		if ((family == freq_desc_tables[i].x86_family) &&
		    (model == freq_desc_tables[i].x86_model))
			return i;
	}

	return -1;
}

/* Map CPU reference clock freq ID (0-7) to CPU reference clock freq (kHz) */
#define id_to_freq(cpu_index, freq_id) \
	(freq_desc_tables[cpu_index].freqs[freq_id])

/*
 * TSC on Intel Atom SoCs capable of determining TSC frequency by MSR is
 * reliable and the frequency is known (provided by HW).
 *
 * On these platforms PIT/HPET is generally not available, so calibration
 * won't work at all and there is no other clocksource to act as a watchdog
 * for the TSC, so we have no choice but to trust it.
 *
 * Returns the TSC frequency in MHz, or 0 if HW does not provide it.
 */
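/*
 * A worked example with hypothetical values: if MSR_FSB_FREQ resolves to
 * freq ID 1 on a table entry that maps it to 100000 kHz, and the maximum
 * bus ratio reads back as 16, the computation below yields a TSC
 * frequency of 100000 kHz * 16 / 1000 = 1600 MHz.
 */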
static unsigned long __maybe_unused cpu_mhz_from_msr(void)
{
	u32 lo, hi, ratio, freq_id, freq;
	unsigned long res;
	int cpu_index;

	if (gd->arch.x86_vendor != X86_VENDOR_INTEL)
		return 0;

	cpu_index = match_cpu(gd->arch.x86, gd->arch.x86_model);
	if (cpu_index < 0)
		return 0;

	if (freq_desc_tables[cpu_index].msr_plat) {
		rdmsr(MSR_PLATFORM_INFO, lo, hi);
		ratio = (lo >> 8) & 0xff;
	} else {
		rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
		ratio = (hi >> 8) & 0x1f;
	}
	debug("Maximum core-clock to bus-clock ratio: 0x%x\n", ratio);

	if (freq_desc_tables[cpu_index].msr_plat == 2) {
		/* TODO: Figure out how best to deal with this */
		freq = 100000;
		debug("Using frequency: %u KHz\n", freq);
	} else {
		/* Get FSB FREQ ID */
		rdmsr(MSR_FSB_FREQ, lo, hi);
		freq_id = lo & 0x7;
		freq = id_to_freq(cpu_index, freq_id);
		debug("Resolved frequency ID: %u, frequency: %u KHz\n",
		      freq_id, freq);
	}

	/* TSC frequency = maximum resolved freq * maximum resolved bus ratio */
	res = freq * ratio / 1000;
	debug("TSC runs at %lu MHz\n", res);

	return res;
}

/*
 * This reads the current MSB of the PIT counter, and
 * checks if we are running on sufficiently fast and
 * non-virtualized hardware.
 *
 * Our expectations are:
 *
 *  - the PIT is running at roughly 1.19MHz
 *
 *  - each IO is going to take about 1us on real hardware,
 *    but we allow it to be much faster (by a factor of 10) or
 *    _slightly_ slower (ie we allow up to a 2us read+counter
 *    update - anything else implies an unacceptably slow CPU
 *    or PIT for the fast calibration to work.
 *
 *  - with 256 PIT ticks to read the value, we have 214us to
 *    see the same MSB (and overhead like doing a single TSC
 *    read per MSB value etc).
 *
 *  - We're doing 2 reads per loop (LSB, MSB), and we expect
 *    them each to take about a microsecond on real hardware.
 *    So we expect a count value of around 100. But we'll be
 *    generous, and accept anything over 50.
 *
 *  - if the PIT is stuck, and we see *many* more reads, we
 *    return early (and the next caller of pit_expect_msb()
 *    then considers it a failure when they don't see the
 *    next expected value).
 *
 * These expectations mean that we know that we have seen the
 * transition from one expected value to another with a fairly
 * high accuracy, and we didn't miss any events. We can thus
 * use the TSC value at the transitions to calculate a pretty
 * good value for the TSC frequency.
 */
static inline int pit_verify_msb(unsigned char val)
{
	/* Ignore LSB */
	inb(0x42);
	return inb(0x42) == val;
}

static inline int pit_expect_msb(unsigned char val, u64 *tscp,
				 unsigned long *deltap)
{
	int count;
	u64 tsc = 0, prev_tsc = 0;

	for (count = 0; count < 50000; count++) {
		if (!pit_verify_msb(val))
			break;
		prev_tsc = tsc;
		tsc = rdtsc();
	}
	*deltap = rdtsc() - prev_tsc;
	*tscp = tsc;

	/*
	 * We require _some_ success, but the quality control
	 * will be based on the error terms on the TSC values.
	 */
	return count > 5;
}

/*
 * How many MSB values do we want to see? We aim for
 * a maximum error rate of 500ppm (in practice the
 * real error is much smaller), but refuse to spend
 * more than 50ms on it.
 */
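/*
 * With PIT_TICK_RATE at roughly 1.193 MHz (1193182 Hz), the arithmetic
 * below works out to 50 * 1193182 / 1000 / 256 = 233 iterations at most,
 * one per 256-tick step of the MSB. The 500ppm target matches the error
 * check in the calibration loop: a measurement is accepted only once
 * d1 + d2 < delta >> 11, i.e. the read uncertainty has dropped below
 * delta / 2048, which is about 488 parts per million.
 */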
#define MAX_QUICK_PIT_MS 50
#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)

static unsigned long __maybe_unused quick_pit_calibrate(void)
{
	int i;
	u64 tsc, delta;
	unsigned long d1, d2;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Counter 2, mode 0 (one-shot), binary count
	 *
	 * NOTE! Mode 2 decrements by two (and then the
	 * output is flipped each time, giving the same
	 * final output frequency as a decrement-by-one),
	 * so mode 0 is much better when looking at the
	 * individual counts.
	 */
	outb(0xb0, 0x43);

	/* Start at 0xffff */
	outb(0xff, 0x42);
	outb(0xff, 0x42);

	/*
	 * The PIT starts counting at the next edge, so we
	 * need to delay for a microsecond. The easiest way
	 * to do that is to just read back the 16-bit counter
	 * once from the PIT.
	 */
	pit_verify_msb(0);

	if (pit_expect_msb(0xff, &tsc, &d1)) {
		for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
			if (!pit_expect_msb(0xff - i, &delta, &d2))
				break;

			/*
			 * Iterate until the error is less than 500 ppm
			 */
			delta -= tsc;
			if (d1 + d2 >= delta >> 11)
				continue;

			/*
			 * Check the PIT one more time to verify that
			 * all TSC reads were stable wrt the PIT.
			 *
			 * This also guarantees serialization of the
			 * last cycle read ('d2') in pit_expect_msb.
			 */
			if (!pit_verify_msb(0xfe - i))
				break;
			goto success;
		}
	}
	debug("Fast TSC calibration failed\n");
	return 0;

success:
	/*
	 * Ok, if we get here, then we've seen the
	 * MSB of the PIT decrement 'i' times, and the
	 * error has shrunk to less than 500 ppm.
	 *
	 * As a result, we can depend on there not being
	 * any odd delays anywhere, and the TSC reads are
	 * reliable (within the error).
	 *
	 * kHz = ticks / time-in-seconds / 1000;
	 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
	 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
	 */
	delta *= PIT_TICK_RATE;
	delta /= (i * 256 * 1000);
	debug("Fast TSC calibration using PIT\n");
	return delta / 1000;
}

/* Get the speed of the TSC timer in MHz */
unsigned notrace long get_tbclk_mhz(void)
{
	return get_tbclk() / 1000000;
}

static ulong get_ms_timer(void)
{
	return (get_ticks() * 1000) / get_tbclk();
}

ulong get_timer(ulong base)
{
	return get_ms_timer() - base;
}

ulong notrace timer_get_us(void)
{
	return get_ticks() / get_tbclk_mhz();
}

ulong timer_get_boot_us(void)
{
	return timer_get_us();
}

void __udelay(unsigned long usec)
{
	u64 now = get_ticks();
	u64 stop;

	stop = now + usec * get_tbclk_mhz();

	while ((int64_t)(stop - get_ticks()) > 0)
#if defined(CONFIG_QEMU) && defined(CONFIG_SMP)
		/*
		 * Add a 'pause' instruction on qemu target,
		 * to give other VCPUs a chance to run.
		 */
		asm volatile("pause");
#else
		;
#endif
}
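/*
 * Calibration below is attempted in cheapest-first order: CPUID leaf
 * 0x16, then the model-specific MSRs, then the quick PIT loop. When
 * running as the early timer, CONFIG_X86_TSC_TIMER_EARLY_FREQ is the
 * final fallback; the normal driver instead leaves the rate at zero
 * so that the probe path can fall back to the device tree.
 */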
static int tsc_timer_get_count(struct udevice *dev, u64 *count)
{
	u64 now_tick = rdtsc();

	*count = now_tick - gd->arch.tsc_base;

	return 0;
}

static void tsc_timer_ensure_setup(bool early)
{
	if (gd->arch.tsc_base)
		return;
	gd->arch.tsc_base = rdtsc();

	if (!gd->arch.clock_rate) {
		unsigned long fast_calibrate;

		fast_calibrate = cpu_mhz_from_cpuid();
		if (fast_calibrate)
			goto done;

		fast_calibrate = cpu_mhz_from_msr();
		if (fast_calibrate)
			goto done;

		fast_calibrate = quick_pit_calibrate();
		if (fast_calibrate)
			goto done;

		if (early)
			fast_calibrate = CONFIG_X86_TSC_TIMER_EARLY_FREQ;
		else
			return;

done:
		gd->arch.clock_rate = fast_calibrate * 1000000;
	}
}

static int tsc_timer_probe(struct udevice *dev)
{
	struct timer_dev_priv *uc_priv = dev_get_uclass_priv(dev);

	/* Try hardware calibration first */
	tsc_timer_ensure_setup(false);
	if (!gd->arch.clock_rate) {
		/*
		 * Use the clock frequency specified in the
		 * device tree as the last resort
		 */
		if (!uc_priv->clock_rate)
			panic("TSC frequency is ZERO");
	} else {
		uc_priv->clock_rate = gd->arch.clock_rate;
	}

	return 0;
}

unsigned long notrace timer_early_get_rate(void)
{
	/*
	 * When the TSC timer is used as the early timer, be warned that its
	 * clock rate can only be calibrated by hardware means. Specifying
	 * it in the device tree won't work for the early timer.
	 */
	tsc_timer_ensure_setup(true);

	return gd->arch.clock_rate;
}

u64 notrace timer_early_get_count(void)
{
	return rdtsc() - gd->arch.tsc_base;
}

static const struct timer_ops tsc_timer_ops = {
	.get_count = tsc_timer_get_count,
};

static const struct udevice_id tsc_timer_ids[] = {
	{ .compatible = "x86,tsc-timer", },
	{ }
};

U_BOOT_DRIVER(tsc_timer) = {
	.name	= "tsc_timer",
	.id	= UCLASS_TIMER,
	.of_match = tsc_timer_ids,
	.probe	= tsc_timer_probe,
	.ops	= &tsc_timer_ops,
	.flags	= DM_FLAG_PRE_RELOC,
};
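/*
 * A minimal sketch of a device tree node binding this driver; the
 * clock-frequency value (1 GHz here) is a hypothetical placeholder.
 * Assuming the timer uclass populates uc_priv->clock_rate from the
 * clock-frequency property, it only takes effect when every hardware
 * calibration method above has failed:
 *
 *	tsc-timer {
 *		compatible = "x86,tsc-timer";
 *		clock-frequency = <1000000000>;
 *	};
 */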