// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/timer.h>
#include <linux/acpi_pmtmr.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/percpu.h>
#include <linux/timex.h>
#include <linux/static_key.h>

#include <asm/hpet.h>
#include <asm/timer.h>
#include <asm/vgtod.h>
#include <asm/time.h>
#include <asm/delay.h>
#include <asm/hypervisor.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
#include <asm/geode.h>
#include <asm/apic.h>
#include <asm/intel-family.h>
#include <asm/i8259.h>
#include <asm/uv/uv.h>

unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);

unsigned int __read_mostly tsc_khz;
EXPORT_SYMBOL(tsc_khz);

#define KHZ	1000

/*
 * TSC can be unstable due to cpufreq or due to unsynced TSCs
 */
static int __read_mostly tsc_unstable;

static DEFINE_STATIC_KEY_FALSE(__use_tsc);

int tsc_clocksource_reliable;

static u32 art_to_tsc_numerator;
static u32 art_to_tsc_denominator;
static u64 art_to_tsc_offset;
struct clocksource *art_related_clocksource;

struct cyc2ns {
	struct cyc2ns_data data[2];	/*  0 + 2*16 = 32 */
	seqcount_t	   seq;		/* 32 + 4    = 36 */

};	/* fits one cacheline */

static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);

__always_inline void cyc2ns_read_begin(struct cyc2ns_data *data)
{
	int seq, idx;

	preempt_disable_notrace();

	do {
		seq = this_cpu_read(cyc2ns.seq.sequence);
		idx = seq & 1;

		data->cyc2ns_offset = this_cpu_read(cyc2ns.data[idx].cyc2ns_offset);
		data->cyc2ns_mul    = this_cpu_read(cyc2ns.data[idx].cyc2ns_mul);
		data->cyc2ns_shift  = this_cpu_read(cyc2ns.data[idx].cyc2ns_shift);

	} while (unlikely(seq != this_cpu_read(cyc2ns.seq.sequence)));
}

__always_inline void cyc2ns_read_end(void)
{
	preempt_enable_notrace();
}

/*
 * Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 *  basic equation:
 *              ns = cycles / (freq / ns_per_sec)
 *              ns = cycles * (ns_per_sec / freq)
 *              ns = cycles * (10^9 / (cpu_khz * 10^3))
 *              ns = cycles * (10^6 / cpu_khz)
 *
 *      Then we use scaling math (suggested by george@mvista.com) to get:
 *              ns = cycles * (10^6 * SC / cpu_khz) / SC
 *              ns = cycles * cyc2ns_scale / SC
 *
 *      And since SC is a constant power of two, we can convert the div
 *  into a shift. The larger SC is, the more accurate the conversion, but
 *  cyc2ns_scale needs to be a 32-bit value so that 32-bit multiplication
 *  (64-bit result) can be used.
 *
 *  We can use khz divisor instead of mhz to keep a better precision.
 *  (mathieu.desnoyers@polymtl.ca)
 *
 *                      -johnstul@us.ibm.com "math is hard, lets go shopping!"
 */
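/*
 * Illustrative example of the scaling above (hypothetical numbers, not taken
 * from any particular CPU): with tsc_khz = 2,000,000 (a 2 GHz TSC),
 * clocks_calc_mult_shift() may pick something close to
 *
 *	cyc2ns_shift = 22, cyc2ns_mul = (10^6 << 22) / 2,000,000 = 2,097,152
 *
 * so one cycle converts to 2,097,152 >> 22 = 0.5 ns. For a delta of
 * 3,000,000 cycles:
 *
 *	ns = (3,000,000 * 2,097,152) >> 22 = 1,500,000 ns = 1.5 ms
 *
 * which matches 3e6 cycles at 2 GHz.
 */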
static __always_inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	struct cyc2ns_data data;
	unsigned long long ns;

	cyc2ns_read_begin(&data);

	ns = data.cyc2ns_offset;
	ns += mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);

	cyc2ns_read_end();

	return ns;
}

static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
{
	unsigned long long ns_now;
	struct cyc2ns_data data;
	struct cyc2ns *c2n;

	ns_now = cycles_2_ns(tsc_now);

	/*
	 * Compute a new multiplier as per the above comment and ensure our
	 * time function is continuous; see the comment near struct
	 * cyc2ns_data.
	 */
	clocks_calc_mult_shift(&data.cyc2ns_mul, &data.cyc2ns_shift, khz,
			       NSEC_PER_MSEC, 0);

	/*
	 * cyc2ns_shift is exported via arch_perf_update_userpage() where it is
	 * not expected to be greater than 31 due to the original published
	 * conversion algorithm shifting a 32-bit value (now specifies a 64-bit
	 * value) - refer perf_event_mmap_page documentation in perf_event.h.
	 */
	if (data.cyc2ns_shift == 32) {
		data.cyc2ns_shift = 31;
		data.cyc2ns_mul >>= 1;
	}

	data.cyc2ns_offset = ns_now -
		mul_u64_u32_shr(tsc_now, data.cyc2ns_mul, data.cyc2ns_shift);

	c2n = per_cpu_ptr(&cyc2ns, cpu);

	raw_write_seqcount_latch(&c2n->seq);
	c2n->data[0] = data;
	raw_write_seqcount_latch(&c2n->seq);
	c2n->data[1] = data;
}

static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
{
	unsigned long flags;

	local_irq_save(flags);
	sched_clock_idle_sleep_event();

	if (khz)
		__set_cyc2ns_scale(khz, cpu, tsc_now);

	sched_clock_idle_wakeup_event();
	local_irq_restore(flags);
}

/*
 * Initialize cyc2ns for boot cpu
 */
static void __init cyc2ns_init_boot_cpu(void)
{
	struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);

	seqcount_init(&c2n->seq);
	__set_cyc2ns_scale(tsc_khz, smp_processor_id(), rdtsc());
}

/*
 * Secondary CPUs do not run through tsc_init(), so set up
 * all the scale factors for all CPUs, assuming the same
 * speed as the bootup CPU.
 */
static void __init cyc2ns_init_secondary_cpus(void)
{
	unsigned int cpu, this_cpu = smp_processor_id();
	struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
	struct cyc2ns_data *data = c2n->data;

	for_each_possible_cpu(cpu) {
		if (cpu != this_cpu) {
			seqcount_init(&c2n->seq);
			c2n = per_cpu_ptr(&cyc2ns, cpu);
			c2n->data[0] = data[0];
			c2n->data[1] = data[1];
		}
	}
}

/*
 * Scheduler clock - returns current time in nanosec units.
 */
u64 native_sched_clock(void)
{
	if (static_branch_likely(&__use_tsc)) {
		u64 tsc_now = rdtsc();

		/* return the value in ns */
		return cycles_2_ns(tsc_now);
	}

	/*
	 * Fall back to jiffies if there's no TSC available:
	 * ( But note that we still use it if the TSC is marked
	 *   unstable. We do this because unlike Time Of Day,
	 *   the scheduler clock tolerates small errors and it's
	 *   very important for it to be as fast as the platform
	 *   can achieve it. )
	 */

	/* No locking but a rare wrong value is not a big deal: */
	return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
}
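/*
 * For reference (illustrative, depends on the configured HZ): with HZ = 250
 * the jiffies fallback above advances in steps of 1000000000 / 250 =
 * 4,000,000 ns, i.e. sched_clock() has only 4 ms resolution until the TSC
 * path is enabled.
 */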
/*
 * Generate a sched_clock if you already have a TSC value.
 */
u64 native_sched_clock_from_tsc(u64 tsc)
{
	return cycles_2_ns(tsc);
}

/*
 * We need to define a real function for sched_clock, to override the
 * weak default version.
 */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}

bool using_native_sched_clock(void)
{
	return pv_ops.time.sched_clock == native_sched_clock;
}
#else
unsigned long long
sched_clock(void) __attribute__((alias("native_sched_clock")));

bool using_native_sched_clock(void) { return true; }
#endif

int check_tsc_unstable(void)
{
	return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
	mark_tsc_unstable("boot parameter notsc");
	return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
int __init notsc_setup(char *str)
{
	setup_clear_cpu_cap(X86_FEATURE_TSC);
	return 1;
}
#endif

__setup("notsc", notsc_setup);

static int no_sched_irq_time;
static int no_tsc_watchdog;

static int __init tsc_setup(char *str)
{
	if (!strcmp(str, "reliable"))
		tsc_clocksource_reliable = 1;
	if (!strncmp(str, "noirqtime", 9))
		no_sched_irq_time = 1;
	if (!strcmp(str, "unstable"))
		mark_tsc_unstable("boot parameter");
	if (!strcmp(str, "nowatchdog"))
		no_tsc_watchdog = 1;
	return 1;
}

__setup("tsc=", tsc_setup);

#define MAX_RETRIES		5
#define TSC_DEFAULT_THRESHOLD	0x20000

/*
 * Read TSC and the reference counters. Take care of any disturbances
 */
static u64 tsc_read_refs(u64 *p, int hpet)
{
	u64 t1, t2;
	u64 thresh = tsc_khz ? tsc_khz >> 5 : TSC_DEFAULT_THRESHOLD;
	int i;

	for (i = 0; i < MAX_RETRIES; i++) {
		t1 = get_cycles();
		if (hpet)
			*p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
		else
			*p = acpi_pm_read_early();
		t2 = get_cycles();
		if ((t2 - t1) < thresh)
			return t2;
	}
	return ULLONG_MAX;
}

/*
 * Calculate the TSC frequency from HPET reference
 */
static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
{
	u64 tmp;

	if (hpet2 < hpet1)
		hpet2 += 0x100000000ULL;
	hpet2 -= hpet1;
	tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
	do_div(tmp, 1000000);
	deltatsc = div64_u64(deltatsc, tmp);

	return (unsigned long) deltatsc;
}

/*
 * Calculate the TSC frequency from PMTimer reference
 */
static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
{
	u64 tmp;

	if (!pm1 && !pm2)
		return ULONG_MAX;

	if (pm2 < pm1)
		pm2 += (u64)ACPI_PM_OVRRUN;
	pm2 -= pm1;
	tmp = pm2 * 1000000000LL;
	do_div(tmp, PMTMR_TICKS_PER_SEC);
	do_div(deltatsc, tmp);

	return (unsigned long) deltatsc;
}
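/*
 * Unit sketch for the two helpers above (illustrative numbers only): the
 * caller passes deltatsc already multiplied by 1e6. For the PM timer,
 * tmp = pm_ticks * 1e9 / PMTMR_TICKS_PER_SEC is the elapsed time in ns, so
 * deltatsc / tmp = (tsc_cycles * 1e6) / ns, which is the TSC rate in kHz.
 * E.g. ~35795 PM timer ticks (~10 ms) against ~20,000,000 TSC cycles gives
 * roughly 20e6 * 1e6 / 10e6 = 2,000,000 kHz, i.e. a 2 GHz TSC. The HPET
 * variant is identical except that the elapsed ns are derived from
 * HPET_PERIOD (given in femtoseconds, hence the do_div by 1e6).
 */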
#define CAL_MS		10
#define CAL_LATCH	(PIT_TICK_RATE / (1000 / CAL_MS))
#define CAL_PIT_LOOPS	1000

#define CAL2_MS		50
#define CAL2_LATCH	(PIT_TICK_RATE / (1000 / CAL2_MS))
#define CAL2_PIT_LOOPS	5000


/*
 * Try to calibrate the TSC against the Programmable
 * Interrupt Timer and return the frequency of the TSC
 * in kHz.
 *
 * Return ULONG_MAX on failure to calibrate.
 */
static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
{
	u64 tsc, t1, t2, delta;
	unsigned long tscmin, tscmax;
	int pitcnt;

	if (!has_legacy_pic()) {
		/*
		 * Relies on tsc_early_delay_calibrate() to have given us semi
		 * usable udelay(), wait for the same 50ms we would have with
		 * the PIT loop below.
		 */
		udelay(10 * USEC_PER_MSEC);
		udelay(10 * USEC_PER_MSEC);
		udelay(10 * USEC_PER_MSEC);
		udelay(10 * USEC_PER_MSEC);
		udelay(10 * USEC_PER_MSEC);
		return ULONG_MAX;
	}

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Setup CTC channel 2 for mode 0 (interrupt on terminal
	 * count), binary count. Set the latch register to the
	 * requested timeout (LSB then MSB) to begin the countdown.
	 */
	outb(0xb0, 0x43);
	outb(latch & 0xff, 0x42);
	outb(latch >> 8, 0x42);

	tsc = t1 = t2 = get_cycles();

	pitcnt = 0;
	tscmax = 0;
	tscmin = ULONG_MAX;
	while ((inb(0x61) & 0x20) == 0) {
		t2 = get_cycles();
		delta = t2 - tsc;
		tsc = t2;
		if ((unsigned long) delta < tscmin)
			tscmin = (unsigned int) delta;
		if ((unsigned long) delta > tscmax)
			tscmax = (unsigned int) delta;
		pitcnt++;
	}

	/*
	 * Sanity checks:
	 *
	 * If we were not able to read the PIT more than loopmin
	 * times, then we have been hit by a massive SMI
	 *
	 * If the maximum is 10 times larger than the minimum,
	 * then we got hit by an SMI as well.
	 */
	if (pitcnt < loopmin || tscmax > 10 * tscmin)
		return ULONG_MAX;

	/* Calculate the PIT value */
	delta = t2 - t1;
	do_div(delta, ms);
	return delta;
}
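/*
 * Quick sanity example for the calculation above (illustrative numbers): if
 * the 10 ms PIT window (CAL_MS) spans roughly 20,000,000 TSC cycles, then
 * delta / ms = 20,000,000 / 10 = 2,000,000, i.e. a 2 GHz TSC reported in kHz.
 */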
/*
 * This reads the current MSB of the PIT counter, and
 * checks if we are running on sufficiently fast and
 * non-virtualized hardware.
 *
 * Our expectations are:
 *
 *  - the PIT is running at roughly 1.19MHz
 *
 *  - each IO is going to take about 1us on real hardware,
 *    but we allow it to be much faster (by a factor of 10) or
 *    _slightly_ slower (ie we allow up to a 2us read+counter
 *    update - anything else implies an unacceptably slow CPU
 *    or PIT for the fast calibration to work).
 *
 *  - with 256 PIT ticks to read the value, we have 214us to
 *    see the same MSB (and overhead like doing a single TSC
 *    read per MSB value etc).
 *
 *  - We're doing 2 reads per loop (LSB, MSB), and we expect
 *    them each to take about a microsecond on real hardware.
 *    So we expect a count value of around 100. But we'll be
 *    generous, and accept anything over 50.
 *
 *  - if the PIT is stuck, and we see *many* more reads, we
 *    return early (and the next caller of pit_expect_msb()
 *    then considers it a failure when it doesn't see the
 *    next expected value).
 *
 * These expectations mean that we know that we have seen the
 * transition from one expected value to another with a fairly
 * high accuracy, and we didn't miss any events. We can thus
 * use the TSC value at the transitions to calculate a pretty
 * good value for the TSC frequency.
 */
static inline int pit_verify_msb(unsigned char val)
{
	/* Ignore LSB */
	inb(0x42);
	return inb(0x42) == val;
}

static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
{
	int count;
	u64 tsc = 0, prev_tsc = 0;

	for (count = 0; count < 50000; count++) {
		if (!pit_verify_msb(val))
			break;
		prev_tsc = tsc;
		tsc = get_cycles();
	}
	*deltap = get_cycles() - prev_tsc;
	*tscp = tsc;

	/*
	 * We require _some_ success, but the quality control
	 * will be based on the error terms on the TSC values.
	 */
	return count > 5;
}

/*
 * How many MSB values do we want to see? We aim for
 * a maximum error rate of 500ppm (in practice the
 * real error is much smaller), but refuse to spend
 * more than 50ms on it.
 */
#define MAX_QUICK_PIT_MS 50
#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)

static unsigned long quick_pit_calibrate(void)
{
	int i;
	u64 tsc, delta;
	unsigned long d1, d2;

	if (!has_legacy_pic())
		return 0;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Counter 2, mode 0 (one-shot), binary count
	 *
	 * NOTE! Mode 2 decrements by two (and then the
	 * output is flipped each time, giving the same
	 * final output frequency as a decrement-by-one),
	 * so mode 0 is much better when looking at the
	 * individual counts.
	 */
	outb(0xb0, 0x43);

	/* Start at 0xffff */
	outb(0xff, 0x42);
	outb(0xff, 0x42);

	/*
	 * The PIT starts counting at the next edge, so we
	 * need to delay for a microsecond. The easiest way
	 * to do that is to just read back the 16-bit counter
	 * once from the PIT.
	 */
	pit_verify_msb(0);

	if (pit_expect_msb(0xff, &tsc, &d1)) {
		for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
			if (!pit_expect_msb(0xff-i, &delta, &d2))
				break;

			delta -= tsc;

			/*
			 * Extrapolate the error and fail fast if the error will
			 * never be below 500 ppm.
			 */
			if (i == 1 &&
			    d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)
				return 0;

			/*
			 * Iterate until the error is less than 500 ppm
			 */
			if (d1+d2 >= delta >> 11)
				continue;

			/*
			 * Check the PIT one more time to verify that
			 * all TSC reads were stable wrt the PIT.
			 *
			 * This also guarantees serialization of the
			 * last cycle read ('d2') in pit_expect_msb.
			 */
			if (!pit_verify_msb(0xfe - i))
				break;
			goto success;
		}
	}
	pr_info("Fast TSC calibration failed\n");
	return 0;

success:
	/*
	 * Ok, if we get here, then we've seen the
	 * MSB of the PIT decrement 'i' times, and the
	 * error has shrunk to less than 500 ppm.
	 *
	 * As a result, we can depend on there not being
	 * any odd delays anywhere, and the TSC reads are
	 * reliable (within the error).
	 *
	 * kHz = ticks / time-in-seconds / 1000;
	 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
	 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
	 */
	delta *= PIT_TICK_RATE;
	do_div(delta, i*256*1000);
	pr_info("Fast TSC calibration using PIT\n");
	return delta;
}
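/*
 * Worked example for the conversion in quick_pit_calibrate() (illustrative
 * numbers, not from a real machine): with PIT_TICK_RATE = 1193182 Hz, one MSB
 * step is 256 PIT ticks ~= 214.6 us. If the MSB decremented i = 20 times
 * while the TSC advanced by delta = 8,583,000 cycles, then
 *
 *	kHz = delta * PIT_TICK_RATE / (i * 256 * 1000)
 *	    = 8,583,000 * 1,193,182 / 5,120,000 ~= 2,000,211
 *
 * i.e. roughly a 2 GHz TSC.
 */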
/**
 * native_calibrate_tsc
 * Determine TSC frequency via CPUID, else return 0.
 */
unsigned long native_calibrate_tsc(void)
{
	unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
	unsigned int crystal_khz;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (boot_cpu_data.cpuid_level < 0x15)
		return 0;

	eax_denominator = ebx_numerator = ecx_hz = edx = 0;

	/* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
	cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);

	if (ebx_numerator == 0 || eax_denominator == 0)
		return 0;

	crystal_khz = ecx_hz / 1000;

	/*
	 * Denverton SoCs don't report crystal clock, and also don't support
	 * CPUID.0x16 for the calculation below, so hardcode the 25MHz crystal
	 * clock.
	 */
	if (crystal_khz == 0 &&
	    boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT_X)
		crystal_khz = 25000;

	/*
	 * TSC frequency reported directly by CPUID is a "hardware reported"
	 * frequency and is the most accurate one we have so far. This is
	 * considered a known frequency.
	 */
	if (crystal_khz != 0)
		setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);

	/*
	 * Some Intel SoCs like Skylake and Kabylake don't report the crystal
	 * clock, but we can easily calculate it to a high degree of accuracy
	 * by considering the crystal ratio and the CPU speed.
	 */
	if (crystal_khz == 0 && boot_cpu_data.cpuid_level >= 0x16) {
		unsigned int eax_base_mhz, ebx, ecx, edx;

		cpuid(0x16, &eax_base_mhz, &ebx, &ecx, &edx);
		crystal_khz = eax_base_mhz * 1000 *
			eax_denominator / ebx_numerator;
	}

	if (crystal_khz == 0)
		return 0;

	/*
	 * For Atom SoCs TSC is the only reliable clocksource.
	 * Mark TSC reliable so no watchdog on it.
	 */
	if (boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT)
		setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * The local APIC appears to be fed by the core crystal clock
	 * (which sounds entirely sensible). We can set the global
	 * lapic_timer_period here to avoid having to calibrate the APIC
	 * timer later.
	 */
	lapic_timer_period = crystal_khz * 1000 / HZ;
#endif

	return crystal_khz * ebx_numerator / eax_denominator;
}

static unsigned long cpu_khz_from_cpuid(void)
{
	unsigned int eax_base_mhz, ebx_max_mhz, ecx_bus_mhz, edx;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (boot_cpu_data.cpuid_level < 0x16)
		return 0;

	eax_base_mhz = ebx_max_mhz = ecx_bus_mhz = edx = 0;

	cpuid(0x16, &eax_base_mhz, &ebx_max_mhz, &ecx_bus_mhz, &edx);

	return eax_base_mhz * 1000;
}
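/*
 * Example of the CPUID.15H math above (hypothetical leaf values, not from a
 * specific part): with EAX (denominator) = 2, EBX (numerator) = 176 and a
 * 24 MHz crystal (crystal_khz = 24000), the TSC runs at
 *
 *	24000 * 176 / 2 = 2,112,000 kHz (about 2.1 GHz),
 *
 * and the same 24 MHz crystal feeds the local APIC timer. When ECX is zero,
 * the Skylake/Kabylake fallback instead derives crystal_khz from the
 * CPUID.16H base frequency: crystal_khz = base_mhz * 1000 * EAX / EBX.
 */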
/*
 * Calibrate the CPU using the PIT, HPET and ACPI PM timer methods. They
 * are available later in boot, after ACPI has been initialized.
 */
static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
{
	u64 tsc1, tsc2, delta, ref1, ref2;
	unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
	unsigned long flags, latch, ms;
	int hpet = is_hpet_enabled(), i, loopmin;

	/*
	 * Run 5 calibration loops to get the lowest frequency value
	 * (the best estimate). We use two different calibration modes
	 * here:
	 *
	 * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
	 * load a timeout of 50ms. We read the time right after we
	 * started the timer and wait until the PIT count down reaches
	 * zero. In each wait loop iteration we read the TSC and check
	 * the delta to the previous read. We keep track of the min
	 * and max values of that delta. The delta is mostly defined
	 * by the IO time of the PIT access, so we can detect when
	 * any disturbance happened between the two reads. If the
	 * maximum time is significantly larger than the minimum time,
	 * then we discard the result and have another try.
	 *
	 * 2) Reference counter. If available we use the HPET or the
	 * PMTIMER as a reference to check the sanity of that value.
	 * We use separate TSC readouts and check inside of the
	 * reference read for any possible disturbance. We discard
	 * disturbed values here as well. We do that around the PIT
	 * calibration delay loop as we have to wait for a certain
	 * amount of time anyway.
	 */

	/* Preset PIT loop values */
	latch = CAL_LATCH;
	ms = CAL_MS;
	loopmin = CAL_PIT_LOOPS;

	for (i = 0; i < 3; i++) {
		unsigned long tsc_pit_khz;

		/*
		 * Read the start value and the reference count of
		 * hpet/pmtimer when available. Then do the PIT
		 * calibration, which will take at least 50ms, and
		 * read the end value.
		 */
		local_irq_save(flags);
		tsc1 = tsc_read_refs(&ref1, hpet);
		tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
		tsc2 = tsc_read_refs(&ref2, hpet);
		local_irq_restore(flags);

		/* Pick the lowest PIT TSC calibration so far */
		tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);

		/* hpet or pmtimer available ? */
		if (ref1 == ref2)
			continue;

		/* Check whether the sampling was disturbed */
		if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
			continue;

		tsc2 = (tsc2 - tsc1) * 1000000LL;
		if (hpet)
			tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
		else
			tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);

		tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);

		/* Check the reference deviation */
		delta = ((u64) tsc_pit_min) * 100;
		do_div(delta, tsc_ref_min);

		/*
		 * If both calibration results are inside a 10% window
		 * then we can be sure that the calibration
		 * succeeded. We break out of the loop right away. We
		 * use the reference value, as it is more precise.
		 */
		if (delta >= 90 && delta <= 110) {
			pr_info("PIT calibration matches %s. %d loops\n",
				hpet ? "HPET" : "PMTIMER", i + 1);
			return tsc_ref_min;
		}

		/*
		 * Check whether PIT failed more than once. This
		 * happens in virtualized environments. We need to
		 * give the virtual PC a slightly longer timeframe for
		 * the HPET/PMTIMER to make the result precise.
		 */
		if (i == 1 && tsc_pit_min == ULONG_MAX) {
			latch = CAL2_LATCH;
			ms = CAL2_MS;
			loopmin = CAL2_PIT_LOOPS;
		}
	}

	/*
	 * Now check the results.
	 */
	if (tsc_pit_min == ULONG_MAX) {
		/* PIT gave no useful value */
		pr_warn("Unable to calibrate against PIT\n");

		/* We don't have an alternative source, disable TSC */
		if (!hpet && !ref1 && !ref2) {
			pr_notice("No reference (HPET/PMTIMER) available\n");
			return 0;
		}

		/* The alternative source failed as well, disable TSC */
		if (tsc_ref_min == ULONG_MAX) {
			pr_warn("HPET/PMTIMER calibration failed\n");
			return 0;
		}

		/* Use the alternative source */
		pr_info("using %s reference calibration\n",
			hpet ? "HPET" : "PMTIMER");

		return tsc_ref_min;
	}

	/* We don't have an alternative source, use the PIT calibration value */
	if (!hpet && !ref1 && !ref2) {
		pr_info("Using PIT calibration value\n");
		return tsc_pit_min;
	}

	/* The alternative source failed, use the PIT calibration value */
	if (tsc_ref_min == ULONG_MAX) {
		pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n");
		return tsc_pit_min;
	}

	/*
	 * The calibration values differ too much. In doubt, we use
	 * the PIT value as we know that there are PMTIMERs around
	 * running at double speed. At least we let the user know:
	 */
	pr_warn("PIT calibration deviates from %s: %lu %lu\n",
		hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
	pr_info("Using PIT calibration value\n");
	return tsc_pit_min;
}

/**
 * native_calibrate_cpu_early - can calibrate the cpu early in boot
 */
unsigned long native_calibrate_cpu_early(void)
{
	unsigned long flags, fast_calibrate = cpu_khz_from_cpuid();

	if (!fast_calibrate)
		fast_calibrate = cpu_khz_from_msr();
	if (!fast_calibrate) {
		local_irq_save(flags);
		fast_calibrate = quick_pit_calibrate();
		local_irq_restore(flags);
	}
	return fast_calibrate;
}


/**
 * native_calibrate_cpu - calibrate the cpu
 */
static unsigned long native_calibrate_cpu(void)
{
	unsigned long tsc_freq = native_calibrate_cpu_early();

	if (!tsc_freq)
		tsc_freq = pit_hpet_ptimer_calibrate_cpu();

	return tsc_freq;
}

void recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (!boot_cpu_has(X86_FEATURE_TSC))
		return;

	cpu_khz = x86_platform.calibrate_cpu();
	tsc_khz = x86_platform.calibrate_tsc();
	if (tsc_khz == 0)
		tsc_khz = cpu_khz;
	else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
		cpu_khz = tsc_khz;
	cpu_data(0).loops_per_jiffy = cpufreq_scale(cpu_data(0).loops_per_jiffy,
						    cpu_khz_old, cpu_khz);
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);


static unsigned long long cyc2ns_suspend;

void tsc_save_sched_clock_state(void)
{
	if (!sched_clock_stable())
		return;

	cyc2ns_suspend = sched_clock();
}

/*
 * Even on processors with invariant TSC, the TSC gets reset in some of the
 * ACPI system sleep states. And in some systems the BIOS seems to reinit the
 * TSC to an arbitrary value (still sync'd across CPUs) during resume from
 * such sleep states. To cope with this, recompute the cyc2ns_offset for each
 * CPU so that sched_clock() continues from the point where it was left off
 * during suspend.
 */
void tsc_restore_sched_clock_state(void)
{
	unsigned long long offset;
	unsigned long flags;
	int cpu;

	if (!sched_clock_stable())
		return;

	local_irq_save(flags);

	/*
	 * We're coming out of suspend, there's no concurrency yet; don't
	 * bother being nice about the RCU stuff, just write to both
	 * data fields.
	 */

	this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0);
	this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0);

	offset = cyc2ns_suspend - sched_clock();

	for_each_possible_cpu(cpu) {
		per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset;
		per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
	}

	local_irq_restore(flags);
}
#ifdef CONFIG_CPU_FREQ
/*
 * Frequency scaling support. Adjust the TSC based timer when the CPU frequency
 * changes.
 *
 * NOTE: On SMP the situation is not fixable in general, so simply mark the TSC
 * as unstable and give up in those cases.
 *
 * Should fix up last_tsc too. Currently gettimeofday in the
 * first tick after the change will be slightly wrong.
 */

static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long tsc_khz_ref;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct cpufreq_freqs *freq = data;

	if (num_online_cpus() > 1) {
		mark_tsc_unstable("cpufreq changes on SMP");
		return 0;
	}

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
		tsc_khz_ref = tsc_khz;
	}

	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		boot_cpu_data.loops_per_jiffy =
			cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			mark_tsc_unstable("cpufreq changes");

		set_cyc2ns_scale(tsc_khz, freq->policy->cpu, rdtsc());
	}

	return 0;
}

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call  = time_cpufreq_notifier
};

static int __init cpufreq_register_tsc_scaling(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC))
		return 0;
	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;
	cpufreq_register_notifier(&time_cpufreq_notifier_block,
				  CPUFREQ_TRANSITION_NOTIFIER);
	return 0;
}

core_initcall(cpufreq_register_tsc_scaling);

#endif /* CONFIG_CPU_FREQ */
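/*
 * Illustration of the scaling done by the cpufreq notifier above (made-up
 * numbers): if the reference state was ref_freq = 2,000,000 kHz with
 * tsc_khz_ref = 2,000,000, and cpufreq switches the (single) CPU to
 * freq->new = 1,000,000 kHz, then
 *
 *	tsc_khz = cpufreq_scale(2,000,000, 2,000,000, 1,000,000) = 1,000,000
 *
 * and set_cyc2ns_scale() rebuilds the mult/shift pair so that sched_clock()
 * keeps advancing at the correct rate at the new frequency.
 */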
#define ART_CPUID_LEAF (0x15)
#define ART_MIN_DENOMINATOR (1)


/*
 * If ART is present detect the numerator:denominator to convert to TSC
 */
static void __init detect_art(void)
{
	unsigned int unused[2];

	if (boot_cpu_data.cpuid_level < ART_CPUID_LEAF)
		return;

	/*
	 * Don't enable ART in a VM, non-stop TSC and TSC_ADJUST required,
	 * and the TSC counter resets must not occur asynchronously.
	 */
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR) ||
	    !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
	    !boot_cpu_has(X86_FEATURE_TSC_ADJUST) ||
	    tsc_async_resets)
		return;

	cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator,
	      &art_to_tsc_numerator, unused, unused+1);

	if (art_to_tsc_denominator < ART_MIN_DENOMINATOR)
		return;

	rdmsrl(MSR_IA32_TSC_ADJUST, art_to_tsc_offset);

	/* Make this sticky over multiple CPU init calls */
	setup_force_cpu_cap(X86_FEATURE_ART);
}


/* clocksource code */

static void tsc_resume(struct clocksource *cs)
{
	tsc_verify_tsc_adjust(true);
}

/*
 * We used to compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp. This can be observed in a
 * very small window right after one CPU updated cycle_last under
 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
 * is smaller than the cycle_last reference value due to a TSC which
 * is slightly behind. This delta is nowhere else observable, but in
 * that case it results in a forward time jump in the range of hours
 * due to the unsigned delta calculation of the time keeping core
 * code, which is necessary to support wrapping clocksources like pm
 * timer.
 *
 * This sanity check is now done in the core timekeeping code, by
 * checking the result of read_tsc() - cycle_last for being negative.
 * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
 */
static u64 read_tsc(struct clocksource *cs)
{
	return (u64)rdtsc_ordered();
}

static void tsc_cs_mark_unstable(struct clocksource *cs)
{
	if (tsc_unstable)
		return;

	tsc_unstable = 1;
	if (using_native_sched_clock())
		clear_sched_clock_stable();
	disable_sched_clock_irqtime();
	pr_info("Marking TSC unstable due to clocksource watchdog\n");
}

static void tsc_cs_tick_stable(struct clocksource *cs)
{
	if (tsc_unstable)
		return;

	if (using_native_sched_clock())
		sched_clock_tick_stable();
}

/*
 * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
 */
static struct clocksource clocksource_tsc_early = {
	.name			= "tsc-early",
	.rating			= 299,
	.read			= read_tsc,
	.mask			= CLOCKSOURCE_MASK(64),
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_MUST_VERIFY,
	.archdata		= { .vclock_mode = VCLOCK_TSC },
	.resume			= tsc_resume,
	.mark_unstable		= tsc_cs_mark_unstable,
	.tick_stable		= tsc_cs_tick_stable,
	.list			= LIST_HEAD_INIT(clocksource_tsc_early.list),
};

/*
 * Must mark VALID_FOR_HRES early such that when we unregister tsc_early
 * this one will immediately take over. We will only register if TSC has
 * been found good.
 */
static struct clocksource clocksource_tsc = {
	.name			= "tsc",
	.rating			= 300,
	.read			= read_tsc,
	.mask			= CLOCKSOURCE_MASK(64),
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_VALID_FOR_HRES |
				  CLOCK_SOURCE_MUST_VERIFY,
	.archdata		= { .vclock_mode = VCLOCK_TSC },
	.resume			= tsc_resume,
	.mark_unstable		= tsc_cs_mark_unstable,
	.tick_stable		= tsc_cs_tick_stable,
	.list			= LIST_HEAD_INIT(clocksource_tsc.list),
};

void mark_tsc_unstable(char *reason)
{
	if (tsc_unstable)
		return;

	tsc_unstable = 1;
	if (using_native_sched_clock())
		clear_sched_clock_stable();
	disable_sched_clock_irqtime();
	pr_info("Marking TSC unstable due to %s\n", reason);

	clocksource_mark_unstable(&clocksource_tsc_early);
	clocksource_mark_unstable(&clocksource_tsc);
}

EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static void __init check_system_tsc_reliable(void)
{
#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
	if (is_geode_lx()) {
		/* RTSC counts during suspend */
#define RTSC_SUSP 0x100
		unsigned long res_low, res_high;

		rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
		/* Geode_LX - the OLPC CPU has a very reliable TSC */
		if (res_low & RTSC_SUSP)
			tsc_clocksource_reliable = 1;
	}
#endif
	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
		tsc_clocksource_reliable = 1;
}
/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
int unsynchronized_tsc(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_unstable)
		return 1;

#ifdef CONFIG_SMP
	if (apic_is_clustered_box())
		return 1;
#endif

	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;

	if (tsc_clocksource_reliable)
		return 0;
	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		/* assume multi socket systems are not synchronized: */
		if (num_possible_cpus() > 1)
			return 1;
	}

	return 0;
}

/*
 * Convert ART to TSC given numerator/denominator found in detect_art()
 */
struct system_counterval_t convert_art_to_tsc(u64 art)
{
	u64 tmp, res, rem;

	rem = do_div(art, art_to_tsc_denominator);

	res = art * art_to_tsc_numerator;
	tmp = rem * art_to_tsc_numerator;

	do_div(tmp, art_to_tsc_denominator);
	res += tmp + art_to_tsc_offset;

	return (struct system_counterval_t) {.cs = art_related_clocksource,
					     .cycles = res};
}
EXPORT_SYMBOL(convert_art_to_tsc);

/**
 * convert_art_ns_to_tsc() - Convert ART in nanoseconds to TSC.
 * @art_ns: ART (Always Running Timer) in unit of nanoseconds
 *
 * PTM requires all timestamps to be in units of nanoseconds. When user
 * software requests a cross-timestamp, this function converts system timestamp
 * to TSC.
 *
 * This is valid when CPU feature flag X86_FEATURE_TSC_KNOWN_FREQ is set
 * indicating the tsc_khz is derived from CPUID[15H]. Drivers should check
 * that this flag is set before conversion to TSC is attempted.
 *
 * Return:
 * struct system_counterval_t - system counter value with the pointer to the
 *	corresponding clocksource
 *	@cycles:	System counter value
 *	@cs:		Clocksource corresponding to system counter value. Used
 *			by timekeeping code to verify comparability of two cycle
 *			values.
 */

struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns)
{
	u64 tmp, res, rem;

	rem = do_div(art_ns, USEC_PER_SEC);

	res = art_ns * tsc_khz;
	tmp = rem * tsc_khz;

	do_div(tmp, USEC_PER_SEC);
	res += tmp;

	return (struct system_counterval_t) { .cs = art_related_clocksource,
					      .cycles = res};
}
EXPORT_SYMBOL(convert_art_ns_to_tsc);
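/*
 * Worked example for convert_art_ns_to_tsc() (illustrative values): with
 * tsc_khz = 2,000,000 and art_ns = 1,000,000,123 ns, the split against
 * USEC_PER_SEC keeps the 64-bit intermediate products from overflowing:
 *
 *	art_ns / 1e6 = 1000, rem = 123
 *	res = 1000 * 2,000,000       = 2,000,000,000
 *	tmp = 123 * 2,000,000 / 1e6  = 246
 *	cycles = 2,000,000,246
 *
 * which is just over one second's worth of cycles on a 2 GHz TSC, as
 * expected. convert_art_to_tsc() uses the same split with the CPUID.15H
 * numerator/denominator plus the IA32_TSC_ADJUST offset.
 */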
static void tsc_refine_calibration_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
/**
 * tsc_refine_calibration_work - Further refine tsc freq calibration
 * @work: ignored.
 *
 * This function uses delayed work over a period of a
 * second to further refine the TSC freq value. Since this is
 * timer based, instead of loop based, we don't block the boot
 * process while this longer calibration is done.
 *
 * If there are any calibration anomalies (too many SMIs, etc),
 * or the refined calibration is off by more than 1% from the
 * early calibration, we throw out the new calibration and use
 * the early calibration.
 */
static void tsc_refine_calibration_work(struct work_struct *work)
{
	static u64 tsc_start = ULLONG_MAX, ref_start;
	static int hpet;
	u64 tsc_stop, ref_stop, delta;
	unsigned long freq;
	int cpu;

	/* Don't bother refining TSC on unstable systems */
	if (tsc_unstable)
		goto unreg;

	/*
	 * Since the work is started early in boot, we may be
	 * delayed the first time we expire. So set the workqueue
	 * again once we know timers are working.
	 */
	if (tsc_start == ULLONG_MAX) {
restart:
		/*
		 * Only set hpet once, to avoid mixing hardware
		 * if the hpet becomes enabled later.
		 */
		hpet = is_hpet_enabled();
		tsc_start = tsc_read_refs(&ref_start, hpet);
		schedule_delayed_work(&tsc_irqwork, HZ);
		return;
	}

	tsc_stop = tsc_read_refs(&ref_stop, hpet);

	/* hpet or pmtimer available ? */
	if (ref_start == ref_stop)
		goto out;

	/* Check whether the sampling was disturbed */
	if (tsc_stop == ULLONG_MAX)
		goto restart;

	delta = tsc_stop - tsc_start;
	delta *= 1000000LL;
	if (hpet)
		freq = calc_hpet_ref(delta, ref_start, ref_stop);
	else
		freq = calc_pmtimer_ref(delta, ref_start, ref_stop);

	/* Make sure we're within 1% */
	if (abs(tsc_khz - freq) > tsc_khz/100)
		goto out;

	tsc_khz = freq;
	pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n",
		(unsigned long)tsc_khz / 1000,
		(unsigned long)tsc_khz % 1000);

	/* Inform the TSC deadline clockevent devices about the recalibration */
	lapic_update_tsc_freq();

	/* Update the sched_clock() rate to match the clocksource one */
	for_each_possible_cpu(cpu)
		set_cyc2ns_scale(tsc_khz, cpu, tsc_stop);

out:
	if (tsc_unstable)
		goto unreg;

	if (boot_cpu_has(X86_FEATURE_ART))
		art_related_clocksource = &clocksource_tsc;
	clocksource_register_khz(&clocksource_tsc, tsc_khz);
unreg:
	clocksource_unregister(&clocksource_tsc_early);
}


static int __init init_tsc_clocksource(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC) || !tsc_khz)
		return 0;

	if (tsc_unstable)
		goto unreg;

	if (tsc_clocksource_reliable || no_tsc_watchdog)
		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;

	if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
		clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;

	/*
	 * When TSC frequency is known (retrieved via MSR or CPUID), we skip
	 * the refined calibration and directly register it as a clocksource.
	 */
	if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
		if (boot_cpu_has(X86_FEATURE_ART))
			art_related_clocksource = &clocksource_tsc;
		clocksource_register_khz(&clocksource_tsc, tsc_khz);
unreg:
		clocksource_unregister(&clocksource_tsc_early);
		return 0;
	}

	schedule_delayed_work(&tsc_irqwork, 0);
	return 0;
}
/*
 * We use device_initcall here, to ensure we run after the hpet
 * is fully initialized, which may occur at fs_initcall time.
 */
device_initcall(init_tsc_clocksource);

static bool __init determine_cpu_tsc_frequencies(bool early)
{
	/* Make sure that cpu and tsc are not already calibrated */
	WARN_ON(cpu_khz || tsc_khz);

	if (early) {
		cpu_khz = x86_platform.calibrate_cpu();
		tsc_khz = x86_platform.calibrate_tsc();
	} else {
		/* We should not be here with non-native cpu calibration */
		WARN_ON(x86_platform.calibrate_cpu != native_calibrate_cpu);
		cpu_khz = pit_hpet_ptimer_calibrate_cpu();
	}

	/*
	 * Trust non-zero tsc_khz as authoritative,
	 * and use it to sanity check cpu_khz,
	 * which will be off if system timer is off.
	 */
	if (tsc_khz == 0)
		tsc_khz = cpu_khz;
	else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
		cpu_khz = tsc_khz;

	if (tsc_khz == 0)
		return false;

	pr_info("Detected %lu.%03lu MHz processor\n",
		(unsigned long)cpu_khz / KHZ,
		(unsigned long)cpu_khz % KHZ);

	if (cpu_khz != tsc_khz) {
		pr_info("Detected %lu.%03lu MHz TSC\n",
			(unsigned long)tsc_khz / KHZ,
			(unsigned long)tsc_khz % KHZ);
	}
	return true;
}

static unsigned long __init get_loops_per_jiffy(void)
{
	u64 lpj = (u64)tsc_khz * KHZ;

	do_div(lpj, HZ);
	return lpj;
}

static void __init tsc_enable_sched_clock(void)
{
	/* Sanitize TSC ADJUST before cyc2ns gets initialized */
	tsc_store_and_check_tsc_adjust(true);
	cyc2ns_init_boot_cpu();
	static_branch_enable(&__use_tsc);
}

void __init tsc_early_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC))
		return;
	/* Don't change UV TSC multi-chassis synchronization */
	if (is_early_uv_system())
		return;
	if (!determine_cpu_tsc_frequencies(true))
		return;
	loops_per_jiffy = get_loops_per_jiffy();

	tsc_enable_sched_clock();
}

void __init tsc_init(void)
{
	/*
	 * native_calibrate_cpu_early can only calibrate using methods that are
	 * available early in boot.
	 */
	if (x86_platform.calibrate_cpu == native_calibrate_cpu_early)
		x86_platform.calibrate_cpu = native_calibrate_cpu;

	if (!boot_cpu_has(X86_FEATURE_TSC)) {
		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
		return;
	}

	if (!tsc_khz) {
		/* We failed to determine frequencies earlier, try again */
		if (!determine_cpu_tsc_frequencies(false)) {
			mark_tsc_unstable("could not calculate TSC khz");
			setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
			return;
		}
		tsc_enable_sched_clock();
	}

	cyc2ns_init_secondary_cpus();

	if (!no_sched_irq_time)
		enable_sched_clock_irqtime();

	lpj_fine = get_loops_per_jiffy();
	use_tsc_delay();

	check_system_tsc_reliable();

	if (unsynchronized_tsc()) {
		mark_tsc_unstable("TSCs unsynchronized");
		return;
	}

	clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
	detect_art();
}
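/*
 * Example for get_loops_per_jiffy() above (illustrative numbers): with
 * tsc_khz = 2,000,000 and HZ = 250, lpj = 2,000,000 * 1000 / 250 = 8,000,000
 * TSC cycles per jiffy, so the delay calibration does not need to be
 * measured with a timing loop.
 */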
#ifdef CONFIG_SMP
/*
 * If we have a constant TSC and are using the TSC for the delay loop,
 * we can skip clock calibration if another cpu in the same socket has
 * already been calibrated. This assumes that CONSTANT_TSC applies to all
 * cpus in the socket - this should be a safe assumption.
 */
unsigned long calibrate_delay_is_known(void)
{
	int sibling, cpu = smp_processor_id();
	int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC);
	const struct cpumask *mask = topology_core_cpumask(cpu);

	if (!constant_tsc || !mask)
		return 0;

	sibling = cpumask_any_but(mask, cpu);
	if (sibling < nr_cpu_ids)
		return cpu_data(sibling).loops_per_jiffy;
	return 0;
}
#endif