#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/acpi_pmtmr.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/percpu.h>
#include <linux/timex.h>

#include <asm/hpet.h>
#include <asm/timer.h>
#include <asm/vgtod.h>
#include <asm/time.h>
#include <asm/delay.h>
#include <asm/hypervisor.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>

unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);

unsigned int __read_mostly tsc_khz;
EXPORT_SYMBOL(tsc_khz);

/*
 * TSC can be unstable due to cpufreq or due to unsynced TSCs
 */
static int __read_mostly tsc_unstable;

/*
 * native_sched_clock() is called before tsc_init(), so we must start
 * with the TSC soft disabled to prevent erroneous rdtsc usage on
 * !cpu_has_tsc processors.
 */
static int __read_mostly tsc_disabled = -1;

static int tsc_clocksource_reliable;

/*
 * Scheduler clock - returns current time in nanosec units.
 */
u64 native_sched_clock(void)
{
	u64 this_offset;

	/*
	 * Fall back to jiffies if there's no TSC available:
	 * ( But note that we still use it if the TSC is marked
	 *   unstable. We do this because unlike Time Of Day,
	 *   the scheduler clock tolerates small errors and it's
	 *   very important for it to be as fast as the platform
	 *   can achieve it. )
	 */
	if (unlikely(tsc_disabled)) {
		/* No locking but a rare wrong value is not a big deal: */
		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
	}

	/* read the Time Stamp Counter: */
	rdtscll(this_offset);

	/* return the value in ns */
	return __cycles_2_ns(this_offset);
}

/*
 * We need to define a real function for sched_clock(), to override the
 * weak default version.
 */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}
#else
unsigned long long
sched_clock(void) __attribute__((alias("native_sched_clock")));
#endif

int check_tsc_unstable(void)
{
	return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
			"cannot disable TSC completely.\n");
	tsc_disabled = 1;
	return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
int __init notsc_setup(char *str)
{
	setup_clear_cpu_cap(X86_FEATURE_TSC);
	return 1;
}
#endif

__setup("notsc", notsc_setup);

static int no_sched_irq_time;

static int __init tsc_setup(char *str)
{
	if (!strcmp(str, "reliable"))
		tsc_clocksource_reliable = 1;
	if (!strncmp(str, "noirqtime", 9))
		no_sched_irq_time = 1;
	return 1;
}

__setup("tsc=", tsc_setup);

#define MAX_RETRIES	5
#define SMI_TRESHOLD	50000

/*
 * Read TSC and the reference counters. Take care of SMI disturbance.
 */
static u64 tsc_read_refs(u64 *p, int hpet)
{
	u64 t1, t2;
	int i;

	for (i = 0; i < MAX_RETRIES; i++) {
		t1 = get_cycles();
		if (hpet)
			*p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
		else
			*p = acpi_pm_read_early();
		t2 = get_cycles();
		if ((t2 - t1) < SMI_TRESHOLD)
			return t2;
	}
	return ULLONG_MAX;
}
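/*
 * Worked note on the retry loop above (illustrative, not from the
 * original source): the two get_cycles() reads bracket a single
 * reference-counter read, so a sample is only accepted when the whole
 * sequence completed within SMI_TRESHOLD = 50,000 TSC cycles (about
 * 25 us on an assumed 2 GHz part). An SMI typically stalls the CPU
 * for far longer than that, so a disturbed sample fails the check and
 * is retried, up to MAX_RETRIES times before giving up with
 * ULLONG_MAX.
 */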
/*
 * Calculate the TSC frequency from HPET reference
 */
static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
{
	u64 tmp;

	if (hpet2 < hpet1)
		hpet2 += 0x100000000ULL;
	hpet2 -= hpet1;
	tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
	do_div(tmp, 1000000);
	do_div(deltatsc, tmp);

	return (unsigned long) deltatsc;
}

/*
 * Calculate the TSC frequency from PMTimer reference
 */
static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
{
	u64 tmp;

	if (!pm1 && !pm2)
		return ULONG_MAX;

	if (pm2 < pm1)
		pm2 += (u64)ACPI_PM_OVRRUN;
	pm2 -= pm1;
	tmp = pm2 * 1000000000LL;
	do_div(tmp, PMTMR_TICKS_PER_SEC);
	do_div(deltatsc, tmp);

	return (unsigned long) deltatsc;
}
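/*
 * Units check for the two helpers above (illustrative, not from the
 * original source): callers pass deltatsc = (TSC cycles) * 10^6. The
 * HPET path converts the reference delta to nanoseconds via
 * HPET_PERIOD (femtoseconds per tick, hence the do_div by 10^6); the
 * PM timer path does the same via its fixed 3.579545 MHz rate
 * (PMTMR_TICKS_PER_SEC). Dividing cycles * 10^6 by the elapsed ns
 * yields cycles per millisecond, i.e. the TSC frequency in kHz:
 *
 *	kHz = cycles * 10^6 / ns = cycles / ms
 */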
#define CAL_MS		10
#define CAL_LATCH	(CLOCK_TICK_RATE / (1000 / CAL_MS))
#define CAL_PIT_LOOPS	1000

#define CAL2_MS		50
#define CAL2_LATCH	(CLOCK_TICK_RATE / (1000 / CAL2_MS))
#define CAL2_PIT_LOOPS	5000


/*
 * Try to calibrate the TSC against the Programmable
 * Interrupt Timer and return the frequency of the TSC
 * in kHz.
 *
 * Return ULONG_MAX on failure to calibrate.
 */
static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
{
	u64 tsc, t1, t2, delta;
	unsigned long tscmin, tscmax;
	int pitcnt;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Setup CTC channel 2 for mode 0, (interrupt on terminal
	 * count mode), binary count. Set the latch register to the
	 * requested timeout (LSB then MSB) to begin countdown.
	 */
	outb(0xb0, 0x43);
	outb(latch & 0xff, 0x42);
	outb(latch >> 8, 0x42);

	tsc = t1 = t2 = get_cycles();

	pitcnt = 0;
	tscmax = 0;
	tscmin = ULONG_MAX;
	while ((inb(0x61) & 0x20) == 0) {
		t2 = get_cycles();
		delta = t2 - tsc;
		tsc = t2;
		if ((unsigned long) delta < tscmin)
			tscmin = (unsigned int) delta;
		if ((unsigned long) delta > tscmax)
			tscmax = (unsigned int) delta;
		pitcnt++;
	}

	/*
	 * Sanity checks:
	 *
	 * If we were not able to read the PIT more than loopmin
	 * times, then we have been hit by a massive SMI
	 *
	 * If the maximum is 10 times larger than the minimum,
	 * then we got hit by an SMI as well.
	 */
	if (pitcnt < loopmin || tscmax > 10 * tscmin)
		return ULONG_MAX;

	/* Calculate the PIT value */
	delta = t2 - t1;
	do_div(delta, ms);
	return delta;
}

/*
 * This reads the current MSB of the PIT counter, and
 * checks if we are running on sufficiently fast and
 * non-virtualized hardware.
 *
 * Our expectations are:
 *
 *  - the PIT is running at roughly 1.19MHz
 *
 *  - each IO is going to take about 1us on real hardware,
 *    but we allow it to be much faster (by a factor of 10) or
 *    _slightly_ slower (ie we allow up to a 2us read+counter
 *    update - anything else implies an unacceptably slow CPU
 *    or PIT for the fast calibration to work).
 *
 *  - with 256 PIT ticks to read the value, we have 214us to
 *    see the same MSB (and overhead like doing a single TSC
 *    read per MSB value etc).
 *
 *  - We're doing 2 reads per loop (LSB, MSB), and we expect
 *    them each to take about a microsecond on real hardware.
 *    So we expect a count value of around 100. But we'll be
 *    generous, and accept anything over 50.
 *
 *  - if the PIT is stuck, and we see *many* more reads, we
 *    return early (and the next caller of pit_expect_msb()
 *    will then consider it a failure when they don't see the
 *    next expected value).
 *
 * These expectations mean that we know that we have seen the
 * transition from one expected value to another with a fairly
 * high accuracy, and we didn't miss any events. We can thus
 * use the TSC value at the transitions to calculate a pretty
 * good value for the TSC frequency.
 */
static inline int pit_verify_msb(unsigned char val)
{
	/* Ignore LSB */
	inb(0x42);
	return inb(0x42) == val;
}

static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
{
	int count;
	u64 tsc = 0;

	for (count = 0; count < 50000; count++) {
		if (!pit_verify_msb(val))
			break;
		tsc = get_cycles();
	}
	*deltap = get_cycles() - tsc;
	*tscp = tsc;

	/*
	 * We require _some_ success, but the quality control
	 * will be based on the error terms on the TSC values.
	 */
	return count > 5;
}

/*
 * How many MSB values do we want to see? We aim for
 * a maximum error rate of 500ppm (in practice the
 * real error is much smaller), but refuse to spend
 * more than 25ms on it.
 */
#define MAX_QUICK_PIT_MS 25
#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
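/*
 * Worked arithmetic for the limit above (illustrative, not from the
 * original source): with PIT_TICK_RATE = 1193182 Hz,
 *
 *	MAX_QUICK_PIT_ITERATIONS = 25 * 1193182 / 1000 / 256 ~= 116
 *
 * Each MSB step corresponds to 256 PIT ticks, i.e. roughly
 * 256 / 1193182 s ~= 214.6 us, so ~116 steps cover the 25 ms budget.
 */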
static unsigned long quick_pit_calibrate(void)
{
	int i;
	u64 tsc, delta;
	unsigned long d1, d2;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Counter 2, mode 0 (one-shot), binary count
	 *
	 * NOTE! Mode 2 decrements by two (and then the
	 * output is flipped each time, giving the same
	 * final output frequency as a decrement-by-one),
	 * so mode 0 is much better when looking at the
	 * individual counts.
	 */
	outb(0xb0, 0x43);

	/* Start at 0xffff */
	outb(0xff, 0x42);
	outb(0xff, 0x42);

	/*
	 * The PIT starts counting at the next edge, so we
	 * need to delay for a microsecond. The easiest way
	 * to do that is to just read back the 16-bit counter
	 * once from the PIT.
	 */
	pit_verify_msb(0);

	if (pit_expect_msb(0xff, &tsc, &d1)) {
		for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
			if (!pit_expect_msb(0xff-i, &delta, &d2))
				break;

			/*
			 * Iterate until the error is less than 500 ppm
			 */
			delta -= tsc;
			if (d1+d2 >= delta >> 11)
				continue;

			/*
			 * Check the PIT one more time to verify that
			 * all TSC reads were stable wrt the PIT.
			 *
			 * This also guarantees serialization of the
			 * last cycle read ('d2') in pit_expect_msb.
			 */
			if (!pit_verify_msb(0xfe - i))
				break;
			goto success;
		}
	}
	printk("Fast TSC calibration failed\n");
	return 0;

success:
	/*
	 * Ok, if we get here, then we've seen the
	 * MSB of the PIT decrement 'i' times, and the
	 * error has shrunk to less than 500 ppm.
	 *
	 * As a result, we can depend on there not being
	 * any odd delays anywhere, and the TSC reads are
	 * reliable (within the error). We also adjust the
	 * delta to the middle of the error bars, just
	 * because it looks nicer.
	 *
	 * kHz = ticks / time-in-seconds / 1000;
	 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
	 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
	 */
	delta += (long)(d2 - d1)/2;
	delta *= PIT_TICK_RATE;
	do_div(delta, i*256*1000);
	printk("Fast TSC calibration using PIT\n");
	return delta;
}
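/*
 * Worked example of the kHz conversion above (illustrative, assuming a
 * 2 GHz TSC; not from the original source): if the MSB decremented
 * i = 116 times, the elapsed time is 116 * 256 / 1193182 s ~= 24.9 ms,
 * so delta ~= 49.8e6 TSC cycles, and
 *
 *	49.8e6 * 1193182 / (116 * 256 * 1000) ~= 2,000,000 kHz
 *
 * The 500 ppm bound comes from the "d1+d2 >= delta >> 11" test in the
 * loop: success requires d1 + d2 < delta / 2048, and 1/2048 ~= 488 ppm.
 */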
/**
 * native_calibrate_tsc - calibrate the tsc on boot
 */
unsigned long native_calibrate_tsc(void)
{
	u64 tsc1, tsc2, delta, ref1, ref2;
	unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
	unsigned long flags, latch, ms, fast_calibrate;
	int hpet = is_hpet_enabled(), i, loopmin;

	local_irq_save(flags);
	fast_calibrate = quick_pit_calibrate();
	local_irq_restore(flags);
	if (fast_calibrate)
		return fast_calibrate;

	/*
	 * Run 3 calibration loops to get the lowest frequency value
	 * (the best estimate). We use two different calibration modes
	 * here:
	 *
	 * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
	 * load a timeout of 10ms (widened to 50ms if the PIT keeps
	 * failing, see below). We read the time right after we
	 * started the timer and wait until the PIT count down reaches
	 * zero. In each wait loop iteration we read the TSC and check
	 * the delta to the previous read. We keep track of the min
	 * and max values of that delta. The delta is mostly defined
	 * by the IO time of the PIT access, so we can detect when an
	 * SMI/SMM disturbance happened between the two reads. If the
	 * maximum time is significantly larger than the minimum time,
	 * then we discard the result and have another try.
	 *
	 * 2) Reference counter. If available we use the HPET or the
	 * PMTIMER as a reference to check the sanity of that value.
	 * We use separate TSC readouts and check inside of the
	 * reference read for an SMI/SMM disturbance. We discard
	 * disturbed values here as well. We do that around the PIT
	 * calibration delay loop as we have to wait for a certain
	 * amount of time anyway.
	 */

	/* Preset PIT loop values */
	latch = CAL_LATCH;
	ms = CAL_MS;
	loopmin = CAL_PIT_LOOPS;

	for (i = 0; i < 3; i++) {
		unsigned long tsc_pit_khz;

		/*
		 * Read the start value and the reference count of
		 * hpet/pmtimer when available. Then do the PIT
		 * calibration, which will take at least 10ms, and
		 * read the end value.
		 */
		local_irq_save(flags);
		tsc1 = tsc_read_refs(&ref1, hpet);
		tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
		tsc2 = tsc_read_refs(&ref2, hpet);
		local_irq_restore(flags);

		/* Pick the lowest PIT TSC calibration so far */
		tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);

		/* hpet or pmtimer available ? */
		if (ref1 == ref2)
			continue;

		/* Check whether the sampling was disturbed by an SMI */
		if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
			continue;

		tsc2 = (tsc2 - tsc1) * 1000000LL;
		if (hpet)
			tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
		else
			tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);

		tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);

		/* Check the reference deviation */
		delta = ((u64) tsc_pit_min) * 100;
		do_div(delta, tsc_ref_min);

		/*
		 * If both calibration results are inside a 10% window
		 * then we can be sure that the calibration
		 * succeeded. We break out of the loop right away. We
		 * use the reference value, as it is more precise.
		 */
		if (delta >= 90 && delta <= 110) {
			printk(KERN_INFO
			       "TSC: PIT calibration matches %s. %d loops\n",
			       hpet ? "HPET" : "PMTIMER", i + 1);
			return tsc_ref_min;
		}

		/*
		 * Check whether PIT failed more than once. This
		 * happens in virtualized environments. We need to
		 * give the virtual PC a slightly longer timeframe for
		 * the HPET/PMTIMER to make the result precise.
		 */
		if (i == 1 && tsc_pit_min == ULONG_MAX) {
			latch = CAL2_LATCH;
			ms = CAL2_MS;
			loopmin = CAL2_PIT_LOOPS;
		}
	}

	/*
	 * Now check the results.
	 */
	if (tsc_pit_min == ULONG_MAX) {
		/* PIT gave no useful value */
		printk(KERN_WARNING "TSC: Unable to calibrate against PIT\n");

		/* We don't have an alternative source, disable TSC */
		if (!hpet && !ref1 && !ref2) {
			printk("TSC: No reference (HPET/PMTIMER) available\n");
			return 0;
		}

		/* The alternative source failed as well, disable TSC */
		if (tsc_ref_min == ULONG_MAX) {
			printk(KERN_WARNING "TSC: HPET/PMTIMER calibration "
			       "failed.\n");
			return 0;
		}

		/* Use the alternative source */
		printk(KERN_INFO "TSC: using %s reference calibration\n",
		       hpet ? "HPET" : "PMTIMER");

		return tsc_ref_min;
	}

	/* We don't have an alternative source, use the PIT calibration value */
	if (!hpet && !ref1 && !ref2) {
		printk(KERN_INFO "TSC: Using PIT calibration value\n");
		return tsc_pit_min;
	}

	/* The alternative source failed, use the PIT calibration value */
	if (tsc_ref_min == ULONG_MAX) {
		printk(KERN_WARNING "TSC: HPET/PMTIMER calibration failed. "
		       "Using PIT calibration\n");
		return tsc_pit_min;
	}

	/*
	 * The calibration values differ too much. In doubt, we use
	 * the PIT value as we know that there are PMTIMERs around
	 * running at double speed. At least we let the user know:
	 */
	printk(KERN_WARNING "TSC: PIT calibration deviates from %s: "
	       "%lu %lu.\n", hpet ? "HPET" : "PMTIMER",
	       tsc_pit_min, tsc_ref_min);
	printk(KERN_INFO "TSC: Using PIT calibration value\n");
	return tsc_pit_min;
}

int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (cpu_has_tsc) {
		tsc_khz = x86_platform.calibrate_tsc();
		cpu_khz = tsc_khz;
		cpu_data(0).loops_per_jiffy =
			cpufreq_scale(cpu_data(0).loops_per_jiffy,
					cpu_khz_old, cpu_khz);
		return 0;
	} else
		return -ENODEV;
#else
	return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);
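/*
 * Note on cpufreq_scale() as used above (illustrative, not from the
 * original source): it rescales a value proportionally to a frequency
 * change, roughly new = old * cpu_khz / cpu_khz_old. E.g. if the
 * recalibrated cpu_khz came out 1% higher, loops_per_jiffy is bumped
 * by the same 1% so delay loops stay accurate.
 */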
"HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min); 558 printk(KERN_INFO "TSC: Using PIT calibration value\n"); 559 return tsc_pit_min; 560 } 561 562 int recalibrate_cpu_khz(void) 563 { 564 #ifndef CONFIG_SMP 565 unsigned long cpu_khz_old = cpu_khz; 566 567 if (cpu_has_tsc) { 568 tsc_khz = x86_platform.calibrate_tsc(); 569 cpu_khz = tsc_khz; 570 cpu_data(0).loops_per_jiffy = 571 cpufreq_scale(cpu_data(0).loops_per_jiffy, 572 cpu_khz_old, cpu_khz); 573 return 0; 574 } else 575 return -ENODEV; 576 #else 577 return -ENODEV; 578 #endif 579 } 580 581 EXPORT_SYMBOL(recalibrate_cpu_khz); 582 583 584 /* Accelerators for sched_clock() 585 * convert from cycles(64bits) => nanoseconds (64bits) 586 * basic equation: 587 * ns = cycles / (freq / ns_per_sec) 588 * ns = cycles * (ns_per_sec / freq) 589 * ns = cycles * (10^9 / (cpu_khz * 10^3)) 590 * ns = cycles * (10^6 / cpu_khz) 591 * 592 * Then we use scaling math (suggested by george@mvista.com) to get: 593 * ns = cycles * (10^6 * SC / cpu_khz) / SC 594 * ns = cycles * cyc2ns_scale / SC 595 * 596 * And since SC is a constant power of two, we can convert the div 597 * into a shift. 598 * 599 * We can use khz divisor instead of mhz to keep a better precision, since 600 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. 601 * (mathieu.desnoyers@polymtl.ca) 602 * 603 * -johnstul@us.ibm.com "math is hard, lets go shopping!" 604 */ 605 606 DEFINE_PER_CPU(unsigned long, cyc2ns); 607 DEFINE_PER_CPU(unsigned long long, cyc2ns_offset); 608 609 static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) 610 { 611 unsigned long long tsc_now, ns_now, *offset; 612 unsigned long flags, *scale; 613 614 local_irq_save(flags); 615 sched_clock_idle_sleep_event(); 616 617 scale = &per_cpu(cyc2ns, cpu); 618 offset = &per_cpu(cyc2ns_offset, cpu); 619 620 rdtscll(tsc_now); 621 ns_now = __cycles_2_ns(tsc_now); 622 623 if (cpu_khz) { 624 *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz; 625 *offset = ns_now - (tsc_now * *scale >> CYC2NS_SCALE_FACTOR); 626 } 627 628 sched_clock_idle_wakeup_event(0); 629 local_irq_restore(flags); 630 } 631 632 static unsigned long long cyc2ns_suspend; 633 634 void save_sched_clock_state(void) 635 { 636 if (!sched_clock_stable) 637 return; 638 639 cyc2ns_suspend = sched_clock(); 640 } 641 642 /* 643 * Even on processors with invariant TSC, TSC gets reset in some the 644 * ACPI system sleep states. And in some systems BIOS seem to reinit TSC to 645 * arbitrary value (still sync'd across cpu's) during resume from such sleep 646 * states. To cope up with this, recompute the cyc2ns_offset for each cpu so 647 * that sched_clock() continues from the point where it was left off during 648 * suspend. 649 */ 650 void restore_sched_clock_state(void) 651 { 652 unsigned long long offset; 653 unsigned long flags; 654 int cpu; 655 656 if (!sched_clock_stable) 657 return; 658 659 local_irq_save(flags); 660 661 __this_cpu_write(cyc2ns_offset, 0); 662 offset = cyc2ns_suspend - sched_clock(); 663 664 for_each_possible_cpu(cpu) 665 per_cpu(cyc2ns_offset, cpu) = offset; 666 667 local_irq_restore(flags); 668 } 669 670 #ifdef CONFIG_CPU_FREQ 671 672 /* Frequency scaling support. Adjust the TSC based timer when the cpu frequency 673 * changes. 674 * 675 * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's 676 * not that important because current Opteron setups do not support 677 * scaling on SMP anyroads. 678 * 679 * Should fix up last_tsc too. Currently gettimeofday in the 680 * first tick after the change will be slightly wrong. 
static unsigned long long cyc2ns_suspend;

void save_sched_clock_state(void)
{
	if (!sched_clock_stable)
		return;

	cyc2ns_suspend = sched_clock();
}

/*
 * Even on processors with invariant TSC, TSC gets reset in some of
 * the ACPI system sleep states. And in some systems BIOS seems to
 * reinit TSC to an arbitrary value (still sync'd across cpus) during
 * resume from such sleep states. To cope with this, recompute the
 * cyc2ns_offset for each cpu so that sched_clock() continues from the
 * point where it was left off during suspend.
 */
void restore_sched_clock_state(void)
{
	unsigned long long offset;
	unsigned long flags;
	int cpu;

	if (!sched_clock_stable)
		return;

	local_irq_save(flags);

	__this_cpu_write(cyc2ns_offset, 0);
	offset = cyc2ns_suspend - sched_clock();

	for_each_possible_cpu(cpu)
		per_cpu(cyc2ns_offset, cpu) = offset;

	local_irq_restore(flags);
}

#ifdef CONFIG_CPU_FREQ

/*
 * Frequency scaling support. Adjust the TSC based timer when the cpu
 * frequency changes.
 *
 * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
 * not that important because current Opteron setups do not support
 * scaling on SMP anyway.
 *
 * Should fix up last_tsc too. Currently gettimeofday in the
 * first tick after the change will be slightly wrong.
 */

static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long tsc_khz_ref;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj;

	if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
		return 0;

	lpj = &boot_cpu_data.loops_per_jiffy;
#ifdef CONFIG_SMP
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
		lpj = &cpu_data(freq->cpu).loops_per_jiffy;
#endif

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		tsc_khz_ref = tsc_khz;
	}
	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		*lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			mark_tsc_unstable("cpufreq changes");
	}

	set_cyc2ns_scale(tsc_khz, freq->cpu);

	return 0;
}

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	if (!cpu_has_tsc)
		return 0;
	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;
	cpufreq_register_notifier(&time_cpufreq_notifier_block,
				  CPUFREQ_TRANSITION_NOTIFIER);
	return 0;
}

core_initcall(cpufreq_tsc);

#endif /* CONFIG_CPU_FREQ */
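/*
 * Worked example for the notifier above (illustrative, not from the
 * original source): with tsc_khz_ref = 2,000,000 captured at
 * ref_freq = 2000 MHz, a transition to freq->new = 1000 MHz yields
 *
 *	tsc_khz = 2,000,000 * 1000 / 2000 = 1,000,000
 *
 * i.e. tsc_khz and loops_per_jiffy are rescaled in direct proportion
 * to the frequency change. The PRE/POST conditions apply the new
 * value on the side of the transition where the CPU runs faster, so
 * loops_per_jiffy is never too small for the current speed and delay
 * loops never return early.
 */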
/* clocksource code */

static struct clocksource clocksource_tsc;

/*
 * We compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp. This can be observed in a
 * very small window right after one CPU updated cycle_last under
 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
 * is smaller than the cycle_last reference value due to a TSC which
 * is slightly behind. This delta is nowhere else observable, but in
 * that case it results in a forward time jump in the range of hours
 * due to the unsigned delta calculation of the time keeping core
 * code, which is necessary to support wrapping clocksources like pm
 * timer.
 */
static cycle_t read_tsc(struct clocksource *cs)
{
	cycle_t ret = (cycle_t)get_cycles();

	return ret >= clocksource_tsc.cycle_last ?
		ret : clocksource_tsc.cycle_last;
}

static void resume_tsc(struct clocksource *cs)
{
	clocksource_tsc.cycle_last = 0;
}

static struct clocksource clocksource_tsc = {
	.name		= "tsc",
	.rating		= 300,
	.read		= read_tsc,
	.resume		= resume_tsc,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS |
			  CLOCK_SOURCE_MUST_VERIFY,
#ifdef CONFIG_X86_64
	.archdata	= { .vclock_mode = VCLOCK_TSC },
#endif
};

void mark_tsc_unstable(char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		sched_clock_stable = 0;
		disable_sched_clock_irqtime();
		printk(KERN_INFO "Marking TSC unstable due to %s\n", reason);
		/* Change only the rating, when not registered */
		if (clocksource_tsc.mult)
			clocksource_mark_unstable(&clocksource_tsc);
		else {
			clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
			clocksource_tsc.rating = 0;
		}
	}
}

EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static void __init check_system_tsc_reliable(void)
{
#ifdef CONFIG_MGEODE_LX
	/* RTSC counts during suspend */
#define RTSC_SUSP 0x100
	unsigned long res_low, res_high;

	rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
	/* Geode_LX - the OLPC CPU has a very reliable TSC */
	if (res_low & RTSC_SUSP)
		tsc_clocksource_reliable = 1;
#endif
	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
		tsc_clocksource_reliable = 1;
}

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
	if (!cpu_has_tsc || tsc_unstable)
		return 1;

#ifdef CONFIG_SMP
	if (apic_is_clustered_box())
		return 1;
#endif

	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;

	if (tsc_clocksource_reliable)
		return 0;
	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		/* assume multi socket systems are not synchronized: */
		if (num_possible_cpus() > 1)
			return 1;
	}

	return 0;
}


static void tsc_refine_calibration_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
/**
 * tsc_refine_calibration_work - Further refine tsc freq calibration
 * @work: ignored.
 *
 * This function uses delayed work over a period of a
 * second to further refine the TSC freq value. Since this is
 * timer based, instead of loop based, we don't block the boot
 * process while this longer calibration is done.
 *
 * If there are any calibration anomalies (too many SMIs, etc),
 * or the refined calibration is off by more than 1% from the
 * fast early calibration, we throw out the new calibration and
 * use the early calibration.
 */
static void tsc_refine_calibration_work(struct work_struct *work)
{
	static u64 tsc_start = -1, ref_start;
	static int hpet;
	u64 tsc_stop, ref_stop, delta;
	unsigned long freq;

	/* Don't bother refining TSC on unstable systems */
	if (check_tsc_unstable())
		goto out;

	/*
	 * Since the work is started early in boot, we may be
	 * delayed the first time we expire. So set the workqueue
	 * again once we know timers are working.
	 */
	if (tsc_start == -1) {
		/*
		 * Only set hpet once, to avoid mixing hardware
		 * if the hpet becomes enabled later.
		 */
		hpet = is_hpet_enabled();
		schedule_delayed_work(&tsc_irqwork, HZ);
		tsc_start = tsc_read_refs(&ref_start, hpet);
		return;
	}

	tsc_stop = tsc_read_refs(&ref_stop, hpet);

	/* hpet or pmtimer available ? */
	if (ref_start == ref_stop)
		goto out;

	/* Check whether the sampling was disturbed by an SMI */
	if (tsc_start == ULLONG_MAX || tsc_stop == ULLONG_MAX)
		goto out;

	delta = tsc_stop - tsc_start;
	delta *= 1000000LL;
	if (hpet)
		freq = calc_hpet_ref(delta, ref_start, ref_stop);
	else
		freq = calc_pmtimer_ref(delta, ref_start, ref_stop);

	/* Make sure we're within 1% */
	if (abs(tsc_khz - freq) > tsc_khz/100)
		goto out;

	tsc_khz = freq;
	printk(KERN_INFO "Refined TSC clocksource calibration: "
	       "%lu.%03lu MHz.\n", (unsigned long)tsc_khz / 1000,
	       (unsigned long)tsc_khz % 1000);

out:
	clocksource_register_khz(&clocksource_tsc, tsc_khz);
}
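/*
 * Worked example for the 1% acceptance window above (illustrative,
 * assuming an early result of tsc_khz = 2,000,000; not from the
 * original source): the refined freq is kept only if
 *
 *	abs(2,000,000 - freq) <= 2,000,000 / 100 = 20,000 kHz
 *
 * i.e. within +/- 20 MHz of the fast calibration; anything further
 * off is treated as a disturbed measurement and discarded.
 */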
static int __init init_tsc_clocksource(void)
{
	if (!cpu_has_tsc || tsc_disabled > 0 || !tsc_khz)
		return 0;

	if (tsc_clocksource_reliable)
		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
	/* lower the rating if we already know it's unstable: */
	if (check_tsc_unstable()) {
		clocksource_tsc.rating = 0;
		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
	}
	schedule_delayed_work(&tsc_irqwork, 0);
	return 0;
}
/*
 * We use device_initcall here, to ensure we run after the hpet
 * is fully initialized, which may occur at fs_initcall time.
 */
device_initcall(init_tsc_clocksource);

void __init tsc_init(void)
{
	u64 lpj;
	int cpu;

	x86_init.timers.tsc_pre_init();

	if (!cpu_has_tsc)
		return;

	tsc_khz = x86_platform.calibrate_tsc();
	cpu_khz = tsc_khz;

	if (!tsc_khz) {
		mark_tsc_unstable("could not calculate TSC khz");
		return;
	}

	printk("Detected %lu.%03lu MHz processor.\n",
	       (unsigned long)cpu_khz / 1000,
	       (unsigned long)cpu_khz % 1000);

	/*
	 * Secondary CPUs do not run through tsc_init(), so set up
	 * all the scale factors for all CPUs, assuming the same
	 * speed as the bootup CPU. (cpufreq notifiers will fix this
	 * up if their speed diverges)
	 */
	for_each_possible_cpu(cpu)
		set_cyc2ns_scale(cpu_khz, cpu);

	if (tsc_disabled > 0)
		return;

	/* now allow native_sched_clock() to use rdtsc */
	tsc_disabled = 0;

	if (!no_sched_irq_time)
		enable_sched_clock_irqtime();

	lpj = ((u64)tsc_khz * 1000);
	do_div(lpj, HZ);
	lpj_fine = lpj;

	use_tsc_delay();

	if (unsynchronized_tsc())
		mark_tsc_unstable("TSCs unsynchronized");

	check_system_tsc_reliable();
}
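/*
 * Note on the lpj_fine value computed in tsc_init() above
 * (illustrative, not from the original source): tsc_khz * 1000 is the
 * TSC frequency in Hz, i.e. cycles per second, so dividing by HZ
 * gives cycles per jiffy. E.g. with tsc_khz = 2,000,000 and HZ = 1000,
 * lpj_fine = 2,000,000 cycles per tick, which the boot-time delay
 * calibration can use as a precise preset.
 */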