/*
 * linux/kernel/time/clocksource.c
 *
 * This file contains the functions which manage clocksource drivers.
 *
 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * TODO WishList:
 *   o Allow clocksource drivers to be unregistered
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>
#include <linux/kthread.h>

#include "tick-internal.h"
#include "timekeeping_internal.h"

/**
 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
 * @mult: pointer to mult variable
 * @shift: pointer to shift variable
 * @from: frequency to convert from
 * @to: frequency to convert to
 * @maxsec: guaranteed runtime conversion range in seconds
 *
 * The function evaluates the shift/mult pair for the scaled math
 * operations of clocksources and clockevents.
 *
 * @to and @from are frequency values in HZ. For clock sources @to is
 * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
 * event @to is the counter frequency and @from is NSEC_PER_SEC.
 *
 * The @maxsec conversion range argument controls the time frame in
 * seconds which must be covered by the runtime conversion with the
 * calculated mult and shift factors. This guarantees that no 64bit
 * overflow happens when the input value of the conversion is
 * multiplied with the calculated mult factor. Larger ranges may
 * reduce the conversion accuracy by choosing smaller mult and shift
 * factors.
 */
void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
{
	u64 tmp;
	u32 sft, sftacc = 32;

	/*
	 * Calculate the shift factor which is limiting the conversion
	 * range:
	 */
	tmp = ((u64)maxsec * from) >> 32;
	while (tmp) {
		tmp >>= 1;
		sftacc--;
	}

	/*
	 * Find the conversion shift/mult pair which has the best
	 * accuracy and fits the maxsec conversion range:
	 */
	for (sft = 32; sft > 0; sft--) {
		tmp = (u64) to << sft;
		tmp += from / 2;
		do_div(tmp, from);
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = tmp;
	*shift = sft;
}
EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
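
/*
 * Worked example (hypothetical numbers, not from a real driver): for a
 * 10 MHz counter converted to nanoseconds (from = 10000000,
 * to = NSEC_PER_SEC) with maxsec = 600, the loop above settles on
 * shift = 24 and mult = 100 << 24, since the exact ratio is 100 ns per
 * cycle.  A reading is then scaled as in clocksource_cyc2ns():
 *
 *	ns = (cycles * mult) >> shift;
 *
 * so 10000 cycles -> (10000 * (100 << 24)) >> 24 = 1000000 ns = 1 ms.
 */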

/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * suspend_clocksource:
 *	used to calculate the suspend time.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static struct clocksource *suspend_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[CS_NAME_LEN];
static int finished_booting;
static u64 suspend_start;

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);
static void clocksource_select(void);

static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
static int watchdog_running;
static atomic_t watchdog_reset_pending;

static inline void clocksource_watchdog_lock(unsigned long *flags)
{
	spin_lock_irqsave(&watchdog_lock, *flags);
}

static inline void clocksource_watchdog_unlock(unsigned long *flags)
{
	spin_unlock_irqrestore(&watchdog_lock, *flags);
}

/*
 * Interval: 0.5sec Threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)

static void __clocksource_unstable(struct clocksource *cs)
{
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;

	/*
	 * If the clocksource is registered, clocksource_watchdog_work() will
	 * re-rate and re-select it.
	 */
	if (list_empty(&cs->list)) {
		cs->rating = 0;
		return;
	}

	if (cs->mark_unstable)
		cs->mark_unstable(cs);

	/* kick clocksource_watchdog_work() */
	if (finished_booting)
		schedule_work(&watchdog_work);
}

/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs: clocksource to be marked unstable
 *
 * This function is called by the x86 TSC code to mark clocksources as unstable;
 * it defers demotion and re-selection to a work item.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
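
/*
 * Threshold arithmetic, for reference: the watchdog below samples every
 * WATCHDOG_INTERVAL = HZ >> 1 jiffies (0.5s) and tolerates at most
 * WATCHDOG_THRESHOLD = NSEC_PER_SEC >> 4 = 62500000 ns (62.5ms) of
 * disagreement per interval -- i.e. roughly a 12.5% momentary frequency
 * deviation against the watchdog before a clocksource is declared
 * unstable.
 */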

static void clocksource_watchdog(struct timer_list *unused)
{
	struct clocksource *cs;
	u64 csnow, wdnow, cslast, wdlast, delta;
	int64_t wd_nsec, cs_nsec;
	int next_cpu, reset_pending;

	spin_lock(&watchdog_lock);
	if (!watchdog_running)
		goto out;

	reset_pending = atomic_read(&watchdog_reset_pending);

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			if (finished_booting)
				schedule_work(&watchdog_work);
			continue;
		}

		local_irq_disable();
		csnow = cs->read(cs);
		wdnow = watchdog->read(watchdog);
		local_irq_enable();

		/* Clocksource initialized? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
		    atomic_read(&watchdog_reset_pending)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = wdnow;
			cs->cs_last = csnow;
			continue;
		}

		delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
		wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
					     watchdog->shift);

		delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
		wdlast = cs->wd_last; /* save these in case we print them */
		cslast = cs->cs_last;
		cs->cs_last = csnow;
		cs->wd_last = wdnow;

		if (atomic_read(&watchdog_reset_pending))
			continue;

		/* Check the deviation from the watchdog clocksource. */
		if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
			pr_warn("timekeeping watchdog on CPU%d: Marking clocksource '%s' as unstable because the skew is too large:\n",
				smp_processor_id(), cs->name);
			pr_warn("                      '%s' wd_now: %llx wd_last: %llx mask: %llx\n",
				watchdog->name, wdnow, wdlast, watchdog->mask);
			pr_warn("                      '%s' cs_now: %llx cs_last: %llx mask: %llx\n",
				cs->name, csnow, cslast, cs->mask);
			__clocksource_unstable(cs);
			continue;
		}

		if (cs == curr_clocksource && cs->tick_stable)
			cs->tick_stable(cs);

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			/* Mark it valid for high-res. */
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;

			/*
			 * clocksource_done_booting() will sort it if
			 * finished_booting is not set yet.
			 */
			if (!finished_booting)
				continue;

			/*
			 * If this is not the current clocksource, let
			 * the watchdog thread reselect it. Due to the
			 * change to high res this clocksource might
			 * be preferred now. If it is the current
			 * clocksource, let the tick code know about
			 * that change.
			 */
			if (cs != curr_clocksource) {
				cs->flags |= CLOCK_SOURCE_RESELECT;
				schedule_work(&watchdog_work);
			} else {
				tick_clock_notify();
			}
		}
	}

	/*
	 * We only clear the watchdog_reset_pending when we did a
	 * full cycle through all clocksources.
	 */
	if (reset_pending)
		atomic_dec(&watchdog_reset_pending);

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);
	watchdog_timer.expires += WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, next_cpu);
out:
	spin_unlock(&watchdog_lock);
}
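
/*
 * Note on the delta math above (illustrative 16-bit example):
 * clocksource_delta() subtracts modulo the counter width, so a counter
 * wrap between two samples is handled correctly as long as less than one
 * full period elapsed.  With mask = 0xffff, last = 0xfff0, now = 0x0010:
 *
 *	(0x0010 - 0xfff0) & 0xffff = 0x0020 = 32 cycles
 */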

static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
	timer_setup(&watchdog_timer, clocksource_watchdog, 0);
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}

static inline void clocksource_stop_watchdog(void)
{
	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
		return;
	del_timer(&watchdog_timer);
	watchdog_running = 0;
}

static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}

static void clocksource_resume_watchdog(void)
{
	atomic_inc(&watchdog_reset_pending);
}

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	INIT_LIST_HEAD(&cs->wd_list);

	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
	}
}

static void clocksource_select_watchdog(bool fallback)
{
	struct clocksource *cs, *old_wd;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	/* save current watchdog */
	old_wd = watchdog;
	if (fallback)
		watchdog = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* cs is a clocksource to be watched. */
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
			continue;

		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_wd)
			continue;

		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating)
			watchdog = cs;
	}
	/* If we failed to find a fallback restore the old one. */
	if (!watchdog)
		watchdog = old_wd;

	/* If we changed the watchdog we need to reset cycles. */
	if (watchdog != old_wd)
		clocksource_reset_watchdog();

	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	if (cs != watchdog) {
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
			/* cs is a watched clocksource. */
			list_del_init(&cs->wd_list);
			/* Check if the watchdog timer needs to be stopped. */
			clocksource_stop_watchdog();
		}
	}
}
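
/*
 * Selection sketch (hypothetical ratings): on a system where tsc
 * (rating 300) carries CLOCK_SOURCE_MUST_VERIFY while hpet (rating 250)
 * does not, clocksource_select_watchdog() skips tsc and picks hpet --
 * the highest rated clocksource that does not itself need watching.
 */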

static void __clocksource_change_rating(struct clocksource *cs, int rating);

static int __clocksource_watchdog_work(void)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	int select = 0;

	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			__clocksource_change_rating(cs, 0);
			select = 1;
		}
		if (cs->flags & CLOCK_SOURCE_RESELECT) {
			cs->flags &= ~CLOCK_SOURCE_RESELECT;
			select = 1;
		}
	}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	return select;
}

static void clocksource_watchdog_work(struct work_struct *work)
{
	mutex_lock(&clocksource_mutex);
	if (__clocksource_watchdog_work())
		clocksource_select();
	mutex_unlock(&clocksource_mutex);
}

static bool clocksource_is_watchdog(struct clocksource *cs)
{
	return cs == watchdog;
}

#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static void clocksource_select_watchdog(bool fallback) { }
static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int __clocksource_watchdog_work(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
void clocksource_mark_unstable(struct clocksource *cs) { }

static inline void clocksource_watchdog_lock(unsigned long *flags) { }
static inline void clocksource_watchdog_unlock(unsigned long *flags) { }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */

static bool clocksource_is_suspend(struct clocksource *cs)
{
	return cs == suspend_clocksource;
}

static void __clocksource_suspend_select(struct clocksource *cs)
{
	/*
	 * Skip the clocksource which will be stopped in suspend state.
	 */
	if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP))
		return;

	/*
	 * The nonstop clocksource can be selected as the suspend clocksource to
	 * calculate the suspend time, so it should not supply suspend/resume
	 * interfaces to suspend the nonstop clocksource when system suspends.
	 */
	if (cs->suspend || cs->resume) {
		pr_warn("Nonstop clocksource %s should not supply suspend/resume interfaces\n",
			cs->name);
	}

	/* Pick the best rating. */
	if (!suspend_clocksource || cs->rating > suspend_clocksource->rating)
		suspend_clocksource = cs;
}
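
/*
 * Illustration (assumes an x86 system): a TSC whose CPU advertises a
 * nonstop TSC sets CLOCK_SOURCE_SUSPEND_NONSTOP and therefore qualifies
 * here; with its typical rating of 300 it usually ends up as the
 * suspend_clocksource.
 */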

/**
 * clocksource_suspend_select - Select the best clocksource for suspend timing
 * @fallback: if true, select a fallback clocksource
 */
static void clocksource_suspend_select(bool fallback)
{
	struct clocksource *cs, *old_suspend;

	old_suspend = suspend_clocksource;
	if (fallback)
		suspend_clocksource = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_suspend)
			continue;

		__clocksource_suspend_select(cs);
	}
}

/**
 * clocksource_start_suspend_timing - Start measuring the suspend timing
 * @cs: current clocksource from timekeeping
 * @start_cycles: current cycles from timekeeping
 *
 * This function will save the start cycle values of suspend timer to calculate
 * the suspend time when resuming system.
 *
 * This function is called late in the suspend process from timekeeping_suspend(),
 * which means that processes are frozen, and non-boot CPUs and interrupts are
 * disabled now. It is therefore possible to start the suspend timer without
 * taking the clocksource mutex.
 */
void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
{
	if (!suspend_clocksource)
		return;

	/*
	 * If current clocksource is the suspend timer, we should use the
	 * tkr_mono.cycle_last value as suspend_start to avoid same reading
	 * from suspend timer.
	 */
	if (clocksource_is_suspend(cs)) {
		suspend_start = start_cycles;
		return;
	}

	if (suspend_clocksource->enable &&
	    suspend_clocksource->enable(suspend_clocksource)) {
		pr_warn_once("Failed to enable the non-suspend-able clocksource.\n");
		return;
	}

	suspend_start = suspend_clocksource->read(suspend_clocksource);
}

/**
 * clocksource_stop_suspend_timing - Stop measuring the suspend timing
 * @cs: current clocksource from timekeeping
 * @cycle_now: current cycles from timekeeping
 *
 * This function will calculate the suspend time from suspend timer.
 *
 * Returns nanoseconds since suspend started, 0 if no usable suspend clocksource.
 *
 * This function is called early in the resume process from timekeeping_resume(),
 * which means there is only one CPU, no processes are running and the interrupts
 * are disabled. It is therefore possible to stop the suspend timer without
 * taking the clocksource mutex.
 */
u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
{
	u64 now, delta, nsec = 0;

	if (!suspend_clocksource)
		return 0;

	/*
	 * If current clocksource is the suspend timer, we should use the
	 * tkr_mono.cycle_last value from timekeeping as current cycle to
	 * avoid same reading from suspend timer.
	 */
	if (clocksource_is_suspend(cs))
		now = cycle_now;
	else
		now = suspend_clocksource->read(suspend_clocksource);

	if (now > suspend_start) {
		delta = clocksource_delta(now, suspend_start,
					  suspend_clocksource->mask);
		nsec = mul_u64_u32_shr(delta, suspend_clocksource->mult,
				       suspend_clocksource->shift);
	}

	/*
	 * Disable the suspend timer to save power if current clocksource is
	 * not the suspend timer.
	 */
	if (!clocksource_is_suspend(cs) && suspend_clocksource->disable)
		suspend_clocksource->disable(suspend_clocksource);

	return nsec;
}
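
/*
 * Worked example (hypothetical 32.768 kHz always-on counter): with
 * mult = 31250000 and shift = 10 (an exact pair, since
 * (32768 * 31250000) >> 10 == NSEC_PER_SEC), one second of suspend is
 * delta = 32768 cycles, and the conversion above computes
 *
 *	nsec = mul_u64_u32_shr(32768, 31250000, 10) = 1000000000
 */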

/**
 * clocksource_suspend - suspend the clocksource(s)
 */
void clocksource_suspend(void)
{
	struct clocksource *cs;

	list_for_each_entry_reverse(cs, &clocksource_list, list)
		if (cs->suspend)
			cs->suspend(cs);
}

/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume(cs);

	clocksource_resume_watchdog();
}

/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog. This might fail when the kernel
 * was stopped in code which holds watchdog_lock.
 */
void clocksource_touch_watchdog(void)
{
	clocksource_resume_watchdog();
}

/**
 * clocksource_max_adjustment - Returns max adjustment amount
 * @cs: Pointer to clocksource
 */
static u32 clocksource_max_adjustment(struct clocksource *cs)
{
	u64 ret;

	/*
	 * We won't try to correct for more than 11% adjustments (110,000 ppm).
	 */
	ret = (u64)cs->mult * 11;
	do_div(ret, 100);
	return (u32)ret;
}
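
/*
 * Example: a clocksource with mult = 20000000 gets
 * maxadj = 20000000 * 11 / 100 = 2200000, i.e. the largest correction
 * that NTP frequency steering is allowed to apply to 'mult'.
 */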

/**
 * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
 * @mult: cycle to nanosecond multiplier
 * @shift: cycle to nanosecond divisor (power of two)
 * @maxadj: maximum adjustment value to mult (~11%)
 * @mask: bitmask for two's complement subtraction of non 64 bit counters
 * @max_cyc: maximum cycle value before potential overflow (does not include
 *	any safety margin)
 *
 * NOTE: This function includes a safety margin of 50%, in other words, we
 * return half the number of nanoseconds the hardware counter can technically
 * cover. This is done so that we can potentially detect problems caused by
 * delayed timers or bad hardware, which might result in time intervals that
 * are larger than what the math used can handle without overflows.
 */
u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
{
	u64 max_nsecs, max_cycles;

	/*
	 * Calculate the maximum number of cycles that we can pass to the
	 * cyc2ns() function without overflowing a 64-bit result.
	 */
	max_cycles = ULLONG_MAX;
	do_div(max_cycles, mult+maxadj);

	/*
	 * The actual maximum number of cycles we can defer the clocksource is
	 * determined by the minimum of max_cycles and mask.
	 * Note: Here we subtract the maxadj to make sure we don't sleep for
	 * too long if there's a large negative adjustment.
	 */
	max_cycles = min(max_cycles, mask);
	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);

	/* return the max_cycles value as well if requested */
	if (max_cyc)
		*max_cyc = max_cycles;

	/* Return 50% of the actual maximum, so we can detect bad values */
	max_nsecs >>= 1;

	return max_nsecs;
}

/**
 * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles
 * @cs: Pointer to clocksource to be updated
 */
static inline void clocksource_update_max_deferment(struct clocksource *cs)
{
	cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
						cs->maxadj, cs->mask,
						&cs->max_cycles);
}

#ifndef CONFIG_ARCH_USES_GETTIMEOFFSET

static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
{
	struct clocksource *cs;

	if (!finished_booting || list_empty(&clocksource_list))
		return NULL;

	/*
	 * We pick the clocksource with the highest rating. If oneshot
	 * mode is active, we pick the highres valid clocksource with
	 * the best rating.
	 */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			continue;
		return cs;
	}
	return NULL;
}

static void __clocksource_select(bool skipcur)
{
	bool oneshot = tick_oneshot_mode_active();
	struct clocksource *best, *cs;

	/* Find the best suitable clocksource */
	best = clocksource_find_best(oneshot, skipcur);
	if (!best)
		return;

	if (!strlen(override_name))
		goto found;

	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz)
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
			/* Override clocksource cannot be used. */
			if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
				pr_warn("Override clocksource %s is unstable and not HRT compatible - cannot switch while in HRT/NOHZ mode\n",
					cs->name);
				override_name[0] = 0;
			} else {
				/*
				 * The override cannot be currently verified.
				 * Deferring to let the watchdog check.
				 */
				pr_info("Override clocksource %s is not currently HRT compatible - deferring\n",
					cs->name);
			}
		} else
			/* Override clocksource can be used. */
			best = cs;
		break;
	}

found:
	if (curr_clocksource != best && !timekeeping_notify(best)) {
		pr_info("Switched to clocksource %s\n", best->name);
		curr_clocksource = best;
	}
}
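
/*
 * Example of the override path above (hypothetical setup): with tsc
 * (rating 300) and hpet (rating 250) registered and no override set,
 * clocksource_find_best() returns tsc.  Once "hpet" has been stored in
 * override_name (via sysfs or the clocksource= boot option), hpet is
 * chosen instead despite its lower rating, provided it is valid for
 * high-res mode whenever the tick runs in oneshot mode.
 */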

/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource
 * which is selected by userspace override.
 */
static void clocksource_select(void)
{
	__clocksource_select(false);
}

static void clocksource_select_fallback(void)
{
	__clocksource_select(true);
}

#else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */
static inline void clocksource_select(void) { }
static inline void clocksource_select_fallback(void) { }

#endif

/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	mutex_lock(&clocksource_mutex);
	curr_clocksource = clocksource_default_clock();
	finished_booting = 1;
	/*
	 * Run the watchdog first to eliminate unstable clock sources
	 */
	__clocksource_watchdog_work();
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
fs_initcall(clocksource_done_booting);

/*
 * Enqueue the clocksource sorted by rating
 */
static void clocksource_enqueue(struct clocksource *cs)
{
	struct list_head *entry = &clocksource_list;
	struct clocksource *tmp;

	list_for_each_entry(tmp, &clocksource_list, list) {
		/* Keep track of the place, where to insert */
		if (tmp->rating < cs->rating)
			break;
		entry = &tmp->list;
	}
	list_add(&cs->list, entry);
}

/**
 * __clocksource_update_freq_scale - Used to update the clocksource with a new freq
 * @cs: clocksource to be registered
 * @scale: Scale factor multiplied against freq to get clocksource hz
 * @freq: clocksource frequency (cycles per second) divided by scale
 *
 * This should only be called from the clocksource->enable() method.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * __clocksource_update_freq_hz() or __clocksource_update_freq_khz() helper
 * functions.
 */
void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	u64 sec;

	/*
	 * Default clocksources are *special* and self-define their mult/shift.
	 * But, you're not special, so you should specify a freq value.
	 */
	if (freq) {
		/*
		 * Calc the maximum number of seconds which we can run before
		 * wrapping around. For clocksources which have a mask > 32-bit
		 * we need to limit the max sleep time to have a good
		 * conversion precision. 10 minutes is still a reasonable
		 * amount. That results in a shift value of 24 for a
		 * clocksource with mask >= 40-bit and f >= 4GHz. That maps to
		 * ~ 0.06ppm granularity for NTP.
		 */
		sec = cs->mask;
		do_div(sec, freq);
		do_div(sec, scale);
		if (!sec)
			sec = 1;
		else if (sec > 600 && cs->mask > UINT_MAX)
			sec = 600;

		clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
				       NSEC_PER_SEC / scale, sec * scale);
	}
	/*
	 * Ensure clocksources that have large 'mult' values don't overflow
	 * when adjusted.
	 */
	cs->maxadj = clocksource_max_adjustment(cs);
	while (freq && ((cs->mult + cs->maxadj < cs->mult)
		|| (cs->mult - cs->maxadj > cs->mult))) {
		cs->mult >>= 1;
		cs->shift--;
		cs->maxadj = clocksource_max_adjustment(cs);
	}

	/*
	 * Only warn for *special* clocksources that self-define
	 * their mult/shift values and don't specify a freq.
	 */
	WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
		"timekeeping: Clocksource %s might overflow on 11%% adjustment\n",
		cs->name);

	clocksource_update_max_deferment(cs);

	pr_info("%s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n",
		cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
}
EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
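
/*
 * Sizing example for the code above (hypothetical device): registering a
 * 32-bit counter running at 10 MHz via clocksource_register_hz() gives
 * sec = 0xffffffff / 10000000 = 429, so the mult/shift pair is sized for
 * a 429s conversion range; clocks_calc_mult_shift() then yields
 * shift = 25 and mult = 100 << 25, and since mult +/- maxadj cannot
 * overflow a u32 here, the halving loop is never entered.
 */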

/**
 * __clocksource_register_scale - Used to install new clocksources
 * @cs: clocksource to be registered
 * @scale: Scale factor multiplied against freq to get clocksource hz
 * @freq: clocksource frequency (cycles per second) divided by scale
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * clocksource_register_hz() or clocksource_register_khz() helper functions.
 */
int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	unsigned long flags;

	/* Initialize mult/shift and max_idle_ns */
	__clocksource_update_freq_scale(cs, scale, freq);

	/* Add clocksource to the clocksource list */
	mutex_lock(&clocksource_mutex);

	clocksource_watchdog_lock(&flags);
	clocksource_enqueue(cs);
	clocksource_enqueue_watchdog(cs);
	clocksource_watchdog_unlock(&flags);

	clocksource_select();
	clocksource_select_watchdog(false);
	__clocksource_suspend_select(cs);
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(__clocksource_register_scale);

static void __clocksource_change_rating(struct clocksource *cs, int rating)
{
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
}

/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 * @cs: clocksource to be changed
 * @rating: new rating
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	unsigned long flags;

	mutex_lock(&clocksource_mutex);
	clocksource_watchdog_lock(&flags);
	__clocksource_change_rating(cs, rating);
	clocksource_watchdog_unlock(&flags);

	clocksource_select();
	clocksource_select_watchdog(false);
	clocksource_suspend_select(false);
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);
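
/*
 * For orientation, the conventional rating bands (documented with
 * struct clocksource in include/linux/clocksource.h) are: 1-99 unfit for
 * real use (bootup and testing only), 100-199 functional but not
 * desired, 200-299 good (correct and usable), 300-399 desired
 * (reasonably fast and accurate), 400-499 perfect (a must-use where
 * available).
 */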

/*
 * Unbind clocksource @cs. Called with clocksource_mutex held
 */
static int clocksource_unbind(struct clocksource *cs)
{
	unsigned long flags;

	if (clocksource_is_watchdog(cs)) {
		/* Select and try to install a replacement watchdog. */
		clocksource_select_watchdog(true);
		if (clocksource_is_watchdog(cs))
			return -EBUSY;
	}

	if (cs == curr_clocksource) {
		/* Select and try to install a replacement clock source */
		clocksource_select_fallback();
		if (curr_clocksource == cs)
			return -EBUSY;
	}

	if (clocksource_is_suspend(cs)) {
		/*
		 * Select and try to install a replacement suspend clocksource.
		 * If no replacement suspend clocksource, we will just let the
		 * clocksource go and have no suspend clocksource.
		 */
		clocksource_suspend_select(true);
	}

	clocksource_watchdog_lock(&flags);
	clocksource_dequeue_watchdog(cs);
	list_del_init(&cs->list);
	clocksource_watchdog_unlock(&flags);

	return 0;
}

/**
 * clocksource_unregister - remove a registered clocksource
 * @cs: clocksource to be unregistered
 */
int clocksource_unregister(struct clocksource *cs)
{
	int ret = 0;

	mutex_lock(&clocksource_mutex);
	if (!list_empty(&cs->list))
		ret = clocksource_unbind(cs);
	mutex_unlock(&clocksource_mutex);
	return ret;
}
EXPORT_SYMBOL(clocksource_unregister);

#ifdef CONFIG_SYSFS
/**
 * current_clocksource_show - sysfs interface for current clocksource
 * @dev: unused
 * @attr: unused
 * @buf: char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing the current clocksource.
 */
static ssize_t current_clocksource_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
	mutex_unlock(&clocksource_mutex);

	return count;
}

ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
{
	size_t ret = cnt;

	/* strings from sysfs write are not 0 terminated! */
	if (!cnt || cnt >= CS_NAME_LEN)
		return -EINVAL;

	/* strip off the \n: */
	if (buf[cnt-1] == '\n')
		cnt--;
	if (cnt > 0)
		memcpy(dst, buf, cnt);
	dst[cnt] = 0;
	return ret;
}

/**
 * current_clocksource_store - interface for manually overriding clocksource
 * @dev: unused
 * @attr: unused
 * @buf: name of override clocksource
 * @count: length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t current_clocksource_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	ssize_t ret;

	mutex_lock(&clocksource_mutex);

	ret = sysfs_get_uname(buf, override_name, count);
	if (ret >= 0)
		clocksource_select();

	mutex_unlock(&clocksource_mutex);

	return ret;
}
static DEVICE_ATTR_RW(current_clocksource);
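
/*
 * Usage from user space, for reference:
 *
 *	# cat /sys/devices/system/clocksource/clocksource0/current_clocksource
 *	tsc
 *	# echo hpet > /sys/devices/system/clocksource/clocksource0/current_clocksource
 *
 * The write lands in current_clocksource_store() above, which copies the
 * name into override_name and re-runs clocksource_select().
 */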

/**
 * unbind_clocksource_store - interface for manually unbinding clocksource
 * @dev: unused
 * @attr: unused
 * @buf: unused
 * @count: length of buffer
 *
 * Takes input from sysfs interface for manually unbinding a clocksource.
 */
static ssize_t unbind_clocksource_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct clocksource *cs;
	char name[CS_NAME_LEN];
	ssize_t ret;

	ret = sysfs_get_uname(buf, name, count);
	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clocksource_mutex);
	list_for_each_entry(cs, &clocksource_list, list) {
		if (strcmp(cs->name, name))
			continue;
		ret = clocksource_unbind(cs);
		break;
	}
	mutex_unlock(&clocksource_mutex);

	return ret ? ret : count;
}
static DEVICE_ATTR_WO(unbind_clocksource);

/**
 * available_clocksource_show - sysfs interface for listing clocksource
 * @dev: unused
 * @attr: unused
 * @buf: char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t available_clocksource_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct clocksource *src;
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	list_for_each_entry(src, &clocksource_list, list) {
		/*
		 * Don't show non-HRES clocksource if the tick code is
		 * in one shot mode (highres=on or nohz=on)
		 */
		if (!tick_oneshot_mode_active() ||
		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			count += snprintf(buf + count,
				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
				  "%s ", src->name);
	}
	mutex_unlock(&clocksource_mutex);

	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

	return count;
}
static DEVICE_ATTR_RO(available_clocksource);

static struct attribute *clocksource_attrs[] = {
	&dev_attr_current_clocksource.attr,
	&dev_attr_unbind_clocksource.attr,
	&dev_attr_available_clocksource.attr,
	NULL
};
ATTRIBUTE_GROUPS(clocksource);

static struct bus_type clocksource_subsys = {
	.name = "clocksource",
	.dev_name = "clocksource",
};

static struct device device_clocksource = {
	.id = 0,
	.bus = &clocksource_subsys,
	.groups = clocksource_groups,
};

static int __init init_clocksource_sysfs(void)
{
	int error = subsys_system_register(&clocksource_subsys, NULL);

	if (!error)
		error = device_register(&device_clocksource);

	return error;
}

device_initcall(init_clocksource_sysfs);
#endif /* CONFIG_SYSFS */

/**
 * boot_override_clocksource - boot clock override
 * @str: override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char *str)
{
	mutex_lock(&clocksource_mutex);
	if (str)
		strlcpy(override_name, str, sizeof(override_name));
	mutex_unlock(&clocksource_mutex);
	return 1;
}

__setup("clocksource=", boot_override_clocksource);

/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str: override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clock(char *str)
{
	if (!strcmp(str, "pmtmr")) {
		pr_warn("clock=pmtmr is deprecated - use clocksource=acpi_pm\n");
		return boot_override_clocksource("acpi_pm");
	}
	pr_warn("clock= boot option is deprecated - use clocksource=xyz\n");
	return boot_override_clocksource(str);
}

__setup("clock=", boot_override_clock);