// SPDX-License-Identifier: GPL-2.0+
/*
 * This file contains the functions which manage clocksource drivers.
 *
 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>
#include <linux/kthread.h>

#include "tick-internal.h"
#include "timekeeping_internal.h"

/**
 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
 * @mult:	pointer to mult variable
 * @shift:	pointer to shift variable
 * @from:	frequency to convert from
 * @to:		frequency to convert to
 * @maxsec:	guaranteed runtime conversion range in seconds
 *
 * The function evaluates the shift/mult pair for the scaled math
 * operations of clocksources and clockevents.
 *
 * @to and @from are frequency values in HZ. For clock sources @to is
 * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
 * events @to is the counter frequency and @from is NSEC_PER_SEC.
 *
 * The @maxsec conversion range argument controls the time frame in
 * seconds which must be covered by the runtime conversion with the
 * calculated mult and shift factors. This guarantees that no 64bit
 * overflow happens when the input value of the conversion is
 * multiplied with the calculated mult factor. Larger ranges may
 * reduce the conversion accuracy by choosing smaller mult and shift
 * factors.
 */
void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
{
	u64 tmp;
	u32 sft, sftacc = 32;

	/*
	 * Calculate the shift factor which is limiting the conversion
	 * range:
	 */
	tmp = ((u64)maxsec * from) >> 32;
	while (tmp) {
		tmp >>= 1;
		sftacc--;
	}

	/*
	 * Find the conversion shift/mult pair which has the best
	 * accuracy and fits the maxsec conversion range:
	 */
	for (sft = 32; sft > 0; sft--) {
		tmp = (u64)to << sft;
		tmp += from / 2;
		do_div(tmp, from);
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = tmp;
	*shift = sft;
}
EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
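
/*
 * Example (informative; the numbers are hypothetical): for a 19.2MHz
 * counter feeding clocksource math (@from = 19200000, @to = NSEC_PER_SEC)
 * with a guaranteed range of 600 seconds:
 *
 *	u32 mult, shift;
 *	u64 ns;
 *
 *	clocks_calc_mult_shift(&mult, &shift, 19200000, NSEC_PER_SEC, 600);
 *	ns = ((u64)cycles * mult) >> shift;
 *
 * approximates cycles * (10^9 / 19200000) without overflowing 64 bits for
 * any interval up to 600 seconds.
 */
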
/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * suspend_clocksource:
 *	used to calculate the suspend time.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static struct clocksource *suspend_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[CS_NAME_LEN];
static int finished_booting;
static u64 suspend_start;

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);
static void clocksource_select(void);

static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
static int watchdog_running;
static atomic_t watchdog_reset_pending;

static inline void clocksource_watchdog_lock(unsigned long *flags)
{
	spin_lock_irqsave(&watchdog_lock, *flags);
}

static inline void clocksource_watchdog_unlock(unsigned long *flags)
{
	spin_unlock_irqrestore(&watchdog_lock, *flags);
}

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

/*
 * Interval: 0.5sec Threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)

/*
 * Maximum permissible delay between two readouts of the watchdog
 * clocksource surrounding a read of the clocksource being validated.
 * This delay could be due to SMIs, NMIs, or to VCPU preemptions.
 */
#define WATCHDOG_MAX_SKEW (100 * NSEC_PER_USEC)
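
/*
 * Worked numbers (informative): WATCHDOG_INTERVAL is HZ >> 1, i.e. half a
 * second worth of jiffies, and WATCHDOG_THRESHOLD is NSEC_PER_SEC >> 4,
 * i.e. 10^9 / 16 = 62500000ns = 62.5ms (the "0.0625s" above). A watched
 * clocksource is therefore flagged when it drifts more than ~62.5ms from
 * the watchdog within one 0.5s interval, while a single readout pair is
 * only trusted if the two watchdog reads are within 100us of each other.
 */
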
static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * We cannot directly run clocksource_watchdog_kthread() here, because
	 * clocksource_select() calls timekeeping_notify() which uses
	 * stop_machine(). One cannot use stop_machine() from a workqueue due
	 * to lock inversions wrt CPU hotplug.
	 *
	 * Also, we only ever run this work once or twice during the lifetime
	 * of the kernel, so there is no point in creating a more permanent
	 * kthread for this.
	 *
	 * If kthread_run() fails, the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}

static void __clocksource_unstable(struct clocksource *cs)
{
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;

	/*
	 * If the clocksource is registered clocksource_watchdog_kthread() will
	 * re-rate and re-select.
	 */
	if (list_empty(&cs->list)) {
		cs->rating = 0;
		return;
	}

	if (cs->mark_unstable)
		cs->mark_unstable(cs);

	/* kick clocksource_watchdog_kthread() */
	if (finished_booting)
		schedule_work(&watchdog_work);
}

/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs:		clocksource to be marked unstable
 *
 * This function is called by the x86 TSC code to mark clocksources as unstable;
 * it defers demotion and re-selection to a kthread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static ulong max_cswd_read_retries = 3;
module_param(max_cswd_read_retries, ulong, 0644);

static bool cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow)
{
	unsigned int nretries;
	u64 wd_end, wd_delta;
	int64_t wd_delay;

	for (nretries = 0; nretries <= max_cswd_read_retries; nretries++) {
		local_irq_disable();
		*wdnow = watchdog->read(watchdog);
		*csnow = cs->read(cs);
		wd_end = watchdog->read(watchdog);
		local_irq_enable();

		wd_delta = clocksource_delta(wd_end, *wdnow, watchdog->mask);
		wd_delay = clocksource_cyc2ns(wd_delta, watchdog->mult,
					      watchdog->shift);
		if (wd_delay <= WATCHDOG_MAX_SKEW) {
			if (nretries > 1 || nretries >= max_cswd_read_retries) {
				pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n",
					smp_processor_id(), watchdog->name, nretries);
			}
			return true;
		}
	}

	pr_warn("timekeeping watchdog on CPU%d: %s read-back delay of %lldns, attempt %d, marking unstable\n",
		smp_processor_id(), watchdog->name, wd_delay, nretries);
	return false;
}

static u64 csnow_mid;
static cpumask_t cpus_ahead;
static cpumask_t cpus_behind;

static void clocksource_verify_one_cpu(void *csin)
{
	struct clocksource *cs = (struct clocksource *)csin;

	csnow_mid = cs->read(cs);
}

static void clocksource_verify_percpu(struct clocksource *cs)
{
	int64_t cs_nsec, cs_nsec_max = 0, cs_nsec_min = LLONG_MAX;
	u64 csnow_begin, csnow_end;
	int cpu, testcpu;
	s64 delta;

	cpumask_clear(&cpus_ahead);
	cpumask_clear(&cpus_behind);
	preempt_disable();
	testcpu = smp_processor_id();
	pr_warn("Checking clocksource %s synchronization from CPU %d.\n", cs->name, testcpu);
	for_each_online_cpu(cpu) {
		if (cpu == testcpu)
			continue;
		csnow_begin = cs->read(cs);
		smp_call_function_single(cpu, clocksource_verify_one_cpu, cs, 1);
		csnow_end = cs->read(cs);
		delta = (s64)((csnow_mid - csnow_begin) & cs->mask);
		if (delta < 0)
			cpumask_set_cpu(cpu, &cpus_behind);
		delta = (s64)((csnow_end - csnow_mid) & cs->mask);
		if (delta < 0)
			cpumask_set_cpu(cpu, &cpus_ahead);
		delta = clocksource_delta(csnow_end, csnow_begin, cs->mask);
		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
		if (cs_nsec > cs_nsec_max)
			cs_nsec_max = cs_nsec;
		if (cs_nsec < cs_nsec_min)
			cs_nsec_min = cs_nsec;
	}
	preempt_enable();
	if (!cpumask_empty(&cpus_ahead))
		pr_warn("        CPUs %*pbl ahead of CPU %d for clocksource %s.\n",
			cpumask_pr_args(&cpus_ahead), testcpu, cs->name);
	if (!cpumask_empty(&cpus_behind))
		pr_warn("        CPUs %*pbl behind CPU %d for clocksource %s.\n",
			cpumask_pr_args(&cpus_behind), testcpu, cs->name);
	if (!cpumask_empty(&cpus_ahead) || !cpumask_empty(&cpus_behind))
		pr_warn("        CPU %d check durations %lldns - %lldns for clocksource %s.\n",
			testcpu, cs_nsec_min, cs_nsec_max, cs->name);
}
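
/*
 * Reading aid (informative): the comparison above is wraparound-safe
 * because the counters are monotonic modulo cs->mask. E.g. with a 64-bit
 * mask, if this CPU reads csnow_begin = 1000 and the remote CPU then
 * returns csnow_mid = 990, the unsigned subtraction wraps to a huge value
 * whose signed interpretation is -10, so the remote CPU is recorded in
 * cpus_behind.
 */
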
static void clocksource_watchdog(struct timer_list *unused)
{
	u64 csnow, wdnow, cslast, wdlast, delta;
	int next_cpu, reset_pending;
	int64_t wd_nsec, cs_nsec;
	struct clocksource *cs;

	spin_lock(&watchdog_lock);
	if (!watchdog_running)
		goto out;

	reset_pending = atomic_read(&watchdog_reset_pending);

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			if (finished_booting)
				schedule_work(&watchdog_work);
			continue;
		}

		if (!cs_watchdog_read(cs, &csnow, &wdnow)) {
			/* Clock readout unreliable, so give it up. */
			__clocksource_unstable(cs);
			continue;
		}

		/* Clocksource initialized ? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
		    atomic_read(&watchdog_reset_pending)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = wdnow;
			cs->cs_last = csnow;
			continue;
		}

		delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
		wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
					     watchdog->shift);

		delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
		wdlast = cs->wd_last; /* save these in case we print them */
		cslast = cs->cs_last;
		cs->cs_last = csnow;
		cs->wd_last = wdnow;

		if (atomic_read(&watchdog_reset_pending))
			continue;

		/* Check the deviation from the watchdog clocksource. */
		if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
			pr_warn("timekeeping watchdog on CPU%d: Marking clocksource '%s' as unstable because the skew is too large:\n",
				smp_processor_id(), cs->name);
			pr_warn("                      '%s' wd_now: %llx wd_last: %llx mask: %llx\n",
				watchdog->name, wdnow, wdlast, watchdog->mask);
			pr_warn("                      '%s' cs_now: %llx cs_last: %llx mask: %llx\n",
				cs->name, csnow, cslast, cs->mask);
			__clocksource_unstable(cs);
			continue;
		}

		if (cs == curr_clocksource && cs->tick_stable)
			cs->tick_stable(cs);

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			/* Mark it valid for high-res. */
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;

			/*
			 * clocksource_done_booting() will sort it if
			 * finished_booting is not set yet.
			 */
			if (!finished_booting)
				continue;

			/*
			 * If this is not the current clocksource let
			 * the watchdog thread reselect it. Due to the
			 * change to high res this clocksource might
			 * be preferred now. If it is the current
			 * clocksource let the tick code know about
			 * that change.
			 */
			if (cs != curr_clocksource) {
				cs->flags |= CLOCK_SOURCE_RESELECT;
				schedule_work(&watchdog_work);
			} else {
				tick_clock_notify();
			}
		}
	}

	/*
	 * We only clear the watchdog_reset_pending when we did a
	 * full cycle through all clocksources.
	 */
	if (reset_pending)
		atomic_dec(&watchdog_reset_pending);

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);

	/*
	 * Arm the timer if it is not already pending: this could race with
	 * a concurrent clocksource_stop_watchdog() / clocksource_start_watchdog()
	 * pair.
	 */
	if (!timer_pending(&watchdog_timer)) {
		watchdog_timer.expires += WATCHDOG_INTERVAL;
		add_timer_on(&watchdog_timer, next_cpu);
	}
out:
	spin_unlock(&watchdog_lock);
}
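
/*
 * Worked example (informative, hypothetical numbers): suppose over one
 * 0.5s interval the watchdog advances by wd_nsec = 500000000ns while the
 * watched clocksource advances by cs_nsec = 500080000ns. The skew of
 * 80000ns (80us) is far below WATCHDOG_THRESHOLD (62500000ns), so the
 * clocksource survives; only a skew above 62.5ms per interval trips
 * __clocksource_unstable().
 */
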
static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
	timer_setup(&watchdog_timer, clocksource_watchdog, 0);
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}

static inline void clocksource_stop_watchdog(void)
{
	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
		return;
	del_timer(&watchdog_timer);
	watchdog_running = 0;
}

static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}

static void clocksource_resume_watchdog(void)
{
	atomic_inc(&watchdog_reset_pending);
}

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	INIT_LIST_HEAD(&cs->wd_list);

	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
	}
}

static void clocksource_select_watchdog(bool fallback)
{
	struct clocksource *cs, *old_wd;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	/* save current watchdog */
	old_wd = watchdog;
	if (fallback)
		watchdog = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* cs is a clocksource to be watched. */
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
			continue;

		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_wd)
			continue;

		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating)
			watchdog = cs;
	}
	/* If we failed to find a fallback restore the old one. */
	if (!watchdog)
		watchdog = old_wd;

	/* If we changed the watchdog we need to reset cycles. */
	if (watchdog != old_wd)
		clocksource_reset_watchdog();

	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	if (cs != watchdog) {
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
			/* cs is a watched clocksource. */
			list_del_init(&cs->wd_list);
			/* Check if the watchdog timer needs to be stopped. */
			clocksource_stop_watchdog();
		}
	}
}
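
/*
 * Example (informative, hypothetical ratings): if the registered list
 * holds a rating-300 clocksource flagged CLOCK_SOURCE_MUST_VERIFY plus
 * unverified sources rated 250 and 120, clocksource_select_watchdog()
 * skips the MUST_VERIFY entry (it is the one being checked) and installs
 * the rating-250 source as watchdog, since 250 > 120.
 */
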
static int __clocksource_watchdog_kthread(void)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	int select = 0;

	/* Do any required per-CPU skew verification. */
	if (curr_clocksource &&
	    curr_clocksource->flags & CLOCK_SOURCE_UNSTABLE &&
	    curr_clocksource->flags & CLOCK_SOURCE_VERIFY_PERCPU)
		clocksource_verify_percpu(curr_clocksource);

	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			__clocksource_change_rating(cs, 0);
			select = 1;
		}
		if (cs->flags & CLOCK_SOURCE_RESELECT) {
			cs->flags &= ~CLOCK_SOURCE_RESELECT;
			select = 1;
		}
	}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	return select;
}

static int clocksource_watchdog_kthread(void *data)
{
	mutex_lock(&clocksource_mutex);
	if (__clocksource_watchdog_kthread())
		clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}

static bool clocksource_is_watchdog(struct clocksource *cs)
{
	return cs == watchdog;
}

#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static void clocksource_select_watchdog(bool fallback) { }
static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int __clocksource_watchdog_kthread(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
void clocksource_mark_unstable(struct clocksource *cs) { }

static inline void clocksource_watchdog_lock(unsigned long *flags) { }
static inline void clocksource_watchdog_unlock(unsigned long *flags) { }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */

static bool clocksource_is_suspend(struct clocksource *cs)
{
	return cs == suspend_clocksource;
}

static void __clocksource_suspend_select(struct clocksource *cs)
{
	/*
	 * Skip the clocksource which will be stopped in suspend state.
	 */
	if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP))
		return;

	/*
	 * A nonstop clocksource can be selected as the suspend clocksource to
	 * calculate the suspend time, so it should not supply suspend/resume
	 * interfaces that would stop it when the system suspends.
	 */
	if (cs->suspend || cs->resume) {
		pr_warn("Nonstop clocksource %s should not supply suspend/resume interfaces\n",
			cs->name);
	}

	/* Pick the best rating. */
	if (!suspend_clocksource || cs->rating > suspend_clocksource->rating)
		suspend_clocksource = cs;
}
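
/*
 * Example (informative, hypothetical hardware): given one always-on
 * counter flagged CLOCK_SOURCE_SUSPEND_NONSTOP with rating 200 and another
 * with rating 100, the rating-200 counter becomes suspend_clocksource and
 * is later used to compute the time spent in suspend.
 */
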
/**
 * clocksource_suspend_select - Select the best clocksource for suspend timing
 * @fallback:	if true, select a fallback clocksource
 */
static void clocksource_suspend_select(bool fallback)
{
	struct clocksource *cs, *old_suspend;

	old_suspend = suspend_clocksource;
	if (fallback)
		suspend_clocksource = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_suspend)
			continue;

		__clocksource_suspend_select(cs);
	}
}

/**
 * clocksource_start_suspend_timing - Start measuring the suspend timing
 * @cs:			current clocksource from timekeeping
 * @start_cycles:	current cycles from timekeeping
 *
 * This function saves the start cycle value of the suspend timer, which is
 * used to calculate the suspend time when resuming the system.
 *
 * This function is called late in the suspend process from timekeeping_suspend(),
 * which means processes are frozen, non-boot CPUs are offline and interrupts
 * are disabled. It is therefore possible to start the suspend timer without
 * taking the clocksource mutex.
 */
void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
{
	if (!suspend_clocksource)
		return;

	/*
	 * If the current clocksource is the suspend timer, we should use the
	 * tkr_mono.cycle_last value as suspend_start to avoid taking a
	 * second, different readout of the suspend timer.
	 */
	if (clocksource_is_suspend(cs)) {
		suspend_start = start_cycles;
		return;
	}

	if (suspend_clocksource->enable &&
	    suspend_clocksource->enable(suspend_clocksource)) {
		pr_warn_once("Failed to enable the non-suspend-able clocksource.\n");
		return;
	}

	suspend_start = suspend_clocksource->read(suspend_clocksource);
}
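
/*
 * Usage sketch (informative): timekeeping_suspend() passes the current
 * clocksource and its last cycle value to the function above;
 * timekeeping_resume() later calls clocksource_stop_suspend_timing()
 * below and accounts the returned nanoseconds as sleep time.
 */
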
/**
 * clocksource_stop_suspend_timing - Stop measuring the suspend timing
 * @cs:		current clocksource from timekeeping
 * @cycle_now:	current cycles from timekeeping
 *
 * This function calculates the suspend time from the suspend timer.
 *
 * Returns nanoseconds since suspend started, 0 if no usable suspend clocksource.
 *
 * This function is called early in the resume process from timekeeping_resume(),
 * which means there is only one CPU, no processes are running and the
 * interrupts are disabled. It is therefore possible to stop the suspend timer
 * without taking the clocksource mutex.
 */
u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
{
	u64 now, delta, nsec = 0;

	if (!suspend_clocksource)
		return 0;

	/*
	 * If the current clocksource is the suspend timer, we should use the
	 * tkr_mono.cycle_last value from timekeeping as the current cycle to
	 * avoid taking a second, different readout of the suspend timer.
	 */
	if (clocksource_is_suspend(cs))
		now = cycle_now;
	else
		now = suspend_clocksource->read(suspend_clocksource);

	if (now > suspend_start) {
		delta = clocksource_delta(now, suspend_start,
					  suspend_clocksource->mask);
		nsec = mul_u64_u32_shr(delta, suspend_clocksource->mult,
				       suspend_clocksource->shift);
	}

	/*
	 * Disable the suspend timer to save power if the current clocksource
	 * is not the suspend timer.
	 */
	if (!clocksource_is_suspend(cs) && suspend_clocksource->disable)
		suspend_clocksource->disable(suspend_clocksource);

	return nsec;
}

/**
 * clocksource_suspend - suspend the clocksource(s)
 */
void clocksource_suspend(void)
{
	struct clocksource *cs;

	list_for_each_entry_reverse(cs, &clocksource_list, list)
		if (cs->suspend)
			cs->suspend(cs);
}

/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume(cs);

	clocksource_resume_watchdog();
}

/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog. This might fail when the kernel
 * was stopped in code which holds watchdog_lock.
 */
void clocksource_touch_watchdog(void)
{
	clocksource_resume_watchdog();
}

/**
 * clocksource_max_adjustment - Returns max adjustment amount
 * @cs:		Pointer to clocksource
 */
static u32 clocksource_max_adjustment(struct clocksource *cs)
{
	u64 ret;
	/*
	 * We won't try to correct for more than 11% adjustments (110,000 ppm).
	 */
	ret = (u64)cs->mult * 11;
	do_div(ret, 100);
	return (u32)ret;
}
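
/*
 * Worked example (informative, hypothetical mult): for cs->mult = 4000000
 * the result is 4000000 * 11 / 100 = 440000, i.e. NTP frequency steering
 * may move mult by at most roughly +/-11%, which bounds how far the
 * cycle-to-nanosecond conversion can be adjusted.
 */
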
/**
 * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
 * @mult:	cycle to nanosecond multiplier
 * @shift:	cycle to nanosecond divisor (power of two)
 * @maxadj:	maximum adjustment value to mult (~11%)
 * @mask:	bitmask for two's complement subtraction of non 64 bit counters
 * @max_cyc:	maximum cycle value before potential overflow (does not include
 *		any safety margin)
 *
 * NOTE: This function includes a safety margin of 50%, in other words, we
 * return half the number of nanoseconds the hardware counter can technically
 * cover. This is done so that we can potentially detect problems caused by
 * delayed timers or bad hardware, which might result in time intervals that
 * are larger than what the math used can handle without overflows.
 */
u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
{
	u64 max_nsecs, max_cycles;

	/*
	 * Calculate the maximum number of cycles that we can pass to the
	 * cyc2ns() function without overflowing a 64-bit result.
	 */
	max_cycles = ULLONG_MAX;
	do_div(max_cycles, mult + maxadj);

	/*
	 * The actual maximum number of cycles we can defer the clocksource is
	 * determined by the minimum of max_cycles and mask.
	 * Note: Here we subtract the maxadj to make sure we don't sleep for
	 * too long if there's a large negative adjustment.
	 */
	max_cycles = min(max_cycles, mask);
	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);

	/* return the max_cycles value as well if requested */
	if (max_cyc)
		*max_cyc = max_cycles;

	/* Return 50% of the actual maximum, so we can detect bad values */
	max_nsecs >>= 1;

	return max_nsecs;
}

/**
 * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles
 * @cs:		Pointer to clocksource to be updated
 */
static inline void clocksource_update_max_deferment(struct clocksource *cs)
{
	cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
						cs->maxadj, cs->mask,
						&cs->max_cycles);
}
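
/*
 * Worked example (informative; the numbers are hypothetical): with
 * mult = 2^22 and shift = 22 (1 cycle == 1ns), maxadj is about 461000 and
 * a 32-bit mask caps max_cycles at ~4.29e9 cycles. Converted with
 * mult - maxadj that is roughly 3.8 seconds, so after the 50% safety
 * margin max_idle_ns comes out near 1.9 seconds.
 */
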
static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
{
	struct clocksource *cs;

	if (!finished_booting || list_empty(&clocksource_list))
		return NULL;

	/*
	 * We pick the clocksource with the highest rating. If oneshot
	 * mode is active, we pick the highres valid clocksource with
	 * the best rating.
	 */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			continue;
		return cs;
	}
	return NULL;
}

static void __clocksource_select(bool skipcur)
{
	bool oneshot = tick_oneshot_mode_active();
	struct clocksource *best, *cs;

	/* Find the best suitable clocksource */
	best = clocksource_find_best(oneshot, skipcur);
	if (!best)
		return;

	if (!strlen(override_name))
		goto found;

	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz)
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
			/* Override clocksource cannot be used. */
			if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
				pr_warn("Override clocksource %s is unstable and not HRT compatible - cannot switch while in HRT/NOHZ mode\n",
					cs->name);
				override_name[0] = 0;
			} else {
				/*
				 * The override cannot be currently verified.
				 * Deferring to let the watchdog check.
				 */
				pr_info("Override clocksource %s is not currently HRT compatible - deferring\n",
					cs->name);
			}
		} else
			/* Override clocksource can be used. */
			best = cs;
		break;
	}

found:
	if (curr_clocksource != best && !timekeeping_notify(best)) {
		pr_info("Switched to clocksource %s\n", best->name);
		curr_clocksource = best;
	}
}

/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource
 * which is selected by userspace override.
 */
static void clocksource_select(void)
{
	__clocksource_select(false);
}

static void clocksource_select_fallback(void)
{
	__clocksource_select(true);
}

/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	mutex_lock(&clocksource_mutex);
	curr_clocksource = clocksource_default_clock();
	finished_booting = 1;
	/*
	 * Run the watchdog first to eliminate unstable clock sources
	 */
	__clocksource_watchdog_kthread();
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
fs_initcall(clocksource_done_booting);

/*
 * Enqueue the clocksource sorted by rating
 */
static void clocksource_enqueue(struct clocksource *cs)
{
	struct list_head *entry = &clocksource_list;
	struct clocksource *tmp;

	list_for_each_entry(tmp, &clocksource_list, list) {
		/* Keep track of the place, where to insert */
		if (tmp->rating < cs->rating)
			break;
		entry = &tmp->list;
	}
	list_add(&cs->list, entry);
}

/**
 * __clocksource_update_freq_scale - Used to update the clocksource with a new freq
 * @cs:		clocksource to be registered
 * @scale:	Scale factor multiplied against freq to get clocksource hz
 * @freq:	clocksource frequency (cycles per second) divided by scale
 *
 * This should only be called from the clocksource->enable() method.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * __clocksource_update_freq_hz() or __clocksource_update_freq_khz() helper
 * functions.
 */
void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	u64 sec;

	/*
	 * Default clocksources are *special* and self-define their mult/shift.
	 * But, you're not special, so you should specify a freq value.
	 */
	if (freq) {
		/*
		 * Calc the maximum number of seconds which we can run before
		 * wrapping around. For clocksources which have a mask > 32-bit
		 * we need to limit the max sleep time to have a good
		 * conversion precision. 10 minutes is still a reasonable
		 * amount. That results in a shift value of 24 for a
		 * clocksource with mask >= 40-bit and f >= 4GHz. That maps to
		 * ~ 0.06ppm granularity for NTP.
		 */
		sec = cs->mask;
		do_div(sec, freq);
		do_div(sec, scale);
		if (!sec)
			sec = 1;
		else if (sec > 600 && cs->mask > UINT_MAX)
			sec = 600;

		clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
				       NSEC_PER_SEC / scale, sec * scale);
	}
	/*
	 * Ensure clocksources that have large 'mult' values don't overflow
	 * when adjusted.
	 */
	cs->maxadj = clocksource_max_adjustment(cs);
	while (freq && ((cs->mult + cs->maxadj < cs->mult)
		|| (cs->mult - cs->maxadj > cs->mult))) {
		cs->mult >>= 1;
		cs->shift--;
		cs->maxadj = clocksource_max_adjustment(cs);
	}

	/*
	 * Only warn for *special* clocksources that self-define
	 * their mult/shift values and don't specify a freq.
	 */
	WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
		"timekeeping: Clocksource %s might overflow on 11%% adjustment\n",
		cs->name);

	clocksource_update_max_deferment(cs);

	pr_info("%s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n",
		cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
}
EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
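
/*
 * Driver-side sketch (illustrative; the "foo" names are hypothetical and
 * not defined in this file): a typical driver fills in a struct
 * clocksource and registers with the helper matching its counter
 * frequency, e.g.:
 *
 *	static u64 foo_read(struct clocksource *cs)
 *	{
 *		return readl_relaxed(foo_base + FOO_COUNTER) & cs->mask;
 *	}
 *
 *	static struct clocksource foo_cs = {
 *		.name	= "foo",
 *		.rating	= 200,
 *		.read	= foo_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 *	};
 *
 *	clocksource_register_hz(&foo_cs, 19200000);
 *
 * clocksource_register_hz()/_khz() wrap __clocksource_register_scale()
 * below, which derives mult/shift from the given frequency.
 */
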
/**
 * __clocksource_register_scale - Used to install new clocksources
 * @cs:		clocksource to be registered
 * @scale:	Scale factor multiplied against freq to get clocksource hz
 * @freq:	clocksource frequency (cycles per second) divided by scale
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * clocksource_register_hz() or clocksource_register_khz() helper functions.
 */
int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	unsigned long flags;

	clocksource_arch_init(cs);

	if (WARN_ON_ONCE((unsigned int)cs->id >= CSID_MAX))
		cs->id = CSID_GENERIC;
	if (cs->vdso_clock_mode < 0 ||
	    cs->vdso_clock_mode >= VDSO_CLOCKMODE_MAX) {
		pr_warn("clocksource %s registered with invalid VDSO mode %d. Disabling VDSO support.\n",
			cs->name, cs->vdso_clock_mode);
		cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
	}

	/* Initialize mult/shift and max_idle_ns */
	__clocksource_update_freq_scale(cs, scale, freq);

	/* Add clocksource to the clocksource list */
	mutex_lock(&clocksource_mutex);

	clocksource_watchdog_lock(&flags);
	clocksource_enqueue(cs);
	clocksource_enqueue_watchdog(cs);
	clocksource_watchdog_unlock(&flags);

	clocksource_select();
	clocksource_select_watchdog(false);
	__clocksource_suspend_select(cs);
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(__clocksource_register_scale);

static void __clocksource_change_rating(struct clocksource *cs, int rating)
{
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
}

/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 * @cs:		clocksource to be changed
 * @rating:	new rating
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	unsigned long flags;

	mutex_lock(&clocksource_mutex);
	clocksource_watchdog_lock(&flags);
	__clocksource_change_rating(cs, rating);
	clocksource_watchdog_unlock(&flags);

	clocksource_select();
	clocksource_select_watchdog(false);
	clocksource_suspend_select(false);
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);
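
/*
 * Usage sketch (informative): a driver that detects its counter degrading
 * can demote itself and trigger reselection of a better source, e.g.
 *
 *	clocksource_change_rating(&foo_cs, 50);
 *
 * (foo_cs as in the hypothetical example above). Rating 0 is what the
 * watchdog kthread assigns to clocksources it has found unstable.
 */
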
/*
 * Unbind clocksource @cs. Called with clocksource_mutex held
 */
static int clocksource_unbind(struct clocksource *cs)
{
	unsigned long flags;

	if (clocksource_is_watchdog(cs)) {
		/* Select and try to install a replacement watchdog. */
		clocksource_select_watchdog(true);
		if (clocksource_is_watchdog(cs))
			return -EBUSY;
	}

	if (cs == curr_clocksource) {
		/* Select and try to install a replacement clock source */
		clocksource_select_fallback();
		if (curr_clocksource == cs)
			return -EBUSY;
	}

	if (clocksource_is_suspend(cs)) {
		/*
		 * Select and try to install a replacement suspend clocksource.
		 * If there is no replacement suspend clocksource, we will just
		 * let the clocksource go and have no suspend clocksource.
		 */
		clocksource_suspend_select(true);
	}

	clocksource_watchdog_lock(&flags);
	clocksource_dequeue_watchdog(cs);
	list_del_init(&cs->list);
	clocksource_watchdog_unlock(&flags);

	return 0;
}

/**
 * clocksource_unregister - remove a registered clocksource
 * @cs:	clocksource to be unregistered
 */
int clocksource_unregister(struct clocksource *cs)
{
	int ret = 0;

	mutex_lock(&clocksource_mutex);
	if (!list_empty(&cs->list))
		ret = clocksource_unbind(cs);
	mutex_unlock(&clocksource_mutex);
	return ret;
}
EXPORT_SYMBOL(clocksource_unregister);

#ifdef CONFIG_SYSFS
/**
 * current_clocksource_show - sysfs interface for current clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	char buffer to be filled with the current clocksource name
 *
 * Provides sysfs interface for listing the current clocksource.
 */
static ssize_t current_clocksource_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
	mutex_unlock(&clocksource_mutex);

	return count;
}

ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
{
	size_t ret = cnt;

	/* strings from sysfs write are not 0 terminated! */
	if (!cnt || cnt >= CS_NAME_LEN)
		return -EINVAL;

	/* strip off the \n: */
	if (buf[cnt-1] == '\n')
		cnt--;
	if (cnt > 0)
		memcpy(dst, buf, cnt);
	dst[cnt] = 0;
	return ret;
}

/**
 * current_clocksource_store - interface for manually overriding clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	name of override clocksource
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t current_clocksource_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	ssize_t ret;

	mutex_lock(&clocksource_mutex);

	ret = sysfs_get_uname(buf, override_name, count);
	if (ret >= 0)
		clocksource_select();

	mutex_unlock(&clocksource_mutex);

	return ret;
}
static DEVICE_ATTR_RW(current_clocksource);

/**
 * unbind_clocksource_store - interface for manually unbinding clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	name of the clocksource to unbind
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually unbinding a clocksource.
 */
static ssize_t unbind_clocksource_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct clocksource *cs;
	char name[CS_NAME_LEN];
	ssize_t ret;

	ret = sysfs_get_uname(buf, name, count);
	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clocksource_mutex);
	list_for_each_entry(cs, &clocksource_list, list) {
		if (strcmp(cs->name, name))
			continue;
		ret = clocksource_unbind(cs);
		break;
	}
	mutex_unlock(&clocksource_mutex);

	return ret ? ret : count;
}
static DEVICE_ATTR_WO(unbind_clocksource);
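
/*
 * Shell usage (informative):
 *
 *	# cat /sys/devices/system/clocksource/clocksource0/available_clocksource
 *	# echo hpet > /sys/devices/system/clocksource/clocksource0/current_clocksource
 *	# echo hpet > /sys/devices/system/clocksource/clocksource0/unbind_clocksource
 *
 * The first write installs an override; the second removes the
 * clocksource entirely, or fails with -EBUSY if no replacement could be
 * installed.
 */
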
/**
 * available_clocksource_show - sysfs interface for listing clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t available_clocksource_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct clocksource *src;
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	list_for_each_entry(src, &clocksource_list, list) {
		/*
		 * Don't show non-HRES clocksource if the tick code is
		 * in one shot mode (highres=on or nohz=on)
		 */
		if (!tick_oneshot_mode_active() ||
		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			count += snprintf(buf + count,
				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
				  "%s ", src->name);
	}
	mutex_unlock(&clocksource_mutex);

	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

	return count;
}
static DEVICE_ATTR_RO(available_clocksource);

static struct attribute *clocksource_attrs[] = {
	&dev_attr_current_clocksource.attr,
	&dev_attr_unbind_clocksource.attr,
	&dev_attr_available_clocksource.attr,
	NULL
};
ATTRIBUTE_GROUPS(clocksource);

static struct bus_type clocksource_subsys = {
	.name = "clocksource",
	.dev_name = "clocksource",
};

static struct device device_clocksource = {
	.id	= 0,
	.bus	= &clocksource_subsys,
	.groups	= clocksource_groups,
};

static int __init init_clocksource_sysfs(void)
{
	int error = subsys_system_register(&clocksource_subsys, NULL);

	if (!error)
		error = device_register(&device_clocksource);

	return error;
}

device_initcall(init_clocksource_sysfs);
#endif /* CONFIG_SYSFS */

/**
 * boot_override_clocksource - boot clock override
 * @str:	override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char *str)
{
	mutex_lock(&clocksource_mutex);
	if (str)
		strlcpy(override_name, str, sizeof(override_name));
	mutex_unlock(&clocksource_mutex);
	return 1;
}

__setup("clocksource=", boot_override_clocksource);

/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str:	override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name
 */
static int __init boot_override_clock(char *str)
{
	if (!strcmp(str, "pmtmr")) {
		pr_warn("clock=pmtmr is deprecated - use clocksource=acpi_pm\n");
		return boot_override_clocksource("acpi_pm");
	}
	pr_warn("clock= boot option is deprecated - use clocksource=xyz\n");
	return boot_override_clocksource(str);
}

__setup("clock=", boot_override_clock);
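
/*
 * Boot-line usage (informative): passing "clocksource=acpi_pm" on the
 * kernel command line seeds override_name early during parameter parsing;
 * the override is then honored once clocksource_done_booting() performs
 * the first real selection.
 */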