// SPDX-License-Identifier: GPL-2.0+
/*
 * This file contains the functions which manage clocksource drivers.
 *
 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>
#include <linux/kthread.h>
#include <linux/prandom.h>
#include <linux/cpu.h>

#include "tick-internal.h"
#include "timekeeping_internal.h"

/**
 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
 * @mult:	pointer to mult variable
 * @shift:	pointer to shift variable
 * @from:	frequency to convert from
 * @to:		frequency to convert to
 * @maxsec:	guaranteed runtime conversion range in seconds
 *
 * The function evaluates the shift/mult pair for the scaled math
 * operations of clocksources and clockevents.
 *
 * @to and @from are frequency values in HZ. For clock sources @to is
 * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
 * events @to is the counter frequency and @from is NSEC_PER_SEC.
 *
 * The @maxsec conversion range argument controls the time frame in
 * seconds which must be covered by the runtime conversion with the
 * calculated mult and shift factors. This guarantees that no 64bit
 * overflow happens when the input value of the conversion is
 * multiplied with the calculated mult factor. Larger ranges may
 * reduce the conversion accuracy by choosing smaller mult and shift
 * factors.
 */
void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
{
	u64 tmp;
	u32 sft, sftacc = 32;

	/*
	 * Calculate the shift factor which is limiting the conversion
	 * range:
	 */
	tmp = ((u64)maxsec * from) >> 32;
	while (tmp) {
		tmp >>= 1;
		sftacc--;
	}

	/*
	 * Find the conversion shift/mult pair which has the best
	 * accuracy and fits the maxsec conversion range:
	 */
	for (sft = 32; sft > 0; sft--) {
		tmp = (u64) to << sft;
		tmp += from / 2;
		do_div(tmp, from);
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = tmp;
	*shift = sft;
}
EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
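
/*
 * Worked example (illustrative only, not part of the kernel API
 * contract): for a hypothetical 10 MHz counter converted to
 * nanoseconds with a guaranteed range of 600 seconds,
 *
 *	clocks_calc_mult_shift(&mult, &shift, 10000000, NSEC_PER_SEC, 600);
 *
 * yields mult == 1677721600 and shift == 24: 600s * 10MHz is well below
 * 2^32 cycles, so sftacc only drops to 31, and 100 << 24 is the largest
 * scaled ratio that still fits below 2^31. The runtime conversion is
 * then
 *
 *	ns = (cycles * mult) >> shift == cycles * 100
 *
 * i.e. exactly 100ns per cycle for this frequency.
 */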

/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * suspend_clocksource:
 *	used to calculate the suspend time.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static struct clocksource *suspend_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[CS_NAME_LEN];
static int finished_booting;
static u64 suspend_start;

/*
 * Threshold: 0.0312s, when doubled: 0.0625s.
 * Also a default for cs->uncertainty_margin when registering clocks.
 */
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 5)

/*
 * Maximum permissible delay between two readouts of the watchdog
 * clocksource surrounding a read of the clocksource being validated.
 * This delay could be due to SMIs, NMIs, or to VCPU preemptions. Used as
 * a lower bound for cs->uncertainty_margin values when registering clocks.
 */
#define WATCHDOG_MAX_SKEW (50 * NSEC_PER_USEC)

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);
static void clocksource_select(void);

static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
static int watchdog_running;
static atomic_t watchdog_reset_pending;

static inline void clocksource_watchdog_lock(unsigned long *flags)
{
	spin_lock_irqsave(&watchdog_lock, *flags);
}

static inline void clocksource_watchdog_unlock(unsigned long *flags)
{
	spin_unlock_irqrestore(&watchdog_lock, *flags);
}

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

/*
 * Interval: 0.5sec.
 */
#define WATCHDOG_INTERVAL (HZ >> 1)

static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * We cannot directly run clocksource_watchdog_kthread() here, because
	 * clocksource_select() calls timekeeping_notify() which uses
	 * stop_machine(). One cannot use stop_machine() from a workqueue() due
	 * to lock inversions wrt CPU hotplug.
	 *
	 * Also, we only ever run this work once or twice during the lifetime
	 * of the kernel, so there is no point in creating a more permanent
	 * kthread for this.
	 *
	 * If kthread_run() fails, the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}

static void __clocksource_unstable(struct clocksource *cs)
{
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;

	/*
	 * If the clocksource is registered clocksource_watchdog_kthread() will
	 * re-rate and re-select.
	 */
	if (list_empty(&cs->list)) {
		cs->rating = 0;
		return;
	}

	if (cs->mark_unstable)
		cs->mark_unstable(cs);

	/* kick clocksource_watchdog_kthread() */
	if (finished_booting)
		schedule_work(&watchdog_work);
}

/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs:	clocksource to be marked unstable
 *
 * This function is called by the x86 TSC code to mark clocksources as unstable;
 * it defers demotion and re-selection to a kthread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static ulong max_cswd_read_retries = 3;
module_param(max_cswd_read_retries, ulong, 0644);
static int verify_n_cpus = 8;
module_param(verify_n_cpus, int, 0644);

static bool cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow)
{
	unsigned int nretries;
	u64 wd_end, wd_delta;
	int64_t wd_delay;

	for (nretries = 0; nretries <= max_cswd_read_retries; nretries++) {
		local_irq_disable();
		*wdnow = watchdog->read(watchdog);
		*csnow = cs->read(cs);
		wd_end = watchdog->read(watchdog);
		local_irq_enable();

		wd_delta = clocksource_delta(wd_end, *wdnow, watchdog->mask);
		wd_delay = clocksource_cyc2ns(wd_delta, watchdog->mult,
					      watchdog->shift);
		if (wd_delay <= WATCHDOG_MAX_SKEW) {
			if (nretries > 1 || nretries >= max_cswd_read_retries) {
				pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n",
					smp_processor_id(), watchdog->name, nretries);
			}
			return true;
		}
	}

	pr_warn("timekeeping watchdog on CPU%d: %s read-back delay of %lldns, attempt %d, marking unstable\n",
		smp_processor_id(), watchdog->name, wd_delay, nretries);
	return false;
}
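
/*
 * Illustration (the numbers are an assumption, not measured data): with
 * a watchdog whose mult/shift make clocksource_cyc2ns() roughly 1ns per
 * cycle, a read window of wd_end - *wdnow == 80000 cycles gives
 * wd_delay of ~80us. That exceeds WATCHDOG_MAX_SKEW (50us), so the
 * sample above is discarded and retried instead of being used to judge
 * the skew of the clocksource under test.
 */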

static u64 csnow_mid;
static cpumask_t cpus_ahead;
static cpumask_t cpus_behind;
static cpumask_t cpus_chosen;

static void clocksource_verify_choose_cpus(void)
{
	int cpu, i, n = verify_n_cpus;

	if (n < 0) {
		/* Check all of the CPUs. */
		cpumask_copy(&cpus_chosen, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &cpus_chosen);
		return;
	}

	/* If no checking desired, or no other CPU to check, leave. */
	cpumask_clear(&cpus_chosen);
	if (n == 0 || num_online_cpus() <= 1)
		return;

	/* Make sure to select at least one CPU other than the current CPU. */
	cpu = cpumask_next(-1, cpu_online_mask);
	if (cpu == smp_processor_id())
		cpu = cpumask_next(cpu, cpu_online_mask);
	if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
		return;
	cpumask_set_cpu(cpu, &cpus_chosen);

	/* Force a sane value for the boot parameter. */
	if (n > nr_cpu_ids)
		n = nr_cpu_ids;

	/*
	 * Randomly select the specified number of CPUs. If the same
	 * CPU is selected multiple times, that CPU is checked only once,
	 * and no replacement CPU is selected. This gracefully handles
	 * situations where verify_n_cpus is greater than the number of
	 * CPUs that are currently online.
	 */
	for (i = 1; i < n; i++) {
		cpu = prandom_u32() % nr_cpu_ids;
		cpu = cpumask_next(cpu - 1, cpu_online_mask);
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_next(-1, cpu_online_mask);
		if (!WARN_ON_ONCE(cpu >= nr_cpu_ids))
			cpumask_set_cpu(cpu, &cpus_chosen);
	}

	/* Don't verify ourselves. */
	cpumask_clear_cpu(smp_processor_id(), &cpus_chosen);
}

static void clocksource_verify_one_cpu(void *csin)
{
	struct clocksource *cs = (struct clocksource *)csin;

	csnow_mid = cs->read(cs);
}

static void clocksource_verify_percpu(struct clocksource *cs)
{
	int64_t cs_nsec, cs_nsec_max = 0, cs_nsec_min = LLONG_MAX;
	u64 csnow_begin, csnow_end;
	int cpu, testcpu;
	s64 delta;

	if (verify_n_cpus == 0)
		return;
	cpumask_clear(&cpus_ahead);
	cpumask_clear(&cpus_behind);
	get_online_cpus();
	preempt_disable();
	clocksource_verify_choose_cpus();
	if (cpumask_weight(&cpus_chosen) == 0) {
		preempt_enable();
		put_online_cpus();
		pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name);
		return;
	}
	testcpu = smp_processor_id();
	pr_warn("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n", cs->name, testcpu, cpumask_pr_args(&cpus_chosen));
	for_each_cpu(cpu, &cpus_chosen) {
		if (cpu == testcpu)
			continue;
		csnow_begin = cs->read(cs);
		smp_call_function_single(cpu, clocksource_verify_one_cpu, cs, 1);
		csnow_end = cs->read(cs);
		delta = (s64)((csnow_mid - csnow_begin) & cs->mask);
		if (delta < 0)
			cpumask_set_cpu(cpu, &cpus_behind);
		delta = (s64)((csnow_end - csnow_mid) & cs->mask);
		if (delta < 0)
			cpumask_set_cpu(cpu, &cpus_ahead);
		delta = clocksource_delta(csnow_end, csnow_begin, cs->mask);
		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
		if (cs_nsec > cs_nsec_max)
			cs_nsec_max = cs_nsec;
		if (cs_nsec < cs_nsec_min)
			cs_nsec_min = cs_nsec;
	}
	preempt_enable();
	put_online_cpus();
	if (!cpumask_empty(&cpus_ahead))
		pr_warn("        CPUs %*pbl ahead of CPU %d for clocksource %s.\n",
			cpumask_pr_args(&cpus_ahead), testcpu, cs->name);
	if (!cpumask_empty(&cpus_behind))
		pr_warn("        CPUs %*pbl behind CPU %d for clocksource %s.\n",
			cpumask_pr_args(&cpus_behind), testcpu, cs->name);
	if (!cpumask_empty(&cpus_ahead) || !cpumask_empty(&cpus_behind))
		pr_warn("        CPU %d check durations %lldns - %lldns for clocksource %s.\n",
			testcpu, cs_nsec_min, cs_nsec_max, cs->name);
}
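
/*
 * Note on the ahead/behind test above (a reading aid, not new
 * mechanism): (csnow_mid - csnow_begin) & cs->mask is a two's-complement
 * difference truncated to the counter width. With a full 64-bit mask, a
 * remote read that lags the local one produces a value with the top bit
 * set, which turns negative when treated as s64 -- e.g. csnow_begin ==
 * 1000 and csnow_mid == 990 yields -10, flagging that CPU as "behind".
 */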

static void clocksource_watchdog(struct timer_list *unused)
{
	u64 csnow, wdnow, cslast, wdlast, delta;
	int next_cpu, reset_pending;
	int64_t wd_nsec, cs_nsec;
	struct clocksource *cs;
	u32 md;

	spin_lock(&watchdog_lock);
	if (!watchdog_running)
		goto out;

	reset_pending = atomic_read(&watchdog_reset_pending);

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			if (finished_booting)
				schedule_work(&watchdog_work);
			continue;
		}

		if (!cs_watchdog_read(cs, &csnow, &wdnow)) {
			/* Clock readout unreliable, so give it up. */
			__clocksource_unstable(cs);
			continue;
		}

		/* Clocksource initialized ? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
		    atomic_read(&watchdog_reset_pending)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = wdnow;
			cs->cs_last = csnow;
			continue;
		}

		delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
		wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
					     watchdog->shift);

		delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
		wdlast = cs->wd_last; /* save these in case we print them */
		cslast = cs->cs_last;
		cs->cs_last = csnow;
		cs->wd_last = wdnow;

		if (atomic_read(&watchdog_reset_pending))
			continue;

		/* Check the deviation from the watchdog clocksource. */
		md = cs->uncertainty_margin + watchdog->uncertainty_margin;
		if (abs(cs_nsec - wd_nsec) > md) {
			pr_warn("timekeeping watchdog on CPU%d: Marking clocksource '%s' as unstable because the skew is too large:\n",
				smp_processor_id(), cs->name);
			pr_warn("                      '%s' wd_now: %llx wd_last: %llx mask: %llx\n",
				watchdog->name, wdnow, wdlast, watchdog->mask);
			pr_warn("                      '%s' cs_now: %llx cs_last: %llx mask: %llx\n",
				cs->name, csnow, cslast, cs->mask);
			if (curr_clocksource == cs)
				pr_warn("                      '%s' is current clocksource.\n", cs->name);
			else if (curr_clocksource)
				pr_warn("                      '%s' (not '%s') is current clocksource.\n", curr_clocksource->name, cs->name);
			else
				pr_warn("                      No current clocksource.\n");
			__clocksource_unstable(cs);
			continue;
		}

		if (cs == curr_clocksource && cs->tick_stable)
			cs->tick_stable(cs);

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			/* Mark it valid for high-res. */
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;

			/*
			 * clocksource_done_booting() will sort it if
			 * finished_booting is not set yet.
			 */
			if (!finished_booting)
				continue;

			/*
			 * If this is not the current clocksource let
			 * the watchdog thread reselect it. Due to the
			 * change to high res this clocksource might
			 * be preferred now. If it is the current
			 * clocksource let the tick code know about
			 * that change.
			 */
			if (cs != curr_clocksource) {
				cs->flags |= CLOCK_SOURCE_RESELECT;
				schedule_work(&watchdog_work);
			} else {
				tick_clock_notify();
			}
		}
	}

	/*
	 * We only clear the watchdog_reset_pending, when we did a
	 * full cycle through all clocksources.
	 */
	if (reset_pending)
		atomic_dec(&watchdog_reset_pending);

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);

	/*
	 * Arm the timer if it is not already pending: this could race
	 * with a concurrent clocksource_stop_watchdog() /
	 * clocksource_start_watchdog() pair.
	 */
	if (!timer_pending(&watchdog_timer)) {
		watchdog_timer.expires += WATCHDOG_INTERVAL;
		add_timer_on(&watchdog_timer, next_cpu);
	}
out:
	spin_unlock(&watchdog_lock);
}
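
/*
 * Margin example (assumes both clocks kept the defaults assigned by
 * __clocksource_update_freq_scale() below): with cs->uncertainty_margin
 * and watchdog->uncertainty_margin each clamped to 2 * WATCHDOG_MAX_SKEW
 * == 100us, the skew check above tolerates |cs_nsec - wd_nsec| up to
 * md == 200us per 0.5s WATCHDOG_INTERVAL before the clocksource is
 * declared unstable.
 */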
473 */ 474 if (!timer_pending(&watchdog_timer)) { 475 watchdog_timer.expires += WATCHDOG_INTERVAL; 476 add_timer_on(&watchdog_timer, next_cpu); 477 } 478 out: 479 spin_unlock(&watchdog_lock); 480 } 481 482 static inline void clocksource_start_watchdog(void) 483 { 484 if (watchdog_running || !watchdog || list_empty(&watchdog_list)) 485 return; 486 timer_setup(&watchdog_timer, clocksource_watchdog, 0); 487 watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL; 488 add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask)); 489 watchdog_running = 1; 490 } 491 492 static inline void clocksource_stop_watchdog(void) 493 { 494 if (!watchdog_running || (watchdog && !list_empty(&watchdog_list))) 495 return; 496 del_timer(&watchdog_timer); 497 watchdog_running = 0; 498 } 499 500 static inline void clocksource_reset_watchdog(void) 501 { 502 struct clocksource *cs; 503 504 list_for_each_entry(cs, &watchdog_list, wd_list) 505 cs->flags &= ~CLOCK_SOURCE_WATCHDOG; 506 } 507 508 static void clocksource_resume_watchdog(void) 509 { 510 atomic_inc(&watchdog_reset_pending); 511 } 512 513 static void clocksource_enqueue_watchdog(struct clocksource *cs) 514 { 515 INIT_LIST_HEAD(&cs->wd_list); 516 517 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) { 518 /* cs is a clocksource to be watched. */ 519 list_add(&cs->wd_list, &watchdog_list); 520 cs->flags &= ~CLOCK_SOURCE_WATCHDOG; 521 } else { 522 /* cs is a watchdog. */ 523 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) 524 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; 525 } 526 } 527 528 static void clocksource_select_watchdog(bool fallback) 529 { 530 struct clocksource *cs, *old_wd; 531 unsigned long flags; 532 533 spin_lock_irqsave(&watchdog_lock, flags); 534 /* save current watchdog */ 535 old_wd = watchdog; 536 if (fallback) 537 watchdog = NULL; 538 539 list_for_each_entry(cs, &clocksource_list, list) { 540 /* cs is a clocksource to be watched. */ 541 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) 542 continue; 543 544 /* Skip current if we were requested for a fallback. */ 545 if (fallback && cs == old_wd) 546 continue; 547 548 /* Pick the best watchdog. */ 549 if (!watchdog || cs->rating > watchdog->rating) 550 watchdog = cs; 551 } 552 /* If we failed to find a fallback restore the old one. */ 553 if (!watchdog) 554 watchdog = old_wd; 555 556 /* If we changed the watchdog we need to reset cycles. */ 557 if (watchdog != old_wd) 558 clocksource_reset_watchdog(); 559 560 /* Check if the watchdog timer needs to be started. */ 561 clocksource_start_watchdog(); 562 spin_unlock_irqrestore(&watchdog_lock, flags); 563 } 564 565 static void clocksource_dequeue_watchdog(struct clocksource *cs) 566 { 567 if (cs != watchdog) { 568 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) { 569 /* cs is a watched clocksource. */ 570 list_del_init(&cs->wd_list); 571 /* Check if the watchdog timer needs to be stopped. */ 572 clocksource_stop_watchdog(); 573 } 574 } 575 } 576 577 static int __clocksource_watchdog_kthread(void) 578 { 579 struct clocksource *cs, *tmp; 580 unsigned long flags; 581 int select = 0; 582 583 /* Do any required per-CPU skew verification. 

static int __clocksource_watchdog_kthread(void)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	int select = 0;

	/* Do any required per-CPU skew verification. */
	if (curr_clocksource &&
	    curr_clocksource->flags & CLOCK_SOURCE_UNSTABLE &&
	    curr_clocksource->flags & CLOCK_SOURCE_VERIFY_PERCPU)
		clocksource_verify_percpu(curr_clocksource);

	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			__clocksource_change_rating(cs, 0);
			select = 1;
		}
		if (cs->flags & CLOCK_SOURCE_RESELECT) {
			cs->flags &= ~CLOCK_SOURCE_RESELECT;
			select = 1;
		}
	}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	return select;
}

static int clocksource_watchdog_kthread(void *data)
{
	mutex_lock(&clocksource_mutex);
	if (__clocksource_watchdog_kthread())
		clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}

static bool clocksource_is_watchdog(struct clocksource *cs)
{
	return cs == watchdog;
}

#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static void clocksource_select_watchdog(bool fallback) { }
static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int __clocksource_watchdog_kthread(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
void clocksource_mark_unstable(struct clocksource *cs) { }

static inline void clocksource_watchdog_lock(unsigned long *flags) { }
static inline void clocksource_watchdog_unlock(unsigned long *flags) { }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */

static bool clocksource_is_suspend(struct clocksource *cs)
{
	return cs == suspend_clocksource;
}

static void __clocksource_suspend_select(struct clocksource *cs)
{
	/*
	 * Skip the clocksource which will be stopped in suspend state.
	 */
	if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP))
		return;

	/*
	 * The nonstop clocksource can be selected as the suspend clocksource to
	 * calculate the suspend time, so it should not supply suspend/resume
	 * interfaces to suspend the nonstop clocksource when system suspends.
	 */
	if (cs->suspend || cs->resume) {
		pr_warn("Nonstop clocksource %s should not supply suspend/resume interfaces\n",
			cs->name);
	}

	/* Pick the best rating. */
	if (!suspend_clocksource || cs->rating > suspend_clocksource->rating)
		suspend_clocksource = cs;
}

/**
 * clocksource_suspend_select - Select the best clocksource for suspend timing
 * @fallback: if true, select a fallback clocksource
 */
static void clocksource_suspend_select(bool fallback)
{
	struct clocksource *cs, *old_suspend;

	old_suspend = suspend_clocksource;
	if (fallback)
		suspend_clocksource = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_suspend)
			continue;

		__clocksource_suspend_select(cs);
	}
}

/**
 * clocksource_start_suspend_timing - Start measuring the suspend timing
 * @cs:			current clocksource from timekeeping
 * @start_cycles:	current cycles from timekeeping
 *
 * This function will save the start cycle values of suspend timer to calculate
 * the suspend time when resuming system.
 *
 * This function is called late in the suspend process from timekeeping_suspend(),
 * which means processes are frozen, non-boot CPUs are offline and interrupts
 * are disabled. It is therefore possible to start the suspend timer without
 * taking the clocksource mutex.
 */
void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
{
	if (!suspend_clocksource)
		return;

	/*
	 * If current clocksource is the suspend timer, we should use the
	 * tkr_mono.cycle_last value as suspend_start to avoid same reading
	 * from suspend timer.
	 */
	if (clocksource_is_suspend(cs)) {
		suspend_start = start_cycles;
		return;
	}

	if (suspend_clocksource->enable &&
	    suspend_clocksource->enable(suspend_clocksource)) {
		pr_warn_once("Failed to enable the non-suspend-able clocksource.\n");
		return;
	}

	suspend_start = suspend_clocksource->read(suspend_clocksource);
}

/**
 * clocksource_stop_suspend_timing - Stop measuring the suspend timing
 * @cs:		current clocksource from timekeeping
 * @cycle_now:	current cycles from timekeeping
 *
 * This function will calculate the suspend time from suspend timer.
 *
 * Returns nanoseconds since suspend started, 0 if no usable suspend clocksource.
 *
 * This function is called early in the resume process from timekeeping_resume(),
 * which means there is only one CPU, no processes are running and interrupts
 * are disabled. It is therefore possible to stop the suspend timer without
 * taking the clocksource mutex.
 */
u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
{
	u64 now, delta, nsec = 0;

	if (!suspend_clocksource)
		return 0;

	/*
	 * If current clocksource is the suspend timer, we should use the
	 * tkr_mono.cycle_last value from timekeeping as current cycle to
	 * avoid same reading from suspend timer.
	 */
	if (clocksource_is_suspend(cs))
		now = cycle_now;
	else
		now = suspend_clocksource->read(suspend_clocksource);

	if (now > suspend_start) {
		delta = clocksource_delta(now, suspend_start,
					  suspend_clocksource->mask);
		nsec = mul_u64_u32_shr(delta, suspend_clocksource->mult,
				       suspend_clocksource->shift);
	}

	/*
	 * Disable the suspend timer to save power if current clocksource is
	 * not the suspend timer.
	 */
	if (!clocksource_is_suspend(cs) && suspend_clocksource->disable)
		suspend_clocksource->disable(suspend_clocksource);

	return nsec;
}
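
/*
 * Worked example (hypothetical numbers): with a 32768 Hz nonstop
 * suspend clocksource whose mult/shift encode ~30517.578ns per cycle, a
 * resume-time delta of 163840 cycles converts via
 *
 *	nsec = mul_u64_u32_shr(delta, mult, shift);
 *
 * to roughly 5 * NSEC_PER_SEC, i.e. five seconds spent suspended, which
 * the resume path then feeds back into the timekeeper.
 */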
769 */ 770 if (!clocksource_is_suspend(cs) && suspend_clocksource->disable) 771 suspend_clocksource->disable(suspend_clocksource); 772 773 return nsec; 774 } 775 776 /** 777 * clocksource_suspend - suspend the clocksource(s) 778 */ 779 void clocksource_suspend(void) 780 { 781 struct clocksource *cs; 782 783 list_for_each_entry_reverse(cs, &clocksource_list, list) 784 if (cs->suspend) 785 cs->suspend(cs); 786 } 787 788 /** 789 * clocksource_resume - resume the clocksource(s) 790 */ 791 void clocksource_resume(void) 792 { 793 struct clocksource *cs; 794 795 list_for_each_entry(cs, &clocksource_list, list) 796 if (cs->resume) 797 cs->resume(cs); 798 799 clocksource_resume_watchdog(); 800 } 801 802 /** 803 * clocksource_touch_watchdog - Update watchdog 804 * 805 * Update the watchdog after exception contexts such as kgdb so as not 806 * to incorrectly trip the watchdog. This might fail when the kernel 807 * was stopped in code which holds watchdog_lock. 808 */ 809 void clocksource_touch_watchdog(void) 810 { 811 clocksource_resume_watchdog(); 812 } 813 814 /** 815 * clocksource_max_adjustment- Returns max adjustment amount 816 * @cs: Pointer to clocksource 817 * 818 */ 819 static u32 clocksource_max_adjustment(struct clocksource *cs) 820 { 821 u64 ret; 822 /* 823 * We won't try to correct for more than 11% adjustments (110,000 ppm), 824 */ 825 ret = (u64)cs->mult * 11; 826 do_div(ret,100); 827 return (u32)ret; 828 } 829 830 /** 831 * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted 832 * @mult: cycle to nanosecond multiplier 833 * @shift: cycle to nanosecond divisor (power of two) 834 * @maxadj: maximum adjustment value to mult (~11%) 835 * @mask: bitmask for two's complement subtraction of non 64 bit counters 836 * @max_cyc: maximum cycle value before potential overflow (does not include 837 * any safety margin) 838 * 839 * NOTE: This function includes a safety margin of 50%, in other words, we 840 * return half the number of nanoseconds the hardware counter can technically 841 * cover. This is done so that we can potentially detect problems caused by 842 * delayed timers or bad hardware, which might result in time intervals that 843 * are larger than what the math used can handle without overflows. 844 */ 845 u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc) 846 { 847 u64 max_nsecs, max_cycles; 848 849 /* 850 * Calculate the maximum number of cycles that we can pass to the 851 * cyc2ns() function without overflowing a 64-bit result. 852 */ 853 max_cycles = ULLONG_MAX; 854 do_div(max_cycles, mult+maxadj); 855 856 /* 857 * The actual maximum number of cycles we can defer the clocksource is 858 * determined by the minimum of max_cycles and mask. 859 * Note: Here we subtract the maxadj to make sure we don't sleep for 860 * too long if there's a large negative adjustment. 
861 */ 862 max_cycles = min(max_cycles, mask); 863 max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift); 864 865 /* return the max_cycles value as well if requested */ 866 if (max_cyc) 867 *max_cyc = max_cycles; 868 869 /* Return 50% of the actual maximum, so we can detect bad values */ 870 max_nsecs >>= 1; 871 872 return max_nsecs; 873 } 874 875 /** 876 * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles 877 * @cs: Pointer to clocksource to be updated 878 * 879 */ 880 static inline void clocksource_update_max_deferment(struct clocksource *cs) 881 { 882 cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift, 883 cs->maxadj, cs->mask, 884 &cs->max_cycles); 885 } 886 887 static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur) 888 { 889 struct clocksource *cs; 890 891 if (!finished_booting || list_empty(&clocksource_list)) 892 return NULL; 893 894 /* 895 * We pick the clocksource with the highest rating. If oneshot 896 * mode is active, we pick the highres valid clocksource with 897 * the best rating. 898 */ 899 list_for_each_entry(cs, &clocksource_list, list) { 900 if (skipcur && cs == curr_clocksource) 901 continue; 902 if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES)) 903 continue; 904 return cs; 905 } 906 return NULL; 907 } 908 909 static void __clocksource_select(bool skipcur) 910 { 911 bool oneshot = tick_oneshot_mode_active(); 912 struct clocksource *best, *cs; 913 914 /* Find the best suitable clocksource */ 915 best = clocksource_find_best(oneshot, skipcur); 916 if (!best) 917 return; 918 919 if (!strlen(override_name)) 920 goto found; 921 922 /* Check for the override clocksource. */ 923 list_for_each_entry(cs, &clocksource_list, list) { 924 if (skipcur && cs == curr_clocksource) 925 continue; 926 if (strcmp(cs->name, override_name) != 0) 927 continue; 928 /* 929 * Check to make sure we don't switch to a non-highres 930 * capable clocksource if the tick code is in oneshot 931 * mode (highres or nohz) 932 */ 933 if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) { 934 /* Override clocksource cannot be used. */ 935 if (cs->flags & CLOCK_SOURCE_UNSTABLE) { 936 pr_warn("Override clocksource %s is unstable and not HRT compatible - cannot switch while in HRT/NOHZ mode\n", 937 cs->name); 938 override_name[0] = 0; 939 } else { 940 /* 941 * The override cannot be currently verified. 942 * Deferring to let the watchdog check. 943 */ 944 pr_info("Override clocksource %s is not currently HRT compatible - deferring\n", 945 cs->name); 946 } 947 } else 948 /* Override clocksource can be used. */ 949 best = cs; 950 break; 951 } 952 953 found: 954 if (curr_clocksource != best && !timekeeping_notify(best)) { 955 pr_info("Switched to clocksource %s\n", best->name); 956 curr_clocksource = best; 957 } 958 } 959 960 /** 961 * clocksource_select - Select the best clocksource available 962 * 963 * Private function. Must hold clocksource_mutex when called. 964 * 965 * Select the clocksource with the best rating, or the clocksource, 966 * which is selected by userspace override. 967 */ 968 static void clocksource_select(void) 969 { 970 __clocksource_select(false); 971 } 972 973 static void clocksource_select_fallback(void) 974 { 975 __clocksource_select(true); 976 } 977 978 /* 979 * clocksource_done_booting - Called near the end of core bootup 980 * 981 * Hack to avoid lots of clocksource churn at boot time. 982 * We use fs_initcall because we want this to start before 983 * device_initcall but after subsys_initcall. 
984 */ 985 static int __init clocksource_done_booting(void) 986 { 987 mutex_lock(&clocksource_mutex); 988 curr_clocksource = clocksource_default_clock(); 989 finished_booting = 1; 990 /* 991 * Run the watchdog first to eliminate unstable clock sources 992 */ 993 __clocksource_watchdog_kthread(); 994 clocksource_select(); 995 mutex_unlock(&clocksource_mutex); 996 return 0; 997 } 998 fs_initcall(clocksource_done_booting); 999 1000 /* 1001 * Enqueue the clocksource sorted by rating 1002 */ 1003 static void clocksource_enqueue(struct clocksource *cs) 1004 { 1005 struct list_head *entry = &clocksource_list; 1006 struct clocksource *tmp; 1007 1008 list_for_each_entry(tmp, &clocksource_list, list) { 1009 /* Keep track of the place, where to insert */ 1010 if (tmp->rating < cs->rating) 1011 break; 1012 entry = &tmp->list; 1013 } 1014 list_add(&cs->list, entry); 1015 } 1016 1017 /** 1018 * __clocksource_update_freq_scale - Used update clocksource with new freq 1019 * @cs: clocksource to be registered 1020 * @scale: Scale factor multiplied against freq to get clocksource hz 1021 * @freq: clocksource frequency (cycles per second) divided by scale 1022 * 1023 * This should only be called from the clocksource->enable() method. 1024 * 1025 * This *SHOULD NOT* be called directly! Please use the 1026 * __clocksource_update_freq_hz() or __clocksource_update_freq_khz() helper 1027 * functions. 1028 */ 1029 void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq) 1030 { 1031 u64 sec; 1032 1033 /* 1034 * Default clocksources are *special* and self-define their mult/shift. 1035 * But, you're not special, so you should specify a freq value. 1036 */ 1037 if (freq) { 1038 /* 1039 * Calc the maximum number of seconds which we can run before 1040 * wrapping around. For clocksources which have a mask > 32-bit 1041 * we need to limit the max sleep time to have a good 1042 * conversion precision. 10 minutes is still a reasonable 1043 * amount. That results in a shift value of 24 for a 1044 * clocksource with mask >= 40-bit and f >= 4GHz. That maps to 1045 * ~ 0.06ppm granularity for NTP. 1046 */ 1047 sec = cs->mask; 1048 do_div(sec, freq); 1049 do_div(sec, scale); 1050 if (!sec) 1051 sec = 1; 1052 else if (sec > 600 && cs->mask > UINT_MAX) 1053 sec = 600; 1054 1055 clocks_calc_mult_shift(&cs->mult, &cs->shift, freq, 1056 NSEC_PER_SEC / scale, sec * scale); 1057 } 1058 1059 /* 1060 * If the uncertainty margin is not specified, calculate it. 1061 * If both scale and freq are non-zero, calculate the clock 1062 * period, but bound below at 2*WATCHDOG_MAX_SKEW. However, 1063 * if either of scale or freq is zero, be very conservative and 1064 * take the tens-of-milliseconds WATCHDOG_THRESHOLD value for the 1065 * uncertainty margin. Allow stupidly small uncertainty margins 1066 * to be specified by the caller for testing purposes, but warn 1067 * to discourage production use of this capability. 1068 */ 1069 if (scale && freq && !cs->uncertainty_margin) { 1070 cs->uncertainty_margin = NSEC_PER_SEC / (scale * freq); 1071 if (cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW) 1072 cs->uncertainty_margin = 2 * WATCHDOG_MAX_SKEW; 1073 } else if (!cs->uncertainty_margin) { 1074 cs->uncertainty_margin = WATCHDOG_THRESHOLD; 1075 } 1076 WARN_ON_ONCE(cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW); 1077 1078 /* 1079 * Ensure clocksources that have large 'mult' values don't overflow 1080 * when adjusted. 
1081 */ 1082 cs->maxadj = clocksource_max_adjustment(cs); 1083 while (freq && ((cs->mult + cs->maxadj < cs->mult) 1084 || (cs->mult - cs->maxadj > cs->mult))) { 1085 cs->mult >>= 1; 1086 cs->shift--; 1087 cs->maxadj = clocksource_max_adjustment(cs); 1088 } 1089 1090 /* 1091 * Only warn for *special* clocksources that self-define 1092 * their mult/shift values and don't specify a freq. 1093 */ 1094 WARN_ONCE(cs->mult + cs->maxadj < cs->mult, 1095 "timekeeping: Clocksource %s might overflow on 11%% adjustment\n", 1096 cs->name); 1097 1098 clocksource_update_max_deferment(cs); 1099 1100 pr_info("%s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n", 1101 cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns); 1102 } 1103 EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale); 1104 1105 /** 1106 * __clocksource_register_scale - Used to install new clocksources 1107 * @cs: clocksource to be registered 1108 * @scale: Scale factor multiplied against freq to get clocksource hz 1109 * @freq: clocksource frequency (cycles per second) divided by scale 1110 * 1111 * Returns -EBUSY if registration fails, zero otherwise. 1112 * 1113 * This *SHOULD NOT* be called directly! Please use the 1114 * clocksource_register_hz() or clocksource_register_khz helper functions. 1115 */ 1116 int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) 1117 { 1118 unsigned long flags; 1119 1120 clocksource_arch_init(cs); 1121 1122 if (WARN_ON_ONCE((unsigned int)cs->id >= CSID_MAX)) 1123 cs->id = CSID_GENERIC; 1124 if (cs->vdso_clock_mode < 0 || 1125 cs->vdso_clock_mode >= VDSO_CLOCKMODE_MAX) { 1126 pr_warn("clocksource %s registered with invalid VDSO mode %d. Disabling VDSO support.\n", 1127 cs->name, cs->vdso_clock_mode); 1128 cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE; 1129 } 1130 1131 /* Initialize mult/shift and max_idle_ns */ 1132 __clocksource_update_freq_scale(cs, scale, freq); 1133 1134 /* Add clocksource to the clocksource list */ 1135 mutex_lock(&clocksource_mutex); 1136 1137 clocksource_watchdog_lock(&flags); 1138 clocksource_enqueue(cs); 1139 clocksource_enqueue_watchdog(cs); 1140 clocksource_watchdog_unlock(&flags); 1141 1142 clocksource_select(); 1143 clocksource_select_watchdog(false); 1144 __clocksource_suspend_select(cs); 1145 mutex_unlock(&clocksource_mutex); 1146 return 0; 1147 } 1148 EXPORT_SYMBOL_GPL(__clocksource_register_scale); 1149 1150 static void __clocksource_change_rating(struct clocksource *cs, int rating) 1151 { 1152 list_del(&cs->list); 1153 cs->rating = rating; 1154 clocksource_enqueue(cs); 1155 } 1156 1157 /** 1158 * clocksource_change_rating - Change the rating of a registered clocksource 1159 * @cs: clocksource to be changed 1160 * @rating: new rating 1161 */ 1162 void clocksource_change_rating(struct clocksource *cs, int rating) 1163 { 1164 unsigned long flags; 1165 1166 mutex_lock(&clocksource_mutex); 1167 clocksource_watchdog_lock(&flags); 1168 __clocksource_change_rating(cs, rating); 1169 clocksource_watchdog_unlock(&flags); 1170 1171 clocksource_select(); 1172 clocksource_select_watchdog(false); 1173 clocksource_suspend_select(false); 1174 mutex_unlock(&clocksource_mutex); 1175 } 1176 EXPORT_SYMBOL(clocksource_change_rating); 1177 1178 /* 1179 * Unbind clocksource @cs. Called with clocksource_mutex held 1180 */ 1181 static int clocksource_unbind(struct clocksource *cs) 1182 { 1183 unsigned long flags; 1184 1185 if (clocksource_is_watchdog(cs)) { 1186 /* Select and try to install a replacement watchdog. 

static void __clocksource_change_rating(struct clocksource *cs, int rating)
{
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
}

/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 * @cs:		clocksource to be changed
 * @rating:	new rating
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	unsigned long flags;

	mutex_lock(&clocksource_mutex);
	clocksource_watchdog_lock(&flags);
	__clocksource_change_rating(cs, rating);
	clocksource_watchdog_unlock(&flags);

	clocksource_select();
	clocksource_select_watchdog(false);
	clocksource_suspend_select(false);
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);

/*
 * Unbind clocksource @cs. Called with clocksource_mutex held
 */
static int clocksource_unbind(struct clocksource *cs)
{
	unsigned long flags;

	if (clocksource_is_watchdog(cs)) {
		/* Select and try to install a replacement watchdog. */
		clocksource_select_watchdog(true);
		if (clocksource_is_watchdog(cs))
			return -EBUSY;
	}

	if (cs == curr_clocksource) {
		/* Select and try to install a replacement clock source */
		clocksource_select_fallback();
		if (curr_clocksource == cs)
			return -EBUSY;
	}

	if (clocksource_is_suspend(cs)) {
		/*
		 * Select and try to install a replacement suspend clocksource.
		 * If no replacement suspend clocksource, we will just let the
		 * clocksource go and have no suspend clocksource.
		 */
		clocksource_suspend_select(true);
	}

	clocksource_watchdog_lock(&flags);
	clocksource_dequeue_watchdog(cs);
	list_del_init(&cs->list);
	clocksource_watchdog_unlock(&flags);

	return 0;
}

/**
 * clocksource_unregister - remove a registered clocksource
 * @cs:	clocksource to be unregistered
 */
int clocksource_unregister(struct clocksource *cs)
{
	int ret = 0;

	mutex_lock(&clocksource_mutex);
	if (!list_empty(&cs->list))
		ret = clocksource_unbind(cs);
	mutex_unlock(&clocksource_mutex);
	return ret;
}
EXPORT_SYMBOL(clocksource_unregister);

#ifdef CONFIG_SYSFS
/**
 * current_clocksource_show - sysfs interface for current clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing current clocksource.
 */
static ssize_t current_clocksource_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
	mutex_unlock(&clocksource_mutex);

	return count;
}

ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
{
	size_t ret = cnt;

	/* strings from sysfs write are not 0 terminated! */
	if (!cnt || cnt >= CS_NAME_LEN)
		return -EINVAL;

	/* strip off \n: */
	if (buf[cnt-1] == '\n')
		cnt--;
	if (cnt > 0)
		memcpy(dst, buf, cnt);
	dst[cnt] = 0;
	return ret;
}

/**
 * current_clocksource_store - interface for manually overriding clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	name of override clocksource
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t current_clocksource_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	ssize_t ret;

	mutex_lock(&clocksource_mutex);

	ret = sysfs_get_uname(buf, override_name, count);
	if (ret >= 0)
		clocksource_select();

	mutex_unlock(&clocksource_mutex);

	return ret;
}
static DEVICE_ATTR_RW(current_clocksource);
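
/*
 * Usage sketch from userspace (paths as created by
 * init_clocksource_sysfs() below; the tsc/hpet/acpi_pm names are
 * examples and depend on the platform):
 *
 *	# cat /sys/devices/system/clocksource/clocksource0/available_clocksource
 *	tsc hpet acpi_pm
 *	# echo hpet > /sys/devices/system/clocksource/clocksource0/current_clocksource
 *
 * The write lands in current_clocksource_store(), which copies the name
 * into override_name and reruns clocksource_select().
 */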

/**
 * unbind_clocksource_store - interface for manually unbinding clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	unused
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually unbinding a clocksource.
 */
static ssize_t unbind_clocksource_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct clocksource *cs;
	char name[CS_NAME_LEN];
	ssize_t ret;

	ret = sysfs_get_uname(buf, name, count);
	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clocksource_mutex);
	list_for_each_entry(cs, &clocksource_list, list) {
		if (strcmp(cs->name, name))
			continue;
		ret = clocksource_unbind(cs);
		break;
	}
	mutex_unlock(&clocksource_mutex);

	return ret ? ret : count;
}
static DEVICE_ATTR_WO(unbind_clocksource);

/**
 * available_clocksource_show - sysfs interface for listing clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t available_clocksource_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct clocksource *src;
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	list_for_each_entry(src, &clocksource_list, list) {
		/*
		 * Don't show non-HRES clocksource if the tick code is
		 * in one shot mode (highres=on or nohz=on)
		 */
		if (!tick_oneshot_mode_active() ||
		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			count += snprintf(buf + count,
				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
				  "%s ", src->name);
	}
	mutex_unlock(&clocksource_mutex);

	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

	return count;
}
static DEVICE_ATTR_RO(available_clocksource);

static struct attribute *clocksource_attrs[] = {
	&dev_attr_current_clocksource.attr,
	&dev_attr_unbind_clocksource.attr,
	&dev_attr_available_clocksource.attr,
	NULL
};
ATTRIBUTE_GROUPS(clocksource);

static struct bus_type clocksource_subsys = {
	.name = "clocksource",
	.dev_name = "clocksource",
};

static struct device device_clocksource = {
	.id	= 0,
	.bus	= &clocksource_subsys,
	.groups	= clocksource_groups,
};

static int __init init_clocksource_sysfs(void)
{
	int error = subsys_system_register(&clocksource_subsys, NULL);

	if (!error)
		error = device_register(&device_clocksource);

	return error;
}

device_initcall(init_clocksource_sysfs);
#endif /* CONFIG_SYSFS */

/**
 * boot_override_clocksource - boot clock override
 * @str:	override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char* str)
{
	mutex_lock(&clocksource_mutex);
	if (str)
		strlcpy(override_name, str, sizeof(override_name));
	mutex_unlock(&clocksource_mutex);
	return 1;
}

__setup("clocksource=", boot_override_clocksource);
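
/*
 * Example kernel command line use (clocksource names vary by platform;
 * "hpet" is just an illustration):
 *
 *	clocksource=hpet
 *
 * This stores the name in override_name early in boot, so the override
 * is honored whenever clocksource_select() runs and a matching, usable
 * clocksource has registered.
 */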

/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str:	override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clock(char* str)
{
	if (!strcmp(str, "pmtmr")) {
		pr_warn("clock=pmtmr is deprecated - use clocksource=acpi_pm\n");
		return boot_override_clocksource("acpi_pm");
	}
	pr_warn("clock= boot option is deprecated - use clocksource=xyz\n");
	return boot_override_clocksource(str);
}

__setup("clock=", boot_override_clock);