// SPDX-License-Identifier: GPL-2.0+
/*
 * This file contains the functions which manage clocksource drivers.
 *
 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>
#include <linux/kthread.h>
#include <linux/prandom.h>
#include <linux/cpu.h>

#include "tick-internal.h"
#include "timekeeping_internal.h"

/**
 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
 * @mult: pointer to mult variable
 * @shift: pointer to shift variable
 * @from: frequency to convert from
 * @to: frequency to convert to
 * @maxsec: guaranteed runtime conversion range in seconds
 *
 * The function evaluates the shift/mult pair for the scaled math
 * operations of clocksources and clockevents.
 *
 * @to and @from are frequency values in HZ. For clock sources @to is
 * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
 * event sources @to is the counter frequency and @from is NSEC_PER_SEC.
 *
 * The @maxsec conversion range argument controls the time frame in
 * seconds which must be covered by the runtime conversion with the
 * calculated mult and shift factors. This guarantees that no 64bit
 * overflow happens when the input value of the conversion is
 * multiplied with the calculated mult factor. Larger ranges may
 * reduce the conversion accuracy by choosing smaller mult and shift
 * factors.
 */
void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
{
	u64 tmp;
	u32 sft, sftacc = 32;

	/*
	 * Calculate the shift factor which is limiting the conversion
	 * range:
	 */
	tmp = ((u64)maxsec * from) >> 32;
	while (tmp) {
		tmp >>= 1;
		sftacc--;
	}

	/*
	 * Find the conversion shift/mult pair which has the best
	 * accuracy and fits the maxsec conversion range:
	 */
	for (sft = 32; sft > 0; sft--) {
		tmp = (u64) to << sft;
		tmp += from / 2;
		do_div(tmp, from);
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = tmp;
	*shift = sft;
}
EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
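/*
 * Worked example (illustrative only): for a hypothetical 10 MHz counter
 * converted to nanoseconds with a guaranteed range of 600 seconds,
 *
 *	u32 mult, shift;
 *
 *	clocks_calc_mult_shift(&mult, &shift, 10000000, NSEC_PER_SEC, 600);
 *
 * yields mult = 100 << 24 and shift = 24, so that
 * clocksource_cyc2ns(cycles, mult, shift) == cycles * 100, i.e. exactly
 * the 100ns per cycle one would expect at 10 MHz.
 */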
/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * suspend_clocksource:
 *	used to calculate the suspend time.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static struct clocksource *suspend_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[CS_NAME_LEN];
static int finished_booting;
static u64 suspend_start;

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);
static void clocksource_select(void);

static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
static int watchdog_running;
static atomic_t watchdog_reset_pending;

static inline void clocksource_watchdog_lock(unsigned long *flags)
{
	spin_lock_irqsave(&watchdog_lock, *flags);
}

static inline void clocksource_watchdog_unlock(unsigned long *flags)
{
	spin_unlock_irqrestore(&watchdog_lock, *flags);
}

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

/*
 * Interval: 0.5sec Threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)

/*
 * Maximum permissible delay between two readouts of the watchdog
 * clocksource surrounding a read of the clocksource being validated.
 * This delay could be due to SMIs, NMIs, or to VCPU preemptions.
 */
#define WATCHDOG_MAX_SKEW (100 * NSEC_PER_USEC)

static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * We cannot directly run clocksource_watchdog_kthread() here, because
	 * clocksource_select() calls timekeeping_notify() which uses
	 * stop_machine(). One cannot use stop_machine() from a workqueue() due
	 * to lock inversions wrt CPU hotplug.
	 *
	 * Also, we only ever run this work once or twice during the lifetime
	 * of the kernel, so there is no point in creating a more permanent
	 * kthread for this.
	 *
	 * If kthread_run fails the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}

static void __clocksource_unstable(struct clocksource *cs)
{
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;

	/*
	 * If the clocksource is registered clocksource_watchdog_kthread() will
	 * re-rate and re-select.
	 */
	if (list_empty(&cs->list)) {
		cs->rating = 0;
		return;
	}

	if (cs->mark_unstable)
		cs->mark_unstable(cs);

	/* kick clocksource_watchdog_kthread() */
	if (finished_booting)
		schedule_work(&watchdog_work);
}

/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs: clocksource to be marked unstable
 *
 * This function is called by the x86 TSC code to mark clocksources as unstable;
 * it defers demotion and re-selection to a kthread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static ulong max_cswd_read_retries = 3;
module_param(max_cswd_read_retries, ulong, 0644);
static int verify_n_cpus = 8;
module_param(verify_n_cpus, int, 0644);
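/*
 * Both knobs above are also boot parameters (this file is built in, so
 * they take the "clocksource." prefix), e.g.:
 *
 *	clocksource.max_cswd_read_retries=5 clocksource.verify_n_cpus=-1
 *
 * would allow up to five watchdog read retries and verify the current
 * clocksource against all online CPUs instead of a random subset of 8.
 */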
static bool cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow)
{
	unsigned int nretries;
	u64 wd_end, wd_delta;
	int64_t wd_delay;

	for (nretries = 0; nretries <= max_cswd_read_retries; nretries++) {
		local_irq_disable();
		*wdnow = watchdog->read(watchdog);
		*csnow = cs->read(cs);
		wd_end = watchdog->read(watchdog);
		local_irq_enable();

		wd_delta = clocksource_delta(wd_end, *wdnow, watchdog->mask);
		wd_delay = clocksource_cyc2ns(wd_delta, watchdog->mult,
					      watchdog->shift);
		if (wd_delay <= WATCHDOG_MAX_SKEW) {
			if (nretries > 1 || nretries >= max_cswd_read_retries) {
				pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n",
					smp_processor_id(), watchdog->name, nretries);
			}
			return true;
		}
	}

	pr_warn("timekeeping watchdog on CPU%d: %s read-back delay of %lldns, attempt %d, marking unstable\n",
		smp_processor_id(), watchdog->name, wd_delay, nretries);
	return false;
}

static u64 csnow_mid;
static cpumask_t cpus_ahead;
static cpumask_t cpus_behind;
static cpumask_t cpus_chosen;

static void clocksource_verify_choose_cpus(void)
{
	int cpu, i, n = verify_n_cpus;

	if (n < 0) {
		/* Check all of the CPUs. */
		cpumask_copy(&cpus_chosen, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &cpus_chosen);
		return;
	}

	/* If no checking desired, or no other CPU to check, leave. */
	cpumask_clear(&cpus_chosen);
	if (n == 0 || num_online_cpus() <= 1)
		return;

	/* Make sure to select at least one CPU other than the current CPU. */
	cpu = cpumask_next(-1, cpu_online_mask);
	if (cpu == smp_processor_id())
		cpu = cpumask_next(cpu, cpu_online_mask);
	if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
		return;
	cpumask_set_cpu(cpu, &cpus_chosen);

	/* Force a sane value for the boot parameter. */
	if (n > nr_cpu_ids)
		n = nr_cpu_ids;

	/*
	 * Randomly select the specified number of CPUs. If the same
	 * CPU is selected multiple times, that CPU is checked only once,
	 * and no replacement CPU is selected. This gracefully handles
	 * situations where verify_n_cpus is greater than the number of
	 * CPUs that are currently online.
	 */
	for (i = 1; i < n; i++) {
		cpu = prandom_u32() % nr_cpu_ids;
		cpu = cpumask_next(cpu - 1, cpu_online_mask);
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_next(-1, cpu_online_mask);
		if (!WARN_ON_ONCE(cpu >= nr_cpu_ids))
			cpumask_set_cpu(cpu, &cpus_chosen);
	}

	/* Don't verify ourselves. */
	cpumask_clear_cpu(smp_processor_id(), &cpus_chosen);
}
static void clocksource_verify_one_cpu(void *csin)
{
	struct clocksource *cs = (struct clocksource *)csin;

	csnow_mid = cs->read(cs);
}

static void clocksource_verify_percpu(struct clocksource *cs)
{
	int64_t cs_nsec, cs_nsec_max = 0, cs_nsec_min = LLONG_MAX;
	u64 csnow_begin, csnow_end;
	int cpu, testcpu;
	s64 delta;

	if (verify_n_cpus == 0)
		return;
	cpumask_clear(&cpus_ahead);
	cpumask_clear(&cpus_behind);
	get_online_cpus();
	preempt_disable();
	clocksource_verify_choose_cpus();
	if (cpumask_weight(&cpus_chosen) == 0) {
		preempt_enable();
		put_online_cpus();
		pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name);
		return;
	}
	testcpu = smp_processor_id();
	pr_warn("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n",
		cs->name, testcpu, cpumask_pr_args(&cpus_chosen));
	for_each_cpu(cpu, &cpus_chosen) {
		if (cpu == testcpu)
			continue;
		csnow_begin = cs->read(cs);
		smp_call_function_single(cpu, clocksource_verify_one_cpu, cs, 1);
		csnow_end = cs->read(cs);
		delta = (s64)((csnow_mid - csnow_begin) & cs->mask);
		if (delta < 0)
			cpumask_set_cpu(cpu, &cpus_behind);
		delta = (csnow_end - csnow_mid) & cs->mask;
		if (delta < 0)
			cpumask_set_cpu(cpu, &cpus_ahead);
		delta = clocksource_delta(csnow_end, csnow_begin, cs->mask);
		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
		if (cs_nsec > cs_nsec_max)
			cs_nsec_max = cs_nsec;
		if (cs_nsec < cs_nsec_min)
			cs_nsec_min = cs_nsec;
	}
	preempt_enable();
	put_online_cpus();
	if (!cpumask_empty(&cpus_ahead))
		pr_warn(" CPUs %*pbl ahead of CPU %d for clocksource %s.\n",
			cpumask_pr_args(&cpus_ahead), testcpu, cs->name);
	if (!cpumask_empty(&cpus_behind))
		pr_warn(" CPUs %*pbl behind CPU %d for clocksource %s.\n",
			cpumask_pr_args(&cpus_behind), testcpu, cs->name);
	if (!cpumask_empty(&cpus_ahead) || !cpumask_empty(&cpus_behind))
		pr_warn(" CPU %d check durations %lldns - %lldns for clocksource %s.\n",
			testcpu, cs_nsec_min, cs_nsec_max, cs->name);
}
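/*
 * Illustrative example of the ahead/behind checks above, assuming a
 * full 64-bit mask (e.g. the x86 TSC): if csnow_begin == 1000 but the
 * remote CPU's read returns csnow_mid == 990, then
 * (s64)((990 - 1000) & cs->mask) == -10, so the remote CPU is flagged
 * as behind; a csnow_end smaller than csnow_mid flags it as ahead the
 * same way. With narrower masks, small negative differences wrap to
 * large positive values instead.
 */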
static void clocksource_watchdog(struct timer_list *unused)
{
	u64 csnow, wdnow, cslast, wdlast, delta;
	int next_cpu, reset_pending;
	int64_t wd_nsec, cs_nsec;
	struct clocksource *cs;

	spin_lock(&watchdog_lock);
	if (!watchdog_running)
		goto out;

	reset_pending = atomic_read(&watchdog_reset_pending);

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			if (finished_booting)
				schedule_work(&watchdog_work);
			continue;
		}

		if (!cs_watchdog_read(cs, &csnow, &wdnow)) {
			/* Clock readout unreliable, so give it up. */
			__clocksource_unstable(cs);
			continue;
		}

		/* Clocksource initialized ? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
		    atomic_read(&watchdog_reset_pending)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = wdnow;
			cs->cs_last = csnow;
			continue;
		}

		delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
		wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
					     watchdog->shift);

		delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
		wdlast = cs->wd_last; /* save these in case we print them */
		cslast = cs->cs_last;
		cs->cs_last = csnow;
		cs->wd_last = wdnow;

		if (atomic_read(&watchdog_reset_pending))
			continue;

		/* Check the deviation from the watchdog clocksource. */
		if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
			pr_warn("timekeeping watchdog on CPU%d: Marking clocksource '%s' as unstable because the skew is too large:\n",
				smp_processor_id(), cs->name);
			pr_warn(" '%s' wd_now: %llx wd_last: %llx mask: %llx\n",
				watchdog->name, wdnow, wdlast, watchdog->mask);
			pr_warn(" '%s' cs_now: %llx cs_last: %llx mask: %llx\n",
				cs->name, csnow, cslast, cs->mask);
			if (curr_clocksource == cs)
				pr_warn(" '%s' is current clocksource.\n", cs->name);
			else if (curr_clocksource)
				pr_warn(" '%s' (not '%s') is current clocksource.\n",
					curr_clocksource->name, cs->name);
			else
				pr_warn(" No current clocksource.\n");
			__clocksource_unstable(cs);
			continue;
		}

		if (cs == curr_clocksource && cs->tick_stable)
			cs->tick_stable(cs);

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			/* Mark it valid for high-res. */
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;

			/*
			 * clocksource_done_booting() will sort it if
			 * finished_booting is not set yet.
			 */
			if (!finished_booting)
				continue;

			/*
			 * If this is not the current clocksource let
			 * the watchdog thread reselect it. Due to the
			 * change to high res this clocksource might
			 * be preferred now. If it is the current
			 * clocksource let the tick code know about
			 * that change.
			 */
			if (cs != curr_clocksource) {
				cs->flags |= CLOCK_SOURCE_RESELECT;
				schedule_work(&watchdog_work);
			} else {
				tick_clock_notify();
			}
		}
	}

	/*
	 * We only clear the watchdog_reset_pending, when we did a
	 * full cycle through all clocksources.
	 */
	if (reset_pending)
		atomic_dec(&watchdog_reset_pending);

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);

	/*
	 * Arm timer if not already pending: could race with a concurrent
	 * clocksource_stop_watchdog()/clocksource_start_watchdog() pair.
	 */
	if (!timer_pending(&watchdog_timer)) {
		watchdog_timer.expires += WATCHDOG_INTERVAL;
		add_timer_on(&watchdog_timer, next_cpu);
	}
out:
	spin_unlock(&watchdog_lock);
}
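/*
 * Numeric example (illustrative): with WATCHDOG_INTERVAL at 0.5s, a
 * clocksource running 15% fast shows cs_nsec ~= 575ms against
 * wd_nsec ~= 500ms, so abs(cs_nsec - wd_nsec) ~= 75ms exceeds
 * WATCHDOG_THRESHOLD (62.5ms) and the clocksource is marked unstable,
 * while a 5% error (~25ms of skew per interval) is tolerated.
 */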
static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
	timer_setup(&watchdog_timer, clocksource_watchdog, 0);
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}

static inline void clocksource_stop_watchdog(void)
{
	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
		return;
	del_timer(&watchdog_timer);
	watchdog_running = 0;
}

static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}

static void clocksource_resume_watchdog(void)
{
	atomic_inc(&watchdog_reset_pending);
}

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	INIT_LIST_HEAD(&cs->wd_list);

	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
	}
}

static void clocksource_select_watchdog(bool fallback)
{
	struct clocksource *cs, *old_wd;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	/* save current watchdog */
	old_wd = watchdog;
	if (fallback)
		watchdog = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* cs is a clocksource to be watched. */
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
			continue;

		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_wd)
			continue;

		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating)
			watchdog = cs;
	}
	/* If we failed to find a fallback restore the old one. */
	if (!watchdog)
		watchdog = old_wd;

	/* If we changed the watchdog we need to reset cycles. */
	if (watchdog != old_wd)
		clocksource_reset_watchdog();

	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
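/*
 * Example (typical x86 configuration, for illustration): the TSC is
 * registered with CLOCK_SOURCE_MUST_VERIFY and is therefore watched,
 * while the best non-verified clocksource, e.g. HPET (rating 250) or
 * acpi_pm (rating 200), is picked as the watchdog by the loop above.
 */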
static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	if (cs != watchdog) {
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
			/* cs is a watched clocksource. */
			list_del_init(&cs->wd_list);
			/* Check if the watchdog timer needs to be stopped. */
			clocksource_stop_watchdog();
		}
	}
}

static int __clocksource_watchdog_kthread(void)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	int select = 0;

	/* Do any required per-CPU skew verification. */
	if (curr_clocksource &&
	    curr_clocksource->flags & CLOCK_SOURCE_UNSTABLE &&
	    curr_clocksource->flags & CLOCK_SOURCE_VERIFY_PERCPU)
		clocksource_verify_percpu(curr_clocksource);

	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			__clocksource_change_rating(cs, 0);
			select = 1;
		}
		if (cs->flags & CLOCK_SOURCE_RESELECT) {
			cs->flags &= ~CLOCK_SOURCE_RESELECT;
			select = 1;
		}
	}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	return select;
}

static int clocksource_watchdog_kthread(void *data)
{
	mutex_lock(&clocksource_mutex);
	if (__clocksource_watchdog_kthread())
		clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}

static bool clocksource_is_watchdog(struct clocksource *cs)
{
	return cs == watchdog;
}

#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static void clocksource_select_watchdog(bool fallback) { }
static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int __clocksource_watchdog_kthread(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
void clocksource_mark_unstable(struct clocksource *cs) { }

static inline void clocksource_watchdog_lock(unsigned long *flags) { }
static inline void clocksource_watchdog_unlock(unsigned long *flags) { }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */

static bool clocksource_is_suspend(struct clocksource *cs)
{
	return cs == suspend_clocksource;
}

static void __clocksource_suspend_select(struct clocksource *cs)
{
	/*
	 * Skip the clocksource which will be stopped in suspend state.
	 */
	if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP))
		return;

	/*
	 * The nonstop clocksource can be selected as the suspend clocksource
	 * to calculate the suspend time, so it should not provide
	 * suspend/resume interfaces that would stop it when the system
	 * suspends.
	 */
	if (cs->suspend || cs->resume) {
		pr_warn("Nonstop clocksource %s should not supply suspend/resume interfaces\n",
			cs->name);
	}

	/* Pick the best rating. */
	if (!suspend_clocksource || cs->rating > suspend_clocksource->rating)
		suspend_clocksource = cs;
}

/**
 * clocksource_suspend_select - Select the best clocksource for suspend timing
 * @fallback: if true, select a fallback clocksource
 */
static void clocksource_suspend_select(bool fallback)
{
	struct clocksource *cs, *old_suspend;

	old_suspend = suspend_clocksource;
	if (fallback)
		suspend_clocksource = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_suspend)
			continue;

		__clocksource_suspend_select(cs);
	}
}
/**
 * clocksource_start_suspend_timing - Start measuring the suspend timing
 * @cs: current clocksource from timekeeping
 * @start_cycles: current cycles from timekeeping
 *
 * This function will save the start cycle values of suspend timer to calculate
 * the suspend time when resuming system.
 *
 * This function is called late in the suspend process from timekeeping_suspend(),
 * which means processes are frozen, non-boot CPUs are offline and interrupts
 * are disabled. It is therefore possible to start the suspend timer without
 * taking the clocksource mutex.
 */
void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
{
	if (!suspend_clocksource)
		return;

	/*
	 * If current clocksource is the suspend timer, we should use the
	 * tkr_mono.cycle_last value as suspend_start to avoid same reading
	 * from suspend timer.
	 */
	if (clocksource_is_suspend(cs)) {
		suspend_start = start_cycles;
		return;
	}

	if (suspend_clocksource->enable &&
	    suspend_clocksource->enable(suspend_clocksource)) {
		pr_warn_once("Failed to enable the non-suspend-able clocksource.\n");
		return;
	}

	suspend_start = suspend_clocksource->read(suspend_clocksource);
}

/**
 * clocksource_stop_suspend_timing - Stop measuring the suspend timing
 * @cs: current clocksource from timekeeping
 * @cycle_now: current cycles from timekeeping
 *
 * This function will calculate the suspend time from suspend timer.
 *
 * Returns nanoseconds since suspend started, 0 if no usable suspend clocksource.
 *
 * This function is called early in the resume process from timekeeping_resume(),
 * which means there is only one CPU, no processes are running and interrupts
 * are disabled. It is therefore possible to stop the suspend timer without
 * taking the clocksource mutex.
 */
u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
{
	u64 now, delta, nsec = 0;

	if (!suspend_clocksource)
		return 0;

	/*
	 * If current clocksource is the suspend timer, we should use the
	 * tkr_mono.cycle_last value from timekeeping as current cycle to
	 * avoid same reading from suspend timer.
	 */
	if (clocksource_is_suspend(cs))
		now = cycle_now;
	else
		now = suspend_clocksource->read(suspend_clocksource);

	if (now > suspend_start) {
		delta = clocksource_delta(now, suspend_start,
					  suspend_clocksource->mask);
		nsec = mul_u64_u32_shr(delta, suspend_clocksource->mult,
				       suspend_clocksource->shift);
	}

	/*
	 * Disable the suspend timer to save power if current clocksource is
	 * not the suspend timer.
	 */
	if (!clocksource_is_suspend(cs) && suspend_clocksource->disable)
		suspend_clocksource->disable(suspend_clocksource);

	return nsec;
}
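/*
 * Worked example (illustrative): with a hypothetical nonstop 32768 Hz
 * counter as the suspend clocksource (~30517.6ns per cycle), a 60s
 * suspend advances the counter by delta = 60 * 32768 = 1966080 cycles,
 * and mul_u64_u32_shr(delta, mult, shift) with the mult/shift pair
 * computed for 32768 Hz returns roughly 60 * NSEC_PER_SEC.
 */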
/**
 * clocksource_suspend - suspend the clocksource(s)
 */
void clocksource_suspend(void)
{
	struct clocksource *cs;

	list_for_each_entry_reverse(cs, &clocksource_list, list)
		if (cs->suspend)
			cs->suspend(cs);
}

/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume(cs);

	clocksource_resume_watchdog();
}

/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog. This might fail when the kernel
 * was stopped in code which holds watchdog_lock.
 */
void clocksource_touch_watchdog(void)
{
	clocksource_resume_watchdog();
}

/**
 * clocksource_max_adjustment - Returns max adjustment amount
 * @cs: Pointer to clocksource
 */
static u32 clocksource_max_adjustment(struct clocksource *cs)
{
	u64 ret;

	/*
	 * We won't try to correct for more than 11% adjustments (110,000 ppm).
	 */
	ret = (u64)cs->mult * 11;
	do_div(ret, 100);
	return (u32)ret;
}

/**
 * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
 * @mult: cycle to nanosecond multiplier
 * @shift: cycle to nanosecond divisor (power of two)
 * @maxadj: maximum adjustment value to mult (~11%)
 * @mask: bitmask for two's complement subtraction of non 64 bit counters
 * @max_cyc: maximum cycle value before potential overflow (does not include
 *	any safety margin)
 *
 * NOTE: This function includes a safety margin of 50%, in other words, we
 * return half the number of nanoseconds the hardware counter can technically
 * cover. This is done so that we can potentially detect problems caused by
 * delayed timers or bad hardware, which might result in time intervals that
 * are larger than what the math used can handle without overflows.
 */
u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
{
	u64 max_nsecs, max_cycles;

	/*
	 * Calculate the maximum number of cycles that we can pass to the
	 * cyc2ns() function without overflowing a 64-bit result.
	 */
	max_cycles = ULLONG_MAX;
	do_div(max_cycles, mult+maxadj);

	/*
	 * The actual maximum number of cycles we can defer the clocksource is
	 * determined by the minimum of max_cycles and mask.
	 * Note: Here we subtract the maxadj to make sure we don't sleep for
	 * too long if there's a large negative adjustment.
	 */
	max_cycles = min(max_cycles, mask);
	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);

	/* return the max_cycles value as well if requested */
	if (max_cyc)
		*max_cyc = max_cycles;

	/* Return 50% of the actual maximum, so we can detect bad values */
	max_nsecs >>= 1;

	return max_nsecs;
}
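/*
 * Worked example (illustrative): for a hypothetical 32-bit 10 MHz
 * clocksource with mult = 100 << 24, shift = 24 and maxadj ~= 11% of
 * mult:
 *
 *	max_cycles = ULLONG_MAX / (mult + maxadj)	~= 9.9e9 cycles,
 *	clamped to mask (0xffffffff)			~= 4.3e9 cycles,
 *	cyc2ns(0xffffffff, mult - maxadj, 24)		~= 382s,
 *	halved for the 50% safety margin		~= 191s,
 *
 * well below the ~429s the 32-bit counter takes to wrap at 10 MHz.
 */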
/**
 * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles
 * @cs: Pointer to clocksource to be updated
 */
static inline void clocksource_update_max_deferment(struct clocksource *cs)
{
	cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
						cs->maxadj, cs->mask,
						&cs->max_cycles);
}

static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
{
	struct clocksource *cs;

	if (!finished_booting || list_empty(&clocksource_list))
		return NULL;

	/*
	 * We pick the clocksource with the highest rating. If oneshot
	 * mode is active, we pick the highres valid clocksource with
	 * the best rating.
	 */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			continue;
		return cs;
	}
	return NULL;
}

static void __clocksource_select(bool skipcur)
{
	bool oneshot = tick_oneshot_mode_active();
	struct clocksource *best, *cs;

	/* Find the best suitable clocksource */
	best = clocksource_find_best(oneshot, skipcur);
	if (!best)
		return;

	if (!strlen(override_name))
		goto found;

	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz).
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
			/* Override clocksource cannot be used. */
			if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
				pr_warn("Override clocksource %s is unstable and not HRT compatible - cannot switch while in HRT/NOHZ mode\n",
					cs->name);
				override_name[0] = 0;
			} else {
				/*
				 * The override cannot currently be verified.
				 * Defer to let the watchdog check.
				 */
				pr_info("Override clocksource %s is not currently HRT compatible - deferring\n",
					cs->name);
			}
		} else
			/* Override clocksource can be used. */
			best = cs;
		break;
	}

found:
	if (curr_clocksource != best && !timekeeping_notify(best)) {
		pr_info("Switched to clocksource %s\n", best->name);
		curr_clocksource = best;
	}
}

/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource
 * which is selected by a userspace override.
 */
static void clocksource_select(void)
{
	__clocksource_select(false);
}

static void clocksource_select_fallback(void)
{
	__clocksource_select(true);
}
/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	mutex_lock(&clocksource_mutex);
	curr_clocksource = clocksource_default_clock();
	finished_booting = 1;
	/*
	 * Run the watchdog first to eliminate unstable clock sources
	 */
	__clocksource_watchdog_kthread();
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
fs_initcall(clocksource_done_booting);

/*
 * Enqueue the clocksource sorted by rating
 */
static void clocksource_enqueue(struct clocksource *cs)
{
	struct list_head *entry = &clocksource_list;
	struct clocksource *tmp;

	list_for_each_entry(tmp, &clocksource_list, list) {
		/* Keep track of the place, where to insert */
		if (tmp->rating < cs->rating)
			break;
		entry = &tmp->list;
	}
	list_add(&cs->list, entry);
}

/**
 * __clocksource_update_freq_scale - Used to update the clocksource with a new frequency
 * @cs: clocksource to be registered
 * @scale: Scale factor multiplied against freq to get clocksource hz
 * @freq: clocksource frequency (cycles per second) divided by scale
 *
 * This should only be called from the clocksource->enable() method.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * __clocksource_update_freq_hz() or __clocksource_update_freq_khz() helper
 * functions.
 */
void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	u64 sec;

	/*
	 * Default clocksources are *special* and self-define their mult/shift.
	 * But, you're not special, so you should specify a freq value.
	 */
	if (freq) {
		/*
		 * Calc the maximum number of seconds which we can run before
		 * wrapping around. For clocksources which have a mask > 32-bit
		 * we need to limit the max sleep time to have a good
		 * conversion precision. 10 minutes is still a reasonable
		 * amount. That results in a shift value of 24 for a
		 * clocksource with mask >= 40-bit and f >= 4GHz. That maps to
		 * ~ 0.06ppm granularity for NTP.
		 */
		sec = cs->mask;
		do_div(sec, freq);
		do_div(sec, scale);
		if (!sec)
			sec = 1;
		else if (sec > 600 && cs->mask > UINT_MAX)
			sec = 600;

		clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
				       NSEC_PER_SEC / scale, sec * scale);
	}

	/*
	 * Ensure clocksources that have large 'mult' values don't overflow
	 * when adjusted.
	 */
	cs->maxadj = clocksource_max_adjustment(cs);
	while (freq && ((cs->mult + cs->maxadj < cs->mult)
		|| (cs->mult - cs->maxadj > cs->mult))) {
		cs->mult >>= 1;
		cs->shift--;
		cs->maxadj = clocksource_max_adjustment(cs);
	}

	/*
	 * Only warn for *special* clocksources that self-define
	 * their mult/shift values and don't specify a freq.
	 */
	WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
		  "timekeeping: Clocksource %s might overflow on 11%% adjustment\n",
		  cs->name);

	clocksource_update_max_deferment(cs);

	pr_info("%s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n",
		cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
}
EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
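/*
 * A minimal usage sketch (hypothetical driver, not upstream code): a
 * clocksource whose frequency is only known once the hardware is up can
 * defer mult/shift setup to its ->enable() callback via the
 * __clocksource_update_freq_hz() wrapper:
 *
 *	static int my_timer_enable(struct clocksource *cs)
 *	{
 *		u32 hz = my_timer_get_rate();	// hypothetical helper
 *
 *		__clocksource_update_freq_hz(cs, hz);
 *		return 0;
 *	}
 */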
/**
 * __clocksource_register_scale - Used to install new clocksources
 * @cs: clocksource to be registered
 * @scale: Scale factor multiplied against freq to get clocksource hz
 * @freq: clocksource frequency (cycles per second) divided by scale
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * clocksource_register_hz() or clocksource_register_khz() helper functions.
 */
int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	unsigned long flags;

	clocksource_arch_init(cs);

	if (WARN_ON_ONCE((unsigned int)cs->id >= CSID_MAX))
		cs->id = CSID_GENERIC;
	if (cs->vdso_clock_mode < 0 ||
	    cs->vdso_clock_mode >= VDSO_CLOCKMODE_MAX) {
		pr_warn("clocksource %s registered with invalid VDSO mode %d. Disabling VDSO support.\n",
			cs->name, cs->vdso_clock_mode);
		cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
	}

	/* Initialize mult/shift and max_idle_ns */
	__clocksource_update_freq_scale(cs, scale, freq);

	/* Add clocksource to the clocksource list */
	mutex_lock(&clocksource_mutex);

	clocksource_watchdog_lock(&flags);
	clocksource_enqueue(cs);
	clocksource_enqueue_watchdog(cs);
	clocksource_watchdog_unlock(&flags);

	clocksource_select();
	clocksource_select_watchdog(false);
	__clocksource_suspend_select(cs);
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(__clocksource_register_scale);
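/*
 * A minimal registration sketch (hypothetical driver, not upstream
 * code), using the clocksource_register_hz() wrapper around the
 * function above for a 10 MHz free-running counter:
 *
 *	static u64 my_timer_read(struct clocksource *cs)
 *	{
 *		return my_timer_read_counter();	// hypothetical MMIO read
 *	}
 *
 *	static struct clocksource my_timer_cs = {
 *		.name	= "my_timer",
 *		.rating	= 200,
 *		.read	= my_timer_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 *	};
 *
 *	clocksource_register_hz(&my_timer_cs, 10000000);
 */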
static void __clocksource_change_rating(struct clocksource *cs, int rating)
{
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
}

/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 * @cs: clocksource to be changed
 * @rating: new rating
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	unsigned long flags;

	mutex_lock(&clocksource_mutex);
	clocksource_watchdog_lock(&flags);
	__clocksource_change_rating(cs, rating);
	clocksource_watchdog_unlock(&flags);

	clocksource_select();
	clocksource_select_watchdog(false);
	clocksource_suspend_select(false);
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);

/*
 * Unbind clocksource @cs. Called with clocksource_mutex held
 */
static int clocksource_unbind(struct clocksource *cs)
{
	unsigned long flags;

	if (clocksource_is_watchdog(cs)) {
		/* Select and try to install a replacement watchdog. */
		clocksource_select_watchdog(true);
		if (clocksource_is_watchdog(cs))
			return -EBUSY;
	}

	if (cs == curr_clocksource) {
		/* Select and try to install a replacement clock source */
		clocksource_select_fallback();
		if (curr_clocksource == cs)
			return -EBUSY;
	}

	if (clocksource_is_suspend(cs)) {
		/*
		 * Select and try to install a replacement suspend clocksource.
		 * If no replacement suspend clocksource is found, we will just
		 * let the clocksource go and have no suspend clocksource.
		 */
		clocksource_suspend_select(true);
	}

	clocksource_watchdog_lock(&flags);
	clocksource_dequeue_watchdog(cs);
	list_del_init(&cs->list);
	clocksource_watchdog_unlock(&flags);

	return 0;
}

/**
 * clocksource_unregister - remove a registered clocksource
 * @cs: clocksource to be unregistered
 */
int clocksource_unregister(struct clocksource *cs)
{
	int ret = 0;

	mutex_lock(&clocksource_mutex);
	if (!list_empty(&cs->list))
		ret = clocksource_unbind(cs);
	mutex_unlock(&clocksource_mutex);
	return ret;
}
EXPORT_SYMBOL(clocksource_unregister);

#ifdef CONFIG_SYSFS
/**
 * current_clocksource_show - sysfs interface for current clocksource
 * @dev: unused
 * @attr: unused
 * @buf: char buffer to be filled with the name of the current clocksource
 *
 * Provides sysfs interface for listing the current clocksource.
 */
static ssize_t current_clocksource_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
	mutex_unlock(&clocksource_mutex);

	return count;
}

ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
{
	size_t ret = cnt;

	/* strings from sysfs write are not 0 terminated! */
	if (!cnt || cnt >= CS_NAME_LEN)
		return -EINVAL;

	/* strip off \n: */
	if (buf[cnt-1] == '\n')
		cnt--;
	if (cnt > 0)
		memcpy(dst, buf, cnt);
	dst[cnt] = 0;
	return ret;
}

/**
 * current_clocksource_store - interface for manually overriding clocksource
 * @dev: unused
 * @attr: unused
 * @buf: name of override clocksource
 * @count: length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t current_clocksource_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	ssize_t ret;

	mutex_lock(&clocksource_mutex);

	ret = sysfs_get_uname(buf, override_name, count);
	if (ret >= 0)
		clocksource_select();

	mutex_unlock(&clocksource_mutex);

	return ret;
}
static DEVICE_ATTR_RW(current_clocksource);

/**
 * unbind_clocksource_store - interface for manually unbinding clocksource
 * @dev: unused
 * @attr: unused
 * @buf: unused
 * @count: length of buffer
 *
 * Takes input from sysfs interface for manually unbinding a clocksource.
 */
static ssize_t unbind_clocksource_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct clocksource *cs;
	char name[CS_NAME_LEN];
	ssize_t ret;

	ret = sysfs_get_uname(buf, name, count);
	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clocksource_mutex);
	list_for_each_entry(cs, &clocksource_list, list) {
		if (strcmp(cs->name, name))
			continue;
		ret = clocksource_unbind(cs);
		break;
	}
	mutex_unlock(&clocksource_mutex);

	return ret ? ret : count;
}
static DEVICE_ATTR_WO(unbind_clocksource);
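/*
 * Typical user-space usage of the clocksource sysfs attributes defined
 * in this section, e.g.:
 *
 *	# cat /sys/devices/system/clocksource/clocksource0/available_clocksource
 *	# cat /sys/devices/system/clocksource/clocksource0/current_clocksource
 *	# echo hpet > /sys/devices/system/clocksource/clocksource0/current_clocksource
 *	# echo hpet > /sys/devices/system/clocksource/clocksource0/unbind_clocksource
 */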
/**
 * available_clocksource_show - sysfs interface for listing clocksource
 * @dev: unused
 * @attr: unused
 * @buf: char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t available_clocksource_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct clocksource *src;
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	list_for_each_entry(src, &clocksource_list, list) {
		/*
		 * Don't show non-HRES clocksource if the tick code is
		 * in one shot mode (highres=on or nohz=on)
		 */
		if (!tick_oneshot_mode_active() ||
		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			count += snprintf(buf + count,
					  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
					  "%s ", src->name);
	}
	mutex_unlock(&clocksource_mutex);

	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

	return count;
}
static DEVICE_ATTR_RO(available_clocksource);

static struct attribute *clocksource_attrs[] = {
	&dev_attr_current_clocksource.attr,
	&dev_attr_unbind_clocksource.attr,
	&dev_attr_available_clocksource.attr,
	NULL
};
ATTRIBUTE_GROUPS(clocksource);

static struct bus_type clocksource_subsys = {
	.name = "clocksource",
	.dev_name = "clocksource",
};

static struct device device_clocksource = {
	.id	= 0,
	.bus	= &clocksource_subsys,
	.groups	= clocksource_groups,
};

static int __init init_clocksource_sysfs(void)
{
	int error = subsys_system_register(&clocksource_subsys, NULL);

	if (!error)
		error = device_register(&device_clocksource);

	return error;
}

device_initcall(init_clocksource_sysfs);
#endif /* CONFIG_SYSFS */

/**
 * boot_override_clocksource - boot clock override
 * @str: override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char* str)
{
	mutex_lock(&clocksource_mutex);
	if (str)
		strlcpy(override_name, str, sizeof(override_name));
	mutex_unlock(&clocksource_mutex);
	return 1;
}

__setup("clocksource=", boot_override_clocksource);

/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str: override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name
 */
static int __init boot_override_clock(char* str)
{
	if (!strcmp(str, "pmtmr")) {
		pr_warn("clock=pmtmr is deprecated - use clocksource=acpi_pm\n");
		return boot_override_clocksource("acpi_pm");
	}
	pr_warn("clock= boot option is deprecated - use clocksource=xyz\n");
	return boot_override_clocksource(str);
}

__setup("clock=", boot_override_clock);
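/*
 * Example (illustrative): booting with
 *
 *	clocksource=acpi_pm
 *
 * on the kernel command line fills override_name, and clocksource_select()
 * switches to acpi_pm once it is registered and usable.
 */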