// SPDX-License-Identifier: GPL-2.0+
/*
 * This file contains the functions which manage clocksource drivers.
 *
 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>
#include <linux/kthread.h>
#include <linux/prandom.h>
#include <linux/cpu.h>

#include "tick-internal.h"
#include "timekeeping_internal.h"

/**
 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
 * @mult: pointer to mult variable
 * @shift: pointer to shift variable
 * @from: frequency to convert from
 * @to: frequency to convert to
 * @maxsec: guaranteed runtime conversion range in seconds
 *
 * The function evaluates the shift/mult pair for the scaled math
 * operations of clocksources and clockevents.
 *
 * @to and @from are frequency values in HZ. For clock sources @to is
 * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
 * event @to is the counter frequency and @from is NSEC_PER_SEC.
 *
 * The @maxsec conversion range argument controls the time frame in
 * seconds which must be covered by the runtime conversion with the
 * calculated mult and shift factors. This guarantees that no 64bit
 * overflow happens when the input value of the conversion is
 * multiplied with the calculated mult factor. Larger ranges may
 * reduce the conversion accuracy by choosing smaller mult and shift
 * factors.
 */
void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
{
	u64 tmp;
	u32 sft, sftacc = 32;

	/*
	 * Calculate the shift factor which is limiting the conversion
	 * range:
	 */
	tmp = ((u64)maxsec * from) >> 32;
	while (tmp) {
		tmp >>= 1;
		sftacc--;
	}

	/*
	 * Find the conversion shift/mult pair which has the best
	 * accuracy and fits the maxsec conversion range:
	 */
	for (sft = 32; sft > 0; sft--) {
		tmp = (u64) to << sft;
		tmp += from / 2;
		do_div(tmp, from);
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = tmp;
	*shift = sft;
}
EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
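
/*
 * Worked example (illustrative, not part of the upstream comments): for a
 * hypothetical 10 MHz counter converted to nanoseconds (from = 10000000,
 * to = NSEC_PER_SEC) with maxsec = 600:
 *
 *   maxsec * from = 6e9 leaves one bit above 2^32, so sftacc drops to 31.
 *   The search settles on sft = 24, the largest shift for which
 *   tmp = ((u64)to << sft) + from/2, divided by from, still fits below
 *   2^31; that yields mult = 1677721600, shift = 24.
 *
 * A reader then converts cycles with ns = (cycles * mult) >> shift, e.g.
 * 10000000 cycles * 1677721600 >> 24 == 1000000000 ns, exactly one second.
 */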

/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * suspend_clocksource:
 *	used to calculate the suspend time.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static struct clocksource *suspend_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[CS_NAME_LEN];
static int finished_booting;
static u64 suspend_start;

/*
 * Interval: 0.5sec.
 */
#define WATCHDOG_INTERVAL (HZ >> 1)

/*
 * Threshold: 0.0312s, when doubled: 0.0625s.
 * Also a default for cs->uncertainty_margin when registering clocks.
 */
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 5)

/*
 * Maximum permissible delay between two readouts of the watchdog
 * clocksource surrounding a read of the clocksource being validated.
 * This delay could be due to SMIs, NMIs, or to VCPU preemptions. Used as
 * a lower bound for cs->uncertainty_margin values when registering clocks.
 *
 * The default of 500 parts per million is based on NTP's limits.
 * If a clocksource is good enough for NTP, it is good enough for us!
 */
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US
#define MAX_SKEW_USEC	CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US
#else
#define MAX_SKEW_USEC	(125 * WATCHDOG_INTERVAL / HZ)
#endif

#define WATCHDOG_MAX_SKEW (MAX_SKEW_USEC * NSEC_PER_USEC)
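
/*
 * Worked default (illustrative, assuming no Kconfig override): since
 * WATCHDOG_INTERVAL is HZ/2 jiffies, MAX_SKEW_USEC = 125 * (HZ/2) / HZ,
 * which integer division reduces to 62 usec for the common HZ values
 * (100, 250, 300, 1000), giving WATCHDOG_MAX_SKEW = 62000 ns.
 */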

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);
static void clocksource_select(void);

static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
static int watchdog_running;
static atomic_t watchdog_reset_pending;

static inline void clocksource_watchdog_lock(unsigned long *flags)
{
	spin_lock_irqsave(&watchdog_lock, *flags);
}

static inline void clocksource_watchdog_unlock(unsigned long *flags)
{
	spin_unlock_irqrestore(&watchdog_lock, *flags);
}

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * We cannot directly run clocksource_watchdog_kthread() here, because
	 * clocksource_select() calls timekeeping_notify() which uses
	 * stop_machine(). One cannot use stop_machine() from a workqueue() due
	 * to lock inversions wrt CPU hotplug.
	 *
	 * Also, we only ever run this work once or twice during the lifetime
	 * of the kernel, so there is no point in creating a more permanent
	 * kthread for this.
	 *
	 * If kthread_run() fails the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}

static void __clocksource_unstable(struct clocksource *cs)
{
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;

	/*
	 * If the clocksource is registered clocksource_watchdog_kthread() will
	 * re-rate and re-select.
	 */
	if (list_empty(&cs->list)) {
		cs->rating = 0;
		return;
	}

	if (cs->mark_unstable)
		cs->mark_unstable(cs);

	/* kick clocksource_watchdog_kthread() */
	if (finished_booting)
		schedule_work(&watchdog_work);
}

/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs: clocksource to be marked unstable
 *
 * This function is called by the x86 TSC code to mark clocksources as unstable;
 * it defers demotion and re-selection to a kthread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

ulong max_cswd_read_retries = 2;
module_param(max_cswd_read_retries, ulong, 0644);
EXPORT_SYMBOL_GPL(max_cswd_read_retries);
static int verify_n_cpus = 8;
module_param(verify_n_cpus, int, 0644);

enum wd_read_status {
	WD_READ_SUCCESS,
	WD_READ_UNSTABLE,
	WD_READ_SKIP
};

static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow)
{
	unsigned int nretries;
	u64 wd_end, wd_end2, wd_delta;
	int64_t wd_delay, wd_seq_delay;

	for (nretries = 0; nretries <= max_cswd_read_retries; nretries++) {
		local_irq_disable();
		*wdnow = watchdog->read(watchdog);
		*csnow = cs->read(cs);
		wd_end = watchdog->read(watchdog);
		wd_end2 = watchdog->read(watchdog);
		local_irq_enable();

		wd_delta = clocksource_delta(wd_end, *wdnow, watchdog->mask);
		wd_delay = clocksource_cyc2ns(wd_delta, watchdog->mult,
					      watchdog->shift);
		if (wd_delay <= WATCHDOG_MAX_SKEW) {
			if (nretries > 1 || nretries >= max_cswd_read_retries) {
				pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n",
					smp_processor_id(), watchdog->name, nretries);
			}
			return WD_READ_SUCCESS;
		}

		/*
		 * Now compute the delay between the consecutive watchdog
		 * reads to see if there is too much external interference
		 * causing significant delay in reading both the clocksource
		 * and the watchdog.
		 *
		 * If the consecutive WD read-back delay > WATCHDOG_MAX_SKEW/2,
		 * report system busy, reinit the watchdog and skip the current
		 * watchdog test.
		 */
		wd_delta = clocksource_delta(wd_end2, wd_end, watchdog->mask);
		wd_seq_delay = clocksource_cyc2ns(wd_delta, watchdog->mult, watchdog->shift);
		if (wd_seq_delay > WATCHDOG_MAX_SKEW/2)
			goto skip_test;
	}

	pr_warn("timekeeping watchdog on CPU%d: wd-%s-wd read-back delay of %lldns, attempt %d, marking unstable\n",
		smp_processor_id(), cs->name, wd_delay, nretries);
	return WD_READ_UNSTABLE;

skip_test:
	pr_info("timekeeping watchdog on CPU%d: %s wd-wd read-back delay of %lldns\n",
		smp_processor_id(), watchdog->name, wd_seq_delay);
	pr_info("wd-%s-wd read-back delay of %lldns, clock-skew test skipped!\n",
		cs->name, wd_delay);
	return WD_READ_SKIP;
}

static u64 csnow_mid;
static cpumask_t cpus_ahead;
static cpumask_t cpus_behind;
static cpumask_t cpus_chosen;

static void clocksource_verify_choose_cpus(void)
{
	int cpu, i, n = verify_n_cpus;

	if (n < 0) {
		/* Check all of the CPUs. */
		cpumask_copy(&cpus_chosen, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &cpus_chosen);
		return;
	}

	/* If no checking desired, or no other CPU to check, leave. */
	cpumask_clear(&cpus_chosen);
	if (n == 0 || num_online_cpus() <= 1)
		return;

	/* Make sure to select at least one CPU other than the current CPU. */
	cpu = cpumask_first(cpu_online_mask);
	if (cpu == smp_processor_id())
		cpu = cpumask_next(cpu, cpu_online_mask);
	if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
		return;
	cpumask_set_cpu(cpu, &cpus_chosen);

	/* Force a sane value for the boot parameter. */
	if (n > nr_cpu_ids)
		n = nr_cpu_ids;

	/*
	 * Randomly select the specified number of CPUs. If the same
	 * CPU is selected multiple times, that CPU is checked only once,
	 * and no replacement CPU is selected. This gracefully handles
	 * situations where verify_n_cpus is greater than the number of
	 * CPUs that are currently online.
	 */
	for (i = 1; i < n; i++) {
		cpu = get_random_u32_below(nr_cpu_ids);
		cpu = cpumask_next(cpu - 1, cpu_online_mask);
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(cpu_online_mask);
		if (!WARN_ON_ONCE(cpu >= nr_cpu_ids))
			cpumask_set_cpu(cpu, &cpus_chosen);
	}

	/* Don't verify ourselves. */
	cpumask_clear_cpu(smp_processor_id(), &cpus_chosen);
}

static void clocksource_verify_one_cpu(void *csin)
{
	struct clocksource *cs = (struct clocksource *)csin;

	csnow_mid = cs->read(cs);
}

void clocksource_verify_percpu(struct clocksource *cs)
{
	int64_t cs_nsec, cs_nsec_max = 0, cs_nsec_min = LLONG_MAX;
	u64 csnow_begin, csnow_end;
	int cpu, testcpu;
	s64 delta;

	if (verify_n_cpus == 0)
		return;
	cpumask_clear(&cpus_ahead);
	cpumask_clear(&cpus_behind);
	cpus_read_lock();
	preempt_disable();
	clocksource_verify_choose_cpus();
	if (cpumask_empty(&cpus_chosen)) {
		preempt_enable();
		cpus_read_unlock();
		pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name);
		return;
	}
	testcpu = smp_processor_id();
	pr_warn("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n",
		cs->name, testcpu, cpumask_pr_args(&cpus_chosen));
	for_each_cpu(cpu, &cpus_chosen) {
		if (cpu == testcpu)
			continue;
		csnow_begin = cs->read(cs);
		smp_call_function_single(cpu, clocksource_verify_one_cpu, cs, 1);
		csnow_end = cs->read(cs);
		delta = (s64)((csnow_mid - csnow_begin) & cs->mask);
		if (delta < 0)
			cpumask_set_cpu(cpu, &cpus_behind);
		delta = (csnow_end - csnow_mid) & cs->mask;
		if (delta < 0)
			cpumask_set_cpu(cpu, &cpus_ahead);
		delta = clocksource_delta(csnow_end, csnow_begin, cs->mask);
		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
		if (cs_nsec > cs_nsec_max)
			cs_nsec_max = cs_nsec;
		if (cs_nsec < cs_nsec_min)
			cs_nsec_min = cs_nsec;
	}
	preempt_enable();
	cpus_read_unlock();
	if (!cpumask_empty(&cpus_ahead))
		pr_warn(" CPUs %*pbl ahead of CPU %d for clocksource %s.\n",
			cpumask_pr_args(&cpus_ahead), testcpu, cs->name);
	if (!cpumask_empty(&cpus_behind))
		pr_warn(" CPUs %*pbl behind CPU %d for clocksource %s.\n",
			cpumask_pr_args(&cpus_behind), testcpu, cs->name);
	if (!cpumask_empty(&cpus_ahead) || !cpumask_empty(&cpus_behind))
		pr_warn(" CPU %d check durations %lldns - %lldns for clocksource %s.\n",
			testcpu, cs_nsec_min, cs_nsec_max, cs->name);
}
EXPORT_SYMBOL_GPL(clocksource_verify_percpu);
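
/*
 * Note (illustrative, not upstream commentary): this per-CPU cross-check
 * runs from __clocksource_watchdog_kthread() when the current clocksource
 * has both CLOCK_SOURCE_UNSTABLE and CLOCK_SOURCE_VERIFY_PERCPU set, as
 * the x86 TSC does. Since this file is built in, the module parameters
 * above are tuned on the kernel command line, e.g.
 * clocksource.verify_n_cpus=-1 to check all CPUs, or
 * clocksource.verify_n_cpus=0 to disable the check.
 */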

static void clocksource_watchdog(struct timer_list *unused)
{
	u64 csnow, wdnow, cslast, wdlast, delta;
	int next_cpu, reset_pending;
	int64_t wd_nsec, cs_nsec;
	struct clocksource *cs;
	enum wd_read_status read_ret;
	u32 md;

	spin_lock(&watchdog_lock);
	if (!watchdog_running)
		goto out;

	reset_pending = atomic_read(&watchdog_reset_pending);

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			if (finished_booting)
				schedule_work(&watchdog_work);
			continue;
		}

		read_ret = cs_watchdog_read(cs, &csnow, &wdnow);

		if (read_ret != WD_READ_SUCCESS) {
			if (read_ret == WD_READ_UNSTABLE)
				/* Clock readout unreliable, so give it up. */
				__clocksource_unstable(cs);
			continue;
		}

		/* Clocksource initialized? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
		    atomic_read(&watchdog_reset_pending)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = wdnow;
			cs->cs_last = csnow;
			continue;
		}

		delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
		wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
					     watchdog->shift);

		delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
		wdlast = cs->wd_last; /* save these in case we print them */
		cslast = cs->cs_last;
		cs->cs_last = csnow;
		cs->wd_last = wdnow;

		if (atomic_read(&watchdog_reset_pending))
			continue;

		/* Check the deviation from the watchdog clocksource. */
		md = cs->uncertainty_margin + watchdog->uncertainty_margin;
		if (abs(cs_nsec - wd_nsec) > md) {
			pr_warn("timekeeping watchdog on CPU%d: Marking clocksource '%s' as unstable because the skew is too large:\n",
				smp_processor_id(), cs->name);
			pr_warn(" '%s' wd_nsec: %lld wd_now: %llx wd_last: %llx mask: %llx\n",
				watchdog->name, wd_nsec, wdnow, wdlast, watchdog->mask);
			pr_warn(" '%s' cs_nsec: %lld cs_now: %llx cs_last: %llx mask: %llx\n",
				cs->name, cs_nsec, csnow, cslast, cs->mask);
			if (curr_clocksource == cs)
				pr_warn(" '%s' is current clocksource.\n", cs->name);
			else if (curr_clocksource)
				pr_warn(" '%s' (not '%s') is current clocksource.\n", curr_clocksource->name, cs->name);
			else
				pr_warn(" No current clocksource.\n");
			__clocksource_unstable(cs);
			continue;
		}

		if (cs == curr_clocksource && cs->tick_stable)
			cs->tick_stable(cs);

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			/* Mark it valid for high-res. */
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;

			/*
			 * clocksource_done_booting() will sort it if
			 * finished_booting is not set yet.
			 */
			if (!finished_booting)
				continue;

			/*
			 * If this is not the current clocksource let
			 * the watchdog thread reselect it. Due to the
			 * change to high res this clocksource might
			 * be preferred now. If it is the current
			 * clocksource let the tick code know about
			 * that change.
			 */
			if (cs != curr_clocksource) {
				cs->flags |= CLOCK_SOURCE_RESELECT;
				schedule_work(&watchdog_work);
			} else {
				tick_clock_notify();
			}
		}
	}

	/*
	 * We only clear watchdog_reset_pending when we did a
	 * full cycle through all clocksources.
	 */
	if (reset_pending)
		atomic_dec(&watchdog_reset_pending);

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);

	/*
	 * Arm timer if not already pending: could race with concurrent
	 * pair clocksource_stop_watchdog() clocksource_start_watchdog().
	 */
	if (!timer_pending(&watchdog_timer)) {
		watchdog_timer.expires += WATCHDOG_INTERVAL;
		add_timer_on(&watchdog_timer, next_cpu);
	}
out:
	spin_unlock(&watchdog_lock);
}

static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
	timer_setup(&watchdog_timer, clocksource_watchdog, 0);
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}

static inline void clocksource_stop_watchdog(void)
{
	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
		return;
	del_timer(&watchdog_timer);
	watchdog_running = 0;
}

static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}

static void clocksource_resume_watchdog(void)
{
	atomic_inc(&watchdog_reset_pending);
}

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	INIT_LIST_HEAD(&cs->wd_list);

	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
	}
}

static void clocksource_select_watchdog(bool fallback)
{
	struct clocksource *cs, *old_wd;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	/* save current watchdog */
	old_wd = watchdog;
	if (fallback)
		watchdog = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* cs is a clocksource to be watched. */
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
			continue;

		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_wd)
			continue;

		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating)
			watchdog = cs;
	}
	/* If we failed to find a fallback restore the old one. */
	if (!watchdog)
		watchdog = old_wd;

	/* If we changed the watchdog we need to reset cycles. */
	if (watchdog != old_wd)
		clocksource_reset_watchdog();

	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	if (cs != watchdog) {
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
			/* cs is a watched clocksource. */
			list_del_init(&cs->wd_list);
			/* Check if the watchdog timer needs to be stopped. */
			clocksource_stop_watchdog();
		}
	}
}

static int __clocksource_watchdog_kthread(void)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	int select = 0;

	/* Do any required per-CPU skew verification. */
	if (curr_clocksource &&
	    curr_clocksource->flags & CLOCK_SOURCE_UNSTABLE &&
	    curr_clocksource->flags & CLOCK_SOURCE_VERIFY_PERCPU)
		clocksource_verify_percpu(curr_clocksource);

	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			__clocksource_change_rating(cs, 0);
			select = 1;
		}
		if (cs->flags & CLOCK_SOURCE_RESELECT) {
			cs->flags &= ~CLOCK_SOURCE_RESELECT;
			select = 1;
		}
	}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	return select;
}

static int clocksource_watchdog_kthread(void *data)
{
	mutex_lock(&clocksource_mutex);
	if (__clocksource_watchdog_kthread())
		clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}

static bool clocksource_is_watchdog(struct clocksource *cs)
{
	return cs == watchdog;
}

#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static void clocksource_select_watchdog(bool fallback) { }
static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int __clocksource_watchdog_kthread(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
void clocksource_mark_unstable(struct clocksource *cs) { }

static inline void clocksource_watchdog_lock(unsigned long *flags) { }
static inline void clocksource_watchdog_unlock(unsigned long *flags) { }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */

static bool clocksource_is_suspend(struct clocksource *cs)
{
	return cs == suspend_clocksource;
}

static void __clocksource_suspend_select(struct clocksource *cs)
{
	/*
	 * Skip the clocksource which will be stopped in suspend state.
	 */
	if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP))
		return;

	/*
	 * The nonstop clocksource can be selected as the suspend clocksource to
	 * calculate the suspend time, so it should not supply suspend/resume
	 * interfaces to suspend the nonstop clocksource when system suspends.
	 */
	if (cs->suspend || cs->resume) {
		pr_warn("Nonstop clocksource %s should not supply suspend/resume interfaces\n",
			cs->name);
	}

	/* Pick the best rating. */
	if (!suspend_clocksource || cs->rating > suspend_clocksource->rating)
		suspend_clocksource = cs;
}

/**
 * clocksource_suspend_select - Select the best clocksource for suspend timing
 * @fallback: if true, select a fallback clocksource
 */
static void clocksource_suspend_select(bool fallback)
{
	struct clocksource *cs, *old_suspend;

	old_suspend = suspend_clocksource;
	if (fallback)
		suspend_clocksource = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_suspend)
			continue;

		__clocksource_suspend_select(cs);
	}
}

/**
 * clocksource_start_suspend_timing - Start measuring the suspend timing
 * @cs: current clocksource from timekeeping
 * @start_cycles: current cycles from timekeeping
 *
 * This function saves the start cycle value of the suspend timer so the
 * suspend time can be calculated when the system resumes.
 *
 * This function is called late in the suspend process from timekeeping_suspend(),
 * which means processes are frozen and non-boot CPUs and interrupts are disabled
 * by now. It is therefore possible to start the suspend timer without taking the
 * clocksource mutex.
 */
void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
{
	if (!suspend_clocksource)
		return;

	/*
	 * If current clocksource is the suspend timer, we should use the
	 * tkr_mono.cycle_last value as suspend_start to avoid same reading
	 * from suspend timer.
	 */
	if (clocksource_is_suspend(cs)) {
		suspend_start = start_cycles;
		return;
	}

	if (suspend_clocksource->enable &&
	    suspend_clocksource->enable(suspend_clocksource)) {
		pr_warn_once("Failed to enable the non-suspend-able clocksource.\n");
		return;
	}

	suspend_start = suspend_clocksource->read(suspend_clocksource);
}

/**
 * clocksource_stop_suspend_timing - Stop measuring the suspend timing
 * @cs: current clocksource from timekeeping
 * @cycle_now: current cycles from timekeeping
 *
 * This function will calculate the suspend time from the suspend timer.
 *
 * Returns nanoseconds since suspend started, 0 if no usable suspend clocksource.
 *
 * This function is called early in the resume process from timekeeping_resume(),
 * which means there is only one CPU, no processes are running and interrupts are
 * disabled. It is therefore possible to stop the suspend timer without taking
 * the clocksource mutex.
 */
u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
{
	u64 now, delta, nsec = 0;

	if (!suspend_clocksource)
		return 0;

	/*
	 * If current clocksource is the suspend timer, we should use the
	 * tkr_mono.cycle_last value from timekeeping as current cycle to
	 * avoid same reading from suspend timer.
	 */
	if (clocksource_is_suspend(cs))
		now = cycle_now;
	else
		now = suspend_clocksource->read(suspend_clocksource);

	if (now > suspend_start) {
		delta = clocksource_delta(now, suspend_start,
					  suspend_clocksource->mask);
		nsec = mul_u64_u32_shr(delta, suspend_clocksource->mult,
				       suspend_clocksource->shift);
	}

	/*
	 * Disable the suspend timer to save power if current clocksource is
	 * not the suspend timer.
	 */
	if (!clocksource_is_suspend(cs) && suspend_clocksource->disable)
		suspend_clocksource->disable(suspend_clocksource);

	return nsec;
}
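
/*
 * Note (illustrative, assuming the 10 MHz example clock with
 * mult = 1677721600 and shift = 24): the delta above is converted with
 * mul_u64_u32_shr() rather than clocksource_cyc2ns() because a suspend can
 * easily exceed the clocksource's max_cycles. A 24 hour suspend accumulates
 * delta = 8.64e11 cycles, and delta * mult is roughly 1.45e21, far beyond
 * the 64-bit limit of ~1.8e19; the plain (delta * mult) >> shift of
 * clocksource_cyc2ns() would overflow, while mul_u64_u32_shr() keeps the
 * full-width intermediate product.
 */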

/**
 * clocksource_suspend - suspend the clocksource(s)
 */
void clocksource_suspend(void)
{
	struct clocksource *cs;

	list_for_each_entry_reverse(cs, &clocksource_list, list)
		if (cs->suspend)
			cs->suspend(cs);
}

/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume(cs);

	clocksource_resume_watchdog();
}

/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog. This might fail when the kernel
 * was stopped in code which holds watchdog_lock.
 */
void clocksource_touch_watchdog(void)
{
	clocksource_resume_watchdog();
}

/**
 * clocksource_max_adjustment - Returns max adjustment amount
 * @cs: Pointer to clocksource
 */
static u32 clocksource_max_adjustment(struct clocksource *cs)
{
	u64 ret;

	/*
	 * We won't try to correct for more than 11% adjustments (110,000 ppm).
	 */
	ret = (u64)cs->mult * 11;
	do_div(ret, 100);
	return (u32)ret;
}

/**
 * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
 * @mult: cycle to nanosecond multiplier
 * @shift: cycle to nanosecond divisor (power of two)
 * @maxadj: maximum adjustment value to mult (~11%)
 * @mask: bitmask for two's complement subtraction of non 64 bit counters
 * @max_cyc: maximum cycle value before potential overflow (does not include
 *	any safety margin)
 *
 * NOTE: This function includes a safety margin of 50%, in other words, we
 * return half the number of nanoseconds the hardware counter can technically
 * cover. This is done so that we can potentially detect problems caused by
 * delayed timers or bad hardware, which might result in time intervals that
 * are larger than what the math used can handle without overflows.
 */
u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
{
	u64 max_nsecs, max_cycles;

	/*
	 * Calculate the maximum number of cycles that we can pass to the
	 * cyc2ns() function without overflowing a 64-bit result.
	 */
	max_cycles = ULLONG_MAX;
	do_div(max_cycles, mult + maxadj);

	/*
	 * The actual maximum number of cycles we can defer the clocksource is
	 * determined by the minimum of max_cycles and mask.
	 * Note: Here we subtract the maxadj to make sure we don't sleep for
	 * too long if there's a large negative adjustment.
	 */
	max_cycles = min(max_cycles, mask);
	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);

	/* return the max_cycles value as well if requested */
	if (max_cyc)
		*max_cyc = max_cycles;

	/* Return 50% of the actual maximum, so we can detect bad values */
	max_nsecs >>= 1;

	return max_nsecs;
}
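
/*
 * Worked example (illustrative): for the 10 MHz example clock
 * (mult = 1677721600, shift = 24) with a 32-bit mask:
 *
 *   maxadj = 11% of mult = 184549376
 *   ULLONG_MAX / (mult + maxadj) is ~9.9e9 cycles, larger than the mask
 *   of 2^32 - 1, so max_cycles = 4294967295
 *   cyc2ns(max_cycles, mult - maxadj, 24) = max_cycles * 89 ns/cycle,
 *   roughly 382 seconds
 *
 * and after the 50% safety margin the reported limit is ~191 seconds,
 * which becomes cs->max_idle_ns via clocksource_update_max_deferment().
 */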

/**
 * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles
 * @cs: Pointer to clocksource to be updated
 */
static inline void clocksource_update_max_deferment(struct clocksource *cs)
{
	cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
						cs->maxadj, cs->mask,
						&cs->max_cycles);
}

static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
{
	struct clocksource *cs;

	if (!finished_booting || list_empty(&clocksource_list))
		return NULL;

	/*
	 * We pick the clocksource with the highest rating. If oneshot
	 * mode is active, we pick the highres valid clocksource with
	 * the best rating.
	 */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			continue;
		return cs;
	}
	return NULL;
}

static void __clocksource_select(bool skipcur)
{
	bool oneshot = tick_oneshot_mode_active();
	struct clocksource *best, *cs;

	/* Find the best suitable clocksource */
	best = clocksource_find_best(oneshot, skipcur);
	if (!best)
		return;

	if (!strlen(override_name))
		goto found;

	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz)
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
			/* Override clocksource cannot be used. */
			if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
				pr_warn("Override clocksource %s is unstable and not HRT compatible - cannot switch while in HRT/NOHZ mode\n",
					cs->name);
				override_name[0] = 0;
			} else {
				/*
				 * The override cannot be currently verified.
				 * Deferring to let the watchdog check.
				 */
				pr_info("Override clocksource %s is not currently HRT compatible - deferring\n",
					cs->name);
			}
		} else
			/* Override clocksource can be used. */
			best = cs;
		break;
	}

found:
	if (curr_clocksource != best && !timekeeping_notify(best)) {
		pr_info("Switched to clocksource %s\n", best->name);
		curr_clocksource = best;
	}
}

/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource
 * which is selected by a userspace override.
 */
static void clocksource_select(void)
{
	__clocksource_select(false);
}

static void clocksource_select_fallback(void)
{
	__clocksource_select(true);
}

/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	mutex_lock(&clocksource_mutex);
	curr_clocksource = clocksource_default_clock();
	finished_booting = 1;
	/*
	 * Run the watchdog first to eliminate unstable clock sources
	 */
	__clocksource_watchdog_kthread();
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
fs_initcall(clocksource_done_booting);

/*
 * Enqueue the clocksource sorted by rating
 */
static void clocksource_enqueue(struct clocksource *cs)
{
	struct list_head *entry = &clocksource_list;
	struct clocksource *tmp;

	list_for_each_entry(tmp, &clocksource_list, list) {
		/* Keep track of the place where to insert */
		if (tmp->rating < cs->rating)
			break;
		entry = &tmp->list;
	}
	list_add(&cs->list, entry);
}
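
/*
 * Note (illustrative): because the list is kept sorted by descending
 * rating, clocksource_find_best() can simply return the first usable
 * entry. With typical x86 ratings, tsc (300) sorts before hpet (250),
 * which sorts before acpi_pm (200) and jiffies (1).
 */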

/**
 * __clocksource_update_freq_scale - Used to update the clocksource with a new freq
 * @cs: clocksource to be registered
 * @scale: Scale factor multiplied against freq to get clocksource hz
 * @freq: clocksource frequency (cycles per second) divided by scale
 *
 * This should only be called from the clocksource->enable() method.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * __clocksource_update_freq_hz() or __clocksource_update_freq_khz() helper
 * functions.
 */
void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	u64 sec;

	/*
	 * Default clocksources are *special* and self-define their mult/shift.
	 * But, you're not special, so you should specify a freq value.
	 */
	if (freq) {
		/*
		 * Calc the maximum number of seconds which we can run before
		 * wrapping around. For clocksources which have a mask > 32-bit
		 * we need to limit the max sleep time to have a good
		 * conversion precision. 10 minutes is still a reasonable
		 * amount. That results in a shift value of 24 for a
		 * clocksource with mask >= 40-bit and f >= 4GHz. That maps to
		 * ~ 0.06ppm granularity for NTP.
		 */
		sec = cs->mask;
		do_div(sec, freq);
		do_div(sec, scale);
		if (!sec)
			sec = 1;
		else if (sec > 600 && cs->mask > UINT_MAX)
			sec = 600;

		clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
				       NSEC_PER_SEC / scale, sec * scale);
	}

	/*
	 * If the uncertainty margin is not specified, calculate it.
	 * If both scale and freq are non-zero, calculate the clock
	 * period, but bound below at 2*WATCHDOG_MAX_SKEW. However,
	 * if either of scale or freq is zero, be very conservative and
	 * take the tens-of-milliseconds WATCHDOG_THRESHOLD value for the
	 * uncertainty margin. Allow stupidly small uncertainty margins
	 * to be specified by the caller for testing purposes, but warn
	 * to discourage production use of this capability.
	 */
	if (scale && freq && !cs->uncertainty_margin) {
		cs->uncertainty_margin = NSEC_PER_SEC / (scale * freq);
		if (cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW)
			cs->uncertainty_margin = 2 * WATCHDOG_MAX_SKEW;
	} else if (!cs->uncertainty_margin) {
		cs->uncertainty_margin = WATCHDOG_THRESHOLD;
	}
	WARN_ON_ONCE(cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW);

	/*
	 * Ensure clocksources that have large 'mult' values don't overflow
	 * when adjusted.
	 */
	cs->maxadj = clocksource_max_adjustment(cs);
	while (freq && ((cs->mult + cs->maxadj < cs->mult)
		|| (cs->mult - cs->maxadj > cs->mult))) {
		cs->mult >>= 1;
		cs->shift--;
		cs->maxadj = clocksource_max_adjustment(cs);
	}

	/*
	 * Only warn for *special* clocksources that self-define
	 * their mult/shift values and don't specify a freq.
	 */
	WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
		"timekeeping: Clocksource %s might overflow on 11%% adjustment\n",
		cs->name);

	clocksource_update_max_deferment(cs);

	pr_info("%s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n",
		cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
}
EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
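
/*
 * Worked example (illustrative): for the 10 MHz example clock (scale = 1,
 * freq = 10000000) the nominal period is NSEC_PER_SEC / freq = 100 ns.
 * That is far below 2 * WATCHDOG_MAX_SKEW (124000 ns with the default
 * 62 usec skew limit), so cs->uncertainty_margin is clamped up to 124 usec
 * unless the driver specified its own margin.
 */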

/**
 * __clocksource_register_scale - Used to install new clocksources
 * @cs: clocksource to be registered
 * @scale: Scale factor multiplied against freq to get clocksource hz
 * @freq: clocksource frequency (cycles per second) divided by scale
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * clocksource_register_hz() or clocksource_register_khz() helper functions.
 */
int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	unsigned long flags;

	clocksource_arch_init(cs);

	if (WARN_ON_ONCE((unsigned int)cs->id >= CSID_MAX))
		cs->id = CSID_GENERIC;
	if (cs->vdso_clock_mode < 0 ||
	    cs->vdso_clock_mode >= VDSO_CLOCKMODE_MAX) {
		pr_warn("clocksource %s registered with invalid VDSO mode %d. Disabling VDSO support.\n",
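/*
 * Registration sketch (illustrative, hypothetical driver; foo_counter_base,
 * foo_cs_read() and foo_timer_probe() are invented for this example): a
 * driver wraps its free-running counter in a struct clocksource and calls
 * clocksource_register_hz(), a thin wrapper that ends up in
 * __clocksource_register_scale(cs, 1, hz) below.
 */
#if 0	/* example only, not part of this file */
static void __iomem *foo_counter_base;	/* hypothetical MMIO counter */

static u64 foo_cs_read(struct clocksource *cs)
{
	return readl_relaxed(foo_counter_base);
}

static struct clocksource foo_cs = {
	.name	= "foo",
	.rating	= 200,
	.read	= foo_cs_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int foo_timer_probe(void)
{
	/* For a 10 MHz counter. */
	return clocksource_register_hz(&foo_cs, 10000000);
}
#endif
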
			cs->name, cs->vdso_clock_mode);
		cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
	}

	/* Initialize mult/shift and max_idle_ns */
	__clocksource_update_freq_scale(cs, scale, freq);

	/* Add clocksource to the clocksource list */
	mutex_lock(&clocksource_mutex);

	clocksource_watchdog_lock(&flags);
	clocksource_enqueue(cs);
	clocksource_enqueue_watchdog(cs);
	clocksource_watchdog_unlock(&flags);

	clocksource_select();
	clocksource_select_watchdog(false);
	__clocksource_suspend_select(cs);
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(__clocksource_register_scale);

static void __clocksource_change_rating(struct clocksource *cs, int rating)
{
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
}

/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 * @cs: clocksource to be changed
 * @rating: new rating
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	unsigned long flags;

	mutex_lock(&clocksource_mutex);
	clocksource_watchdog_lock(&flags);
	__clocksource_change_rating(cs, rating);
	clocksource_watchdog_unlock(&flags);

	clocksource_select();
	clocksource_select_watchdog(false);
	clocksource_suspend_select(false);
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);

/*
 * Unbind clocksource @cs. Called with clocksource_mutex held
 */
static int clocksource_unbind(struct clocksource *cs)
{
	unsigned long flags;

	if (clocksource_is_watchdog(cs)) {
		/* Select and try to install a replacement watchdog. */
		clocksource_select_watchdog(true);
		if (clocksource_is_watchdog(cs))
			return -EBUSY;
	}

	if (cs == curr_clocksource) {
		/* Select and try to install a replacement clock source */
		clocksource_select_fallback();
		if (curr_clocksource == cs)
			return -EBUSY;
	}

	if (clocksource_is_suspend(cs)) {
		/*
		 * Select and try to install a replacement suspend clocksource.
		 * If no replacement suspend clocksource, we will just let the
		 * clocksource go and have no suspend clocksource.
		 */
		clocksource_suspend_select(true);
	}

	clocksource_watchdog_lock(&flags);
	clocksource_dequeue_watchdog(cs);
	list_del_init(&cs->list);
	clocksource_watchdog_unlock(&flags);

	return 0;
}

/**
 * clocksource_unregister - remove a registered clocksource
 * @cs: clocksource to be unregistered
 */
int clocksource_unregister(struct clocksource *cs)
{
	int ret = 0;

	mutex_lock(&clocksource_mutex);
	if (!list_empty(&cs->list))
		ret = clocksource_unbind(cs);
	mutex_unlock(&clocksource_mutex);
	return ret;
}
EXPORT_SYMBOL(clocksource_unregister);

#ifdef CONFIG_SYSFS
/**
 * current_clocksource_show - sysfs interface for current clocksource
 * @dev: unused
 * @attr: unused
 * @buf: char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing current clocksource.
 */
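/*
 * Sysfs usage sketch (illustrative; the output shown depends on the
 * machine). The attributes defined below appear under
 * /sys/devices/system/clocksource/clocksource0/:
 *
 *   # cat available_clocksource
 *   tsc hpet acpi_pm
 *   # echo hpet > current_clocksource	 (sets override_name, reselects)
 *   # echo acpi_pm > unbind_clocksource (unregisters it, if replaceable)
 */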
static ssize_t current_clocksource_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
	mutex_unlock(&clocksource_mutex);

	return count;
}

ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
{
	size_t ret = cnt;

	/* strings from sysfs write are not 0 terminated! */
	if (!cnt || cnt >= CS_NAME_LEN)
		return -EINVAL;

	/* strip off \n: */
	if (buf[cnt-1] == '\n')
		cnt--;
	if (cnt > 0)
		memcpy(dst, buf, cnt);
	dst[cnt] = 0;
	return ret;
}

/**
 * current_clocksource_store - interface for manually overriding clocksource
 * @dev: unused
 * @attr: unused
 * @buf: name of override clocksource
 * @count: length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t current_clocksource_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	ssize_t ret;

	mutex_lock(&clocksource_mutex);

	ret = sysfs_get_uname(buf, override_name, count);
	if (ret >= 0)
		clocksource_select();

	mutex_unlock(&clocksource_mutex);

	return ret;
}
static DEVICE_ATTR_RW(current_clocksource);

/**
 * unbind_clocksource_store - interface for manually unbinding clocksource
 * @dev: unused
 * @attr: unused
 * @buf: unused
 * @count: length of buffer
 *
 * Takes input from sysfs interface for manually unbinding a clocksource.
 */
static ssize_t unbind_clocksource_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct clocksource *cs;
	char name[CS_NAME_LEN];
	ssize_t ret;

	ret = sysfs_get_uname(buf, name, count);
	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clocksource_mutex);
	list_for_each_entry(cs, &clocksource_list, list) {
		if (strcmp(cs->name, name))
			continue;
		ret = clocksource_unbind(cs);
		break;
	}
	mutex_unlock(&clocksource_mutex);

	return ret ? ret : count;
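/*
 * Usage sketch (illustrative): booting with "clocksource=hpet" on the
 * kernel command line seeds override_name, so clocksource_select() will
 * prefer hpet over higher-rated clocksources once it is registered and
 * usable in the current tick mode.
 */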
}
static DEVICE_ATTR_WO(unbind_clocksource);

/**
 * available_clocksource_show - sysfs interface for listing clocksource
 * @dev: unused
 * @attr: unused
 * @buf: char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t available_clocksource_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct clocksource *src;
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	list_for_each_entry(src, &clocksource_list, list) {
		/*
		 * Don't show non-HRES clocksource if the tick code is
		 * in one shot mode (highres=on or nohz=on)
		 */
		if (!tick_oneshot_mode_active() ||
		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			count += snprintf(buf + count,
				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
				  "%s ", src->name);
	}
	mutex_unlock(&clocksource_mutex);

	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

	return count;
}
static DEVICE_ATTR_RO(available_clocksource);

static struct attribute *clocksource_attrs[] = {
	&dev_attr_current_clocksource.attr,
	&dev_attr_unbind_clocksource.attr,
	&dev_attr_available_clocksource.attr,
	NULL
};
ATTRIBUTE_GROUPS(clocksource);

static struct bus_type clocksource_subsys = {
	.name = "clocksource",
	.dev_name = "clocksource",
};

static struct device device_clocksource = {
	.id	= 0,
	.bus	= &clocksource_subsys,
	.groups	= clocksource_groups,
};

static int __init init_clocksource_sysfs(void)
{
	int error = subsys_system_register(&clocksource_subsys, NULL);

	if (!error)
		error = device_register(&device_clocksource);

	return error;
}

device_initcall(init_clocksource_sysfs);
#endif /* CONFIG_SYSFS */

/**
 * boot_override_clocksource - boot clock override
 * @str: override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char* str)
{
	mutex_lock(&clocksource_mutex);
	if (str)
		strlcpy(override_name, str, sizeof(override_name));
	mutex_unlock(&clocksource_mutex);
	return 1;
}

__setup("clocksource=", boot_override_clocksource);

/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str: override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name
 */
static int __init boot_override_clock(char* str)
{
	if (!strcmp(str, "pmtmr")) {
		pr_warn("clock=pmtmr is deprecated - use clocksource=acpi_pm\n");
		return boot_override_clocksource("acpi_pm");
	}
	pr_warn("clock= boot option is deprecated - use clocksource=xyz\n");
	return boot_override_clocksource(str);
}

__setup("clock=", boot_override_clock);