135728b82SThomas Gleixner // SPDX-License-Identifier: GPL-2.0+ 2734efb46Sjohn stultz /* 3734efb46Sjohn stultz * This file contains the functions which manage clocksource drivers. 4734efb46Sjohn stultz * 5734efb46Sjohn stultz * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com) 6734efb46Sjohn stultz */ 7734efb46Sjohn stultz 845bbfe64SJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 945bbfe64SJoe Perches 10d369a5d8SKay Sievers #include <linux/device.h> 11734efb46Sjohn stultz #include <linux/clocksource.h> 12734efb46Sjohn stultz #include <linux/init.h> 13734efb46Sjohn stultz #include <linux/module.h> 14dc29a365SMathieu Desnoyers #include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */ 1579bf2bb3SThomas Gleixner #include <linux/tick.h> 1601548f4dSMartin Schwidefsky #include <linux/kthread.h> 17734efb46Sjohn stultz 18c1797bafSThomas Gleixner #include "tick-internal.h" 193a978377SThomas Gleixner #include "timekeeping_internal.h" 2003e13cf5SThomas Gleixner 217d2f944aSThomas Gleixner /** 227d2f944aSThomas Gleixner * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks 237d2f944aSThomas Gleixner * @mult: pointer to mult variable 247d2f944aSThomas Gleixner * @shift: pointer to shift variable 257d2f944aSThomas Gleixner * @from: frequency to convert from 267d2f944aSThomas Gleixner * @to: frequency to convert to 275fdade95SNicolas Pitre * @maxsec: guaranteed runtime conversion range in seconds 287d2f944aSThomas Gleixner * 297d2f944aSThomas Gleixner * The function evaluates the shift/mult pair for the scaled math 307d2f944aSThomas Gleixner * operations of clocksources and clockevents. 317d2f944aSThomas Gleixner * 327d2f944aSThomas Gleixner * @to and @from are frequency values in HZ. For clock sources @to is 337d2f944aSThomas Gleixner * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock 347d2f944aSThomas Gleixner * event @to is the counter frequency and @from is NSEC_PER_SEC. 
357d2f944aSThomas Gleixner * 365fdade95SNicolas Pitre * The @maxsec conversion range argument controls the time frame in 377d2f944aSThomas Gleixner * seconds which must be covered by the runtime conversion with the 387d2f944aSThomas Gleixner * calculated mult and shift factors. This guarantees that no 64bit 397d2f944aSThomas Gleixner * overflow happens when the input value of the conversion is 407d2f944aSThomas Gleixner * multiplied with the calculated mult factor. Larger ranges may 417d2f944aSThomas Gleixner * reduce the conversion accuracy by chosing smaller mult and shift 427d2f944aSThomas Gleixner * factors. 437d2f944aSThomas Gleixner */ 447d2f944aSThomas Gleixner void 455fdade95SNicolas Pitre clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec) 467d2f944aSThomas Gleixner { 477d2f944aSThomas Gleixner u64 tmp; 487d2f944aSThomas Gleixner u32 sft, sftacc= 32; 497d2f944aSThomas Gleixner 507d2f944aSThomas Gleixner /* 517d2f944aSThomas Gleixner * Calculate the shift factor which is limiting the conversion 527d2f944aSThomas Gleixner * range: 537d2f944aSThomas Gleixner */ 545fdade95SNicolas Pitre tmp = ((u64)maxsec * from) >> 32; 557d2f944aSThomas Gleixner while (tmp) { 567d2f944aSThomas Gleixner tmp >>=1; 577d2f944aSThomas Gleixner sftacc--; 587d2f944aSThomas Gleixner } 597d2f944aSThomas Gleixner 607d2f944aSThomas Gleixner /* 617d2f944aSThomas Gleixner * Find the conversion shift/mult pair which has the best 627d2f944aSThomas Gleixner * accuracy and fits the maxsec conversion range: 637d2f944aSThomas Gleixner */ 647d2f944aSThomas Gleixner for (sft = 32; sft > 0; sft--) { 657d2f944aSThomas Gleixner tmp = (u64) to << sft; 66b5776c4aSjohn stultz tmp += from / 2; 677d2f944aSThomas Gleixner do_div(tmp, from); 687d2f944aSThomas Gleixner if ((tmp >> sftacc) == 0) 697d2f944aSThomas Gleixner break; 707d2f944aSThomas Gleixner } 717d2f944aSThomas Gleixner *mult = tmp; 727d2f944aSThomas Gleixner *shift = sft; 737d2f944aSThomas Gleixner } 745304121aSMurali 
Karicheri EXPORT_SYMBOL_GPL(clocks_calc_mult_shift); 757d2f944aSThomas Gleixner 76734efb46Sjohn stultz /*[Clocksource internal variables]--------- 77734efb46Sjohn stultz * curr_clocksource: 78f1b82746SMartin Schwidefsky * currently selected clocksource. 7939232ed5SBaolin Wang * suspend_clocksource: 8039232ed5SBaolin Wang * used to calculate the suspend time. 81734efb46Sjohn stultz * clocksource_list: 82734efb46Sjohn stultz * linked list with the registered clocksources 8375c5158fSMartin Schwidefsky * clocksource_mutex: 8475c5158fSMartin Schwidefsky * protects manipulations to curr_clocksource and the clocksource_list 85734efb46Sjohn stultz * override_name: 86734efb46Sjohn stultz * Name of the user-specified clocksource. 87734efb46Sjohn stultz */ 88f1b82746SMartin Schwidefsky static struct clocksource *curr_clocksource; 8939232ed5SBaolin Wang static struct clocksource *suspend_clocksource; 90734efb46Sjohn stultz static LIST_HEAD(clocksource_list); 9175c5158fSMartin Schwidefsky static DEFINE_MUTEX(clocksource_mutex); 9229b54078SThomas Gleixner static char override_name[CS_NAME_LEN]; 9354a6bc0bSThomas Gleixner static int finished_booting; 9439232ed5SBaolin Wang static u64 suspend_start; 95734efb46Sjohn stultz 965d8b34fdSThomas Gleixner #ifdef CONFIG_CLOCKSOURCE_WATCHDOG 97f79e0258SMartin Schwidefsky static void clocksource_watchdog_work(struct work_struct *work); 98332962f2SThomas Gleixner static void clocksource_select(void); 99f79e0258SMartin Schwidefsky 1005d8b34fdSThomas Gleixner static LIST_HEAD(watchdog_list); 1015d8b34fdSThomas Gleixner static struct clocksource *watchdog; 1025d8b34fdSThomas Gleixner static struct timer_list watchdog_timer; 103f79e0258SMartin Schwidefsky static DECLARE_WORK(watchdog_work, clocksource_watchdog_work); 1045d8b34fdSThomas Gleixner static DEFINE_SPINLOCK(watchdog_lock); 105fb63a0ebSMartin Schwidefsky static int watchdog_running; 1069fb60336SThomas Gleixner static atomic_t watchdog_reset_pending; 107b52f52a0SThomas Gleixner 
/* Serialize against the watchdog machinery; caller supplies irq-flags slot. */
static inline void clocksource_watchdog_lock(unsigned long *flags)
{
	spin_lock_irqsave(&watchdog_lock, *flags);
}

static inline void clocksource_watchdog_unlock(unsigned long *flags)
{
	spin_unlock_irqrestore(&watchdog_lock, *flags);
}

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

/*
 * Interval: 0.5sec Threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)

static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * We cannot directly run clocksource_watchdog_kthread() here, because
	 * clocksource_select() calls timekeeping_notify() which uses
	 * stop_machine(). One cannot use stop_machine() from a workqueue() due
	 * lock inversions wrt CPU hotplug.
	 *
	 * Also, we only ever run this work once or twice during the lifetime
	 * of the kernel, so there is no point in creating a more permanent
	 * kthread for this.
	 *
	 * If kthread_run fails the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}

/*
 * Mark @cs unstable. Caller must hold watchdog_lock.
 * An unregistered clocksource (empty ->list) is only de-rated; a
 * registered one additionally gets its mark_unstable() callback and a
 * deferred re-rate/re-select via the watchdog kthread.
 */
static void __clocksource_unstable(struct clocksource *cs)
{
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;

	/*
	 * If the clocksource is registered clocksource_watchdog_kthread() will
	 * re-rate and re-select.
	 */
	if (list_empty(&cs->list)) {
		cs->rating = 0;
		return;
	}

	if (cs->mark_unstable)
		cs->mark_unstable(cs);

	/* kick clocksource_watchdog_kthread() */
	if (finished_booting)
		schedule_work(&watchdog_work);
}

/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs:		clocksource to be marked unstable
 *
 * This function is called by the x86 TSC code to mark clocksources as unstable;
 * it defers demotion and re-selection to a kthread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		/* Put a registered cs under watch if it isn't already. */
		if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

/*
 * Periodic watchdog timer callback: compare each watched clocksource
 * against the watchdog clocksource and mark it unstable when the skew
 * exceeds WATCHDOG_THRESHOLD. Reschedules itself on the next online CPU
 * every WATCHDOG_INTERVAL.
 */
static void clocksource_watchdog(struct timer_list *unused)
{
	struct clocksource *cs;
	u64 csnow, wdnow, cslast, wdlast, delta;
	int64_t wd_nsec, cs_nsec;
	int next_cpu, reset_pending;

	spin_lock(&watchdog_lock);
	if (!watchdog_running)
		goto out;

	reset_pending = atomic_read(&watchdog_reset_pending);

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			if (finished_booting)
				schedule_work(&watchdog_work);
			continue;
		}

		/* Read both clocks with irqs off to keep the window small. */
		local_irq_disable();
		csnow = cs->read(cs);
		wdnow = watchdog->read(watchdog);
		local_irq_enable();

		/* Clocksource initialized ? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
		    atomic_read(&watchdog_reset_pending)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = wdnow;
			cs->cs_last = csnow;
			continue;
		}

		delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
		wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
					     watchdog->shift);

		delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
		wdlast = cs->wd_last; /* save these in case we print them */
		cslast = cs->cs_last;
		cs->cs_last = csnow;
		cs->wd_last = wdnow;

		/* A reset during this scan invalidates the comparison. */
		if (atomic_read(&watchdog_reset_pending))
			continue;

		/* Check the deviation from the watchdog clocksource. */
		if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
			pr_warn("timekeeping watchdog on CPU%d: Marking clocksource '%s' as unstable because the skew is too large:\n",
				smp_processor_id(), cs->name);
			pr_warn("                      '%s' wd_now: %llx wd_last: %llx mask: %llx\n",
				watchdog->name, wdnow, wdlast, watchdog->mask);
			pr_warn("                      '%s' cs_now: %llx cs_last: %llx mask: %llx\n",
				cs->name, csnow, cslast, cs->mask);
			__clocksource_unstable(cs);
			continue;
		}

		if (cs == curr_clocksource && cs->tick_stable)
			cs->tick_stable(cs);

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			/* Mark it valid for high-res. */
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;

			/*
			 * clocksource_done_booting() will sort it if
			 * finished_booting is not set yet.
			 */
			if (!finished_booting)
				continue;

			/*
			 * If this is not the current clocksource let
			 * the watchdog thread reselect it. Due to the
			 * change to high res this clocksource might
			 * be preferred now. If it is the current
			 * clocksource let the tick code know about
			 * that change.
			 */
			if (cs != curr_clocksource) {
				cs->flags |= CLOCK_SOURCE_RESELECT;
				schedule_work(&watchdog_work);
			} else {
				tick_clock_notify();
			}
		}
	}

	/*
	 * We only clear the watchdog_reset_pending, when we did a
	 * full cycle through all clocksources.
	 */
	if (reset_pending)
		atomic_dec(&watchdog_reset_pending);

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);
	watchdog_timer.expires += WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, next_cpu);
out:
	spin_unlock(&watchdog_lock);
}

/* Start the watchdog timer if there is a watchdog and something to watch. */
static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
	timer_setup(&watchdog_timer, clocksource_watchdog, 0);
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}

/* Stop the watchdog timer once nothing remains to be watched. */
static inline void clocksource_stop_watchdog(void)
{
	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
		return;
	del_timer(&watchdog_timer);
	watchdog_running = 0;
}

/* Force re-initialization of the per-cs comparison state on next scan. */
static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}

/*
 * Request a watchdog state reset (e.g. after resume); honored lazily by
 * clocksource_watchdog() so it can be called from any context.
 */
static void clocksource_resume_watchdog(void)
{
	atomic_inc(&watchdog_reset_pending);
}

/*
 * Add @cs to the watchdog machinery: either as a clocksource to be
 * watched or, when it needs no verification, as a potential watchdog.
 * Caller must hold watchdog_lock (via clocksource_watchdog_lock()).
 */
static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	INIT_LIST_HEAD(&cs->wd_list);

	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
	}
}

/*
 * (Re)pick the highest-rated non-verified clocksource as the watchdog.
 * @fallback: exclude the current watchdog from consideration (used when
 * it is going away); if no replacement is found the old one is kept.
 */
static void clocksource_select_watchdog(bool fallback)
{
	struct clocksource *cs, *old_wd;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	/* save current watchdog */
	old_wd = watchdog;
	if (fallback)
		watchdog = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* cs is a clocksource to be watched. */
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
			continue;

		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_wd)
			continue;

		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating)
			watchdog = cs;
	}
	/* If we failed to find a fallback restore the old one. */
	if (!watchdog)
		watchdog = old_wd;

	/* If we changed the watchdog we need to reset cycles. */
	if (watchdog != old_wd)
		clocksource_reset_watchdog();

	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

/*
 * Remove a watched clocksource from the watchdog list. The watchdog
 * clocksource itself is never dequeued here. Caller must hold
 * watchdog_lock (via clocksource_watchdog_lock()).
 */
static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	if (cs != watchdog) {
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
			/* cs is a watched clocksource. */
			list_del_init(&cs->wd_list);
			/* Check if the watchdog timer needs to be stopped. */
			clocksource_stop_watchdog();
		}
	}
}

/*
 * De-rate all clocksources marked unstable and collect pending reselect
 * requests. Returns nonzero when clocksource_select() should run.
 */
static int __clocksource_watchdog_kthread(void)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	int select = 0;

	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			__clocksource_change_rating(cs, 0);
			select = 1;
		}
		if (cs->flags & CLOCK_SOURCE_RESELECT) {
			cs->flags &= ~CLOCK_SOURCE_RESELECT;
			select = 1;
		}
	}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	return select;
}

/*
 * One-shot kthread body spawned from clocksource_watchdog_work(); runs
 * the demotion pass and a reselect under clocksource_mutex.
 */
static int clocksource_watchdog_kthread(void *data)
{
	mutex_lock(&clocksource_mutex);
	if (__clocksource_watchdog_kthread())
		clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}

static bool clocksource_is_watchdog(struct clocksource *cs)
{
	return cs == watchdog;
}

#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

/* No watchdog: continuous clocksources are trusted for high-res at once. */
static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static void clocksource_select_watchdog(bool fallback) { }
static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int __clocksource_watchdog_kthread(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
void clocksource_mark_unstable(struct clocksource *cs) { }

static inline void clocksource_watchdog_lock(unsigned long *flags) { }
static inline void clocksource_watchdog_unlock(unsigned long *flags) { }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */

static bool clocksource_is_suspend(struct clocksource *cs)
{
	return cs == suspend_clocksource;
}

/*
 * Consider @cs as a candidate for suspend timekeeping: it must keep
 * counting across suspend (CLOCK_SOURCE_SUSPEND_NONSTOP) and the one
 * with the highest rating wins.
 */
static void __clocksource_suspend_select(struct clocksource *cs)
{
	/*
	 * Skip the clocksource which will be stopped in suspend state.
	 */
	if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP))
		return;

	/*
	 * The nonstop clocksource can be selected as the suspend clocksource to
	 * calculate the suspend time, so it should not supply suspend/resume
	 * interfaces to suspend the nonstop clocksource when system suspends.
	 */
	if (cs->suspend || cs->resume) {
		pr_warn("Nonstop clocksource %s should not supply suspend/resume interfaces\n",
			cs->name);
	}

	/* Pick the best rating. */
	if (!suspend_clocksource || cs->rating > suspend_clocksource->rating)
		suspend_clocksource = cs;
}

/**
 * clocksource_suspend_select - Select the best clocksource for suspend timing
 * @fallback: if select a fallback clocksource
 */
static void clocksource_suspend_select(bool fallback)
{
	struct clocksource *cs, *old_suspend;

	old_suspend = suspend_clocksource;
	if (fallback)
		suspend_clocksource = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_suspend)
			continue;

		__clocksource_suspend_select(cs);
	}
}

/**
 * clocksource_start_suspend_timing - Start measuring the suspend timing
 * @cs:			current clocksource from timekeeping
 * @start_cycles:	current cycles from timekeeping
 *
 * This function will save the start cycle values of suspend timer to calculate
 * the suspend time when resuming system.
 *
 * This function is called late in the suspend process from timekeeping_suspend(),
 * that means processes are freezed, non-boot cpus and interrupts are disabled
 * now. It is therefore possible to start the suspend timer without taking the
 * clocksource mutex.
 */
void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
{
	if (!suspend_clocksource)
		return;

	/*
	 * If current clocksource is the suspend timer, we should use the
	 * tkr_mono.cycle_last value as suspend_start to avoid same reading
	 * from suspend timer.
	 */
	if (clocksource_is_suspend(cs)) {
		suspend_start = start_cycles;
		return;
	}

	if (suspend_clocksource->enable &&
	    suspend_clocksource->enable(suspend_clocksource)) {
		pr_warn_once("Failed to enable the non-suspend-able clocksource.\n");
		return;
	}

	suspend_start = suspend_clocksource->read(suspend_clocksource);
}

/**
 * clocksource_stop_suspend_timing - Stop measuring the suspend timing
 * @cs:		current clocksource from timekeeping
 * @cycle_now:	current cycles from timekeeping
 *
 * This function will calculate the suspend time from suspend timer.
 *
 * Returns nanoseconds since suspend started, 0 if no usable suspend clocksource.
 *
 * This function is called early in the resume process from timekeeping_resume(),
 * that means there is only one cpu, no processes are running and the interrupts
 * are disabled. It is therefore possible to stop the suspend timer without
 * taking the clocksource mutex.
 */
u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
{
	u64 now, delta, nsec = 0;

	if (!suspend_clocksource)
		return 0;

	/*
	 * If current clocksource is the suspend timer, we should use the
	 * tkr_mono.cycle_last value from timekeeping as current cycle to
	 * avoid same reading from suspend timer.
	 */
	if (clocksource_is_suspend(cs))
		now = cycle_now;
	else
		now = suspend_clocksource->read(suspend_clocksource);

	/* A non-monotonic pair (now <= suspend_start) yields nsec = 0. */
	if (now > suspend_start) {
		delta = clocksource_delta(now, suspend_start,
					  suspend_clocksource->mask);
		nsec = mul_u64_u32_shr(delta, suspend_clocksource->mult,
				       suspend_clocksource->shift);
	}

	/*
	 * Disable the suspend timer to save power if current clocksource is
	 * not the suspend timer.
	 */
	if (!clocksource_is_suspend(cs) && suspend_clocksource->disable)
		suspend_clocksource->disable(suspend_clocksource);

	return nsec;
}

/**
 * clocksource_suspend - suspend the clocksource(s)
 */
void clocksource_suspend(void)
{
	struct clocksource *cs;

	/* Reverse of registration order, mirroring clocksource_resume(). */
	list_for_each_entry_reverse(cs, &clocksource_list, list)
		if (cs->suspend)
			cs->suspend(cs);
}

/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume(cs);

	clocksource_resume_watchdog();
}

/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog. This might fail when the kernel
 * was stopped in code which holds watchdog_lock.
6227c3078b6SJason Wessel */ 6237c3078b6SJason Wessel void clocksource_touch_watchdog(void) 6247c3078b6SJason Wessel { 6257c3078b6SJason Wessel clocksource_resume_watchdog(); 6267c3078b6SJason Wessel } 6277c3078b6SJason Wessel 628734efb46Sjohn stultz /** 629d65670a7SJohn Stultz * clocksource_max_adjustment- Returns max adjustment amount 630d65670a7SJohn Stultz * @cs: Pointer to clocksource 631d65670a7SJohn Stultz * 632d65670a7SJohn Stultz */ 633d65670a7SJohn Stultz static u32 clocksource_max_adjustment(struct clocksource *cs) 634d65670a7SJohn Stultz { 635d65670a7SJohn Stultz u64 ret; 636d65670a7SJohn Stultz /* 63788b28adfSJim Cromie * We won't try to correct for more than 11% adjustments (110,000 ppm), 638d65670a7SJohn Stultz */ 639d65670a7SJohn Stultz ret = (u64)cs->mult * 11; 640d65670a7SJohn Stultz do_div(ret,100); 641d65670a7SJohn Stultz return (u32)ret; 642d65670a7SJohn Stultz } 643d65670a7SJohn Stultz 644d65670a7SJohn Stultz /** 64587d8b9ebSStephen Boyd * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted 64687d8b9ebSStephen Boyd * @mult: cycle to nanosecond multiplier 64787d8b9ebSStephen Boyd * @shift: cycle to nanosecond divisor (power of two) 64887d8b9ebSStephen Boyd * @maxadj: maximum adjustment value to mult (~11%) 64987d8b9ebSStephen Boyd * @mask: bitmask for two's complement subtraction of non 64 bit counters 650fb82fe2fSJohn Stultz * @max_cyc: maximum cycle value before potential overflow (does not include 651fb82fe2fSJohn Stultz * any safety margin) 652362fde04SJohn Stultz * 6538e56f33fSJohn Stultz * NOTE: This function includes a safety margin of 50%, in other words, we 6548e56f33fSJohn Stultz * return half the number of nanoseconds the hardware counter can technically 6558e56f33fSJohn Stultz * cover. 
This is done so that we can potentially detect problems caused by 6568e56f33fSJohn Stultz * delayed timers or bad hardware, which might result in time intervals that 657571af55aSZhen Lei * are larger than what the math used can handle without overflows. 65898962465SJon Hunter */ 659fb82fe2fSJohn Stultz u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc) 66098962465SJon Hunter { 66198962465SJon Hunter u64 max_nsecs, max_cycles; 66298962465SJon Hunter 66398962465SJon Hunter /* 66498962465SJon Hunter * Calculate the maximum number of cycles that we can pass to the 6656086e346SJohn Stultz * cyc2ns() function without overflowing a 64-bit result. 66698962465SJon Hunter */ 6676086e346SJohn Stultz max_cycles = ULLONG_MAX; 6686086e346SJohn Stultz do_div(max_cycles, mult+maxadj); 66998962465SJon Hunter 67098962465SJon Hunter /* 67198962465SJon Hunter * The actual maximum number of cycles we can defer the clocksource is 67287d8b9ebSStephen Boyd * determined by the minimum of max_cycles and mask. 673d65670a7SJohn Stultz * Note: Here we subtract the maxadj to make sure we don't sleep for 674d65670a7SJohn Stultz * too long if there's a large negative adjustment. 
67598962465SJon Hunter */ 67687d8b9ebSStephen Boyd max_cycles = min(max_cycles, mask); 67787d8b9ebSStephen Boyd max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift); 67898962465SJon Hunter 679fb82fe2fSJohn Stultz /* return the max_cycles value as well if requested */ 680fb82fe2fSJohn Stultz if (max_cyc) 681fb82fe2fSJohn Stultz *max_cyc = max_cycles; 682fb82fe2fSJohn Stultz 683362fde04SJohn Stultz /* Return 50% of the actual maximum, so we can detect bad values */ 684362fde04SJohn Stultz max_nsecs >>= 1; 685362fde04SJohn Stultz 68687d8b9ebSStephen Boyd return max_nsecs; 68787d8b9ebSStephen Boyd } 68887d8b9ebSStephen Boyd 68987d8b9ebSStephen Boyd /** 690fb82fe2fSJohn Stultz * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles 691fb82fe2fSJohn Stultz * @cs: Pointer to clocksource to be updated 69287d8b9ebSStephen Boyd * 69387d8b9ebSStephen Boyd */ 694fb82fe2fSJohn Stultz static inline void clocksource_update_max_deferment(struct clocksource *cs) 69587d8b9ebSStephen Boyd { 696fb82fe2fSJohn Stultz cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift, 697fb82fe2fSJohn Stultz cs->maxadj, cs->mask, 698fb82fe2fSJohn Stultz &cs->max_cycles); 69998962465SJon Hunter } 70098962465SJon Hunter 701592913ecSJohn Stultz #ifndef CONFIG_ARCH_USES_GETTIMEOFFSET 702734efb46Sjohn stultz 703f5a2e343SThomas Gleixner static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur) 7045d33b883SThomas Gleixner { 7055d33b883SThomas Gleixner struct clocksource *cs; 7065d33b883SThomas Gleixner 7075d33b883SThomas Gleixner if (!finished_booting || list_empty(&clocksource_list)) 7085d33b883SThomas Gleixner return NULL; 7095d33b883SThomas Gleixner 7105d33b883SThomas Gleixner /* 7115d33b883SThomas Gleixner * We pick the clocksource with the highest rating. If oneshot 7125d33b883SThomas Gleixner * mode is active, we pick the highres valid clocksource with 7135d33b883SThomas Gleixner * the best rating. 
7145d33b883SThomas Gleixner */ 7155d33b883SThomas Gleixner list_for_each_entry(cs, &clocksource_list, list) { 716f5a2e343SThomas Gleixner if (skipcur && cs == curr_clocksource) 717f5a2e343SThomas Gleixner continue; 7185d33b883SThomas Gleixner if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES)) 7195d33b883SThomas Gleixner continue; 7205d33b883SThomas Gleixner return cs; 7215d33b883SThomas Gleixner } 7225d33b883SThomas Gleixner return NULL; 7235d33b883SThomas Gleixner } 7245d33b883SThomas Gleixner 725f5a2e343SThomas Gleixner static void __clocksource_select(bool skipcur) 726734efb46Sjohn stultz { 7275d33b883SThomas Gleixner bool oneshot = tick_oneshot_mode_active(); 728f1b82746SMartin Schwidefsky struct clocksource *best, *cs; 7295d8b34fdSThomas Gleixner 7305d33b883SThomas Gleixner /* Find the best suitable clocksource */ 731f5a2e343SThomas Gleixner best = clocksource_find_best(oneshot, skipcur); 7325d33b883SThomas Gleixner if (!best) 733f1b82746SMartin Schwidefsky return; 7345d33b883SThomas Gleixner 7357f852afeSBaolin Wang if (!strlen(override_name)) 7367f852afeSBaolin Wang goto found; 7377f852afeSBaolin Wang 738f1b82746SMartin Schwidefsky /* Check for the override clocksource. */ 739f1b82746SMartin Schwidefsky list_for_each_entry(cs, &clocksource_list, list) { 740f5a2e343SThomas Gleixner if (skipcur && cs == curr_clocksource) 741f5a2e343SThomas Gleixner continue; 742f1b82746SMartin Schwidefsky if (strcmp(cs->name, override_name) != 0) 743f1b82746SMartin Schwidefsky continue; 744f1b82746SMartin Schwidefsky /* 745f1b82746SMartin Schwidefsky * Check to make sure we don't switch to a non-highres 746f1b82746SMartin Schwidefsky * capable clocksource if the tick code is in oneshot 747f1b82746SMartin Schwidefsky * mode (highres or nohz) 748f1b82746SMartin Schwidefsky */ 7495d33b883SThomas Gleixner if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) { 750f1b82746SMartin Schwidefsky /* Override clocksource cannot be used. 
*/ 75136374583SKyle Walker if (cs->flags & CLOCK_SOURCE_UNSTABLE) { 75236374583SKyle Walker pr_warn("Override clocksource %s is unstable and not HRT compatible - cannot switch while in HRT/NOHZ mode\n", 75345bbfe64SJoe Perches cs->name); 754f1b82746SMartin Schwidefsky override_name[0] = 0; 75536374583SKyle Walker } else { 75636374583SKyle Walker /* 75736374583SKyle Walker * The override cannot be currently verified. 75836374583SKyle Walker * Deferring to let the watchdog check. 75936374583SKyle Walker */ 76036374583SKyle Walker pr_info("Override clocksource %s is not currently HRT compatible - deferring\n", 76136374583SKyle Walker cs->name); 76236374583SKyle Walker } 763f1b82746SMartin Schwidefsky } else 764f1b82746SMartin Schwidefsky /* Override clocksource can be used. */ 765f1b82746SMartin Schwidefsky best = cs; 766f1b82746SMartin Schwidefsky break; 767734efb46Sjohn stultz } 768ba919d1cSThomas Gleixner 7697f852afeSBaolin Wang found: 770ba919d1cSThomas Gleixner if (curr_clocksource != best && !timekeeping_notify(best)) { 771ba919d1cSThomas Gleixner pr_info("Switched to clocksource %s\n", best->name); 77275c5158fSMartin Schwidefsky curr_clocksource = best; 773f1b82746SMartin Schwidefsky } 77475c5158fSMartin Schwidefsky } 77575c5158fSMartin Schwidefsky 776f5a2e343SThomas Gleixner /** 777f5a2e343SThomas Gleixner * clocksource_select - Select the best clocksource available 778f5a2e343SThomas Gleixner * 779f5a2e343SThomas Gleixner * Private function. Must hold clocksource_mutex when called. 780f5a2e343SThomas Gleixner * 781f5a2e343SThomas Gleixner * Select the clocksource with the best rating, or the clocksource, 782f5a2e343SThomas Gleixner * which is selected by userspace override. 
783f5a2e343SThomas Gleixner */ 784f5a2e343SThomas Gleixner static void clocksource_select(void) 785f5a2e343SThomas Gleixner { 786cfed432dSGuillaume Gomez __clocksource_select(false); 787f5a2e343SThomas Gleixner } 788f5a2e343SThomas Gleixner 7897eaeb343SThomas Gleixner static void clocksource_select_fallback(void) 7907eaeb343SThomas Gleixner { 791cfed432dSGuillaume Gomez __clocksource_select(true); 7927eaeb343SThomas Gleixner } 7937eaeb343SThomas Gleixner 794592913ecSJohn Stultz #else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */ 79554a6bc0bSThomas Gleixner static inline void clocksource_select(void) { } 7961eaff672SThomas Gleixner static inline void clocksource_select_fallback(void) { } 79754a6bc0bSThomas Gleixner 79854a6bc0bSThomas Gleixner #endif 79954a6bc0bSThomas Gleixner 80075c5158fSMartin Schwidefsky /* 80175c5158fSMartin Schwidefsky * clocksource_done_booting - Called near the end of core bootup 80275c5158fSMartin Schwidefsky * 80375c5158fSMartin Schwidefsky * Hack to avoid lots of clocksource churn at boot time. 80475c5158fSMartin Schwidefsky * We use fs_initcall because we want this to start before 80575c5158fSMartin Schwidefsky * device_initcall but after subsys_initcall. 
80675c5158fSMartin Schwidefsky */ 80775c5158fSMartin Schwidefsky static int __init clocksource_done_booting(void) 80875c5158fSMartin Schwidefsky { 809ad6759fbSjohn stultz mutex_lock(&clocksource_mutex); 810ad6759fbSjohn stultz curr_clocksource = clocksource_default_clock(); 81175c5158fSMartin Schwidefsky finished_booting = 1; 81254a6bc0bSThomas Gleixner /* 81354a6bc0bSThomas Gleixner * Run the watchdog first to eliminate unstable clock sources 81454a6bc0bSThomas Gleixner */ 815e2c631baSPeter Zijlstra __clocksource_watchdog_kthread(); 81675c5158fSMartin Schwidefsky clocksource_select(); 817e6c73305SThomas Gleixner mutex_unlock(&clocksource_mutex); 81875c5158fSMartin Schwidefsky return 0; 81975c5158fSMartin Schwidefsky } 82075c5158fSMartin Schwidefsky fs_initcall(clocksource_done_booting); 821f1b82746SMartin Schwidefsky 82292c7e002SThomas Gleixner /* 82392c7e002SThomas Gleixner * Enqueue the clocksource sorted by rating 824734efb46Sjohn stultz */ 825f1b82746SMartin Schwidefsky static void clocksource_enqueue(struct clocksource *cs) 826734efb46Sjohn stultz { 827f1b82746SMartin Schwidefsky struct list_head *entry = &clocksource_list; 828f1b82746SMartin Schwidefsky struct clocksource *tmp; 829734efb46Sjohn stultz 8300fb71d34SMinfei Huang list_for_each_entry(tmp, &clocksource_list, list) { 83192c7e002SThomas Gleixner /* Keep track of the place, where to insert */ 8320fb71d34SMinfei Huang if (tmp->rating < cs->rating) 8330fb71d34SMinfei Huang break; 834f1b82746SMartin Schwidefsky entry = &tmp->list; 8350fb71d34SMinfei Huang } 836f1b82746SMartin Schwidefsky list_add(&cs->list, entry); 837734efb46Sjohn stultz } 838734efb46Sjohn stultz 839d7e81c26SJohn Stultz /** 840fba9e072SJohn Stultz * __clocksource_update_freq_scale - Used update clocksource with new freq 841b1b73d09SKusanagi Kouichi * @cs: clocksource to be registered 842852db46dSJohn Stultz * @scale: Scale factor multiplied against freq to get clocksource hz 843852db46dSJohn Stultz * @freq: clocksource frequency 
(cycles per second) divided by scale 844852db46dSJohn Stultz * 845852db46dSJohn Stultz * This should only be called from the clocksource->enable() method. 846852db46dSJohn Stultz * 847852db46dSJohn Stultz * This *SHOULD NOT* be called directly! Please use the 848fba9e072SJohn Stultz * __clocksource_update_freq_hz() or __clocksource_update_freq_khz() helper 849fba9e072SJohn Stultz * functions. 850852db46dSJohn Stultz */ 851fba9e072SJohn Stultz void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq) 852852db46dSJohn Stultz { 853c0e299b1SThomas Gleixner u64 sec; 854f8935983SJohn Stultz 855f8935983SJohn Stultz /* 856f8935983SJohn Stultz * Default clocksources are *special* and self-define their mult/shift. 857f8935983SJohn Stultz * But, you're not special, so you should specify a freq value. 858f8935983SJohn Stultz */ 859f8935983SJohn Stultz if (freq) { 860852db46dSJohn Stultz /* 861724ed53eSThomas Gleixner * Calc the maximum number of seconds which we can run before 862f8935983SJohn Stultz * wrapping around. For clocksources which have a mask > 32-bit 863724ed53eSThomas Gleixner * we need to limit the max sleep time to have a good 864724ed53eSThomas Gleixner * conversion precision. 10 minutes is still a reasonable 865724ed53eSThomas Gleixner * amount. That results in a shift value of 24 for a 866f8935983SJohn Stultz * clocksource with mask >= 40-bit and f >= 4GHz. That maps to 867362fde04SJohn Stultz * ~ 0.06ppm granularity for NTP. 
868852db46dSJohn Stultz */ 869362fde04SJohn Stultz sec = cs->mask; 870724ed53eSThomas Gleixner do_div(sec, freq); 871724ed53eSThomas Gleixner do_div(sec, scale); 872724ed53eSThomas Gleixner if (!sec) 873724ed53eSThomas Gleixner sec = 1; 874724ed53eSThomas Gleixner else if (sec > 600 && cs->mask > UINT_MAX) 875724ed53eSThomas Gleixner sec = 600; 876724ed53eSThomas Gleixner 877852db46dSJohn Stultz clocks_calc_mult_shift(&cs->mult, &cs->shift, freq, 878724ed53eSThomas Gleixner NSEC_PER_SEC / scale, sec * scale); 879f8935983SJohn Stultz } 880d65670a7SJohn Stultz /* 881362fde04SJohn Stultz * Ensure clocksources that have large 'mult' values don't overflow 882362fde04SJohn Stultz * when adjusted. 883d65670a7SJohn Stultz */ 884d65670a7SJohn Stultz cs->maxadj = clocksource_max_adjustment(cs); 885f8935983SJohn Stultz while (freq && ((cs->mult + cs->maxadj < cs->mult) 886f8935983SJohn Stultz || (cs->mult - cs->maxadj > cs->mult))) { 887d65670a7SJohn Stultz cs->mult >>= 1; 888d65670a7SJohn Stultz cs->shift--; 889d65670a7SJohn Stultz cs->maxadj = clocksource_max_adjustment(cs); 890d65670a7SJohn Stultz } 891d65670a7SJohn Stultz 892f8935983SJohn Stultz /* 893f8935983SJohn Stultz * Only warn for *special* clocksources that self-define 894f8935983SJohn Stultz * their mult/shift values and don't specify a freq. 
895f8935983SJohn Stultz */ 896f8935983SJohn Stultz WARN_ONCE(cs->mult + cs->maxadj < cs->mult, 897f8935983SJohn Stultz "timekeeping: Clocksource %s might overflow on 11%% adjustment\n", 898f8935983SJohn Stultz cs->name); 899f8935983SJohn Stultz 900fb82fe2fSJohn Stultz clocksource_update_max_deferment(cs); 9018cc8c525SJohn Stultz 90245bbfe64SJoe Perches pr_info("%s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n", 9038cc8c525SJohn Stultz cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns); 904852db46dSJohn Stultz } 905fba9e072SJohn Stultz EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale); 906852db46dSJohn Stultz 907852db46dSJohn Stultz /** 908d7e81c26SJohn Stultz * __clocksource_register_scale - Used to install new clocksources 909b1b73d09SKusanagi Kouichi * @cs: clocksource to be registered 910d7e81c26SJohn Stultz * @scale: Scale factor multiplied against freq to get clocksource hz 911d7e81c26SJohn Stultz * @freq: clocksource frequency (cycles per second) divided by scale 912d7e81c26SJohn Stultz * 913d7e81c26SJohn Stultz * Returns -EBUSY if registration fails, zero otherwise. 914d7e81c26SJohn Stultz * 915d7e81c26SJohn Stultz * This *SHOULD NOT* be called directly! Please use the 916d7e81c26SJohn Stultz * clocksource_register_hz() or clocksource_register_khz helper functions. 
917d7e81c26SJohn Stultz */ 918d7e81c26SJohn Stultz int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) 919d7e81c26SJohn Stultz { 9202aae7bcfSPeter Zijlstra unsigned long flags; 921d7e81c26SJohn Stultz 922d67f34c1SThomas Gleixner clocksource_arch_init(cs); 923d67f34c1SThomas Gleixner 924b595076aSUwe Kleine-König /* Initialize mult/shift and max_idle_ns */ 925fba9e072SJohn Stultz __clocksource_update_freq_scale(cs, scale, freq); 926d7e81c26SJohn Stultz 927be278e98SJames Hartley /* Add clocksource to the clocksource list */ 928d7e81c26SJohn Stultz mutex_lock(&clocksource_mutex); 9292aae7bcfSPeter Zijlstra 9302aae7bcfSPeter Zijlstra clocksource_watchdog_lock(&flags); 931d7e81c26SJohn Stultz clocksource_enqueue(cs); 932d7e81c26SJohn Stultz clocksource_enqueue_watchdog(cs); 9332aae7bcfSPeter Zijlstra clocksource_watchdog_unlock(&flags); 9342aae7bcfSPeter Zijlstra 935e05b2efbSjohn stultz clocksource_select(); 936bbf66d89SVitaly Kuznetsov clocksource_select_watchdog(false); 93739232ed5SBaolin Wang __clocksource_suspend_select(cs); 938d7e81c26SJohn Stultz mutex_unlock(&clocksource_mutex); 939d7e81c26SJohn Stultz return 0; 940d7e81c26SJohn Stultz } 941d7e81c26SJohn Stultz EXPORT_SYMBOL_GPL(__clocksource_register_scale); 942d7e81c26SJohn Stultz 943d0981a1bSThomas Gleixner static void __clocksource_change_rating(struct clocksource *cs, int rating) 944d0981a1bSThomas Gleixner { 945d0981a1bSThomas Gleixner list_del(&cs->list); 946d0981a1bSThomas Gleixner cs->rating = rating; 947d0981a1bSThomas Gleixner clocksource_enqueue(cs); 948d0981a1bSThomas Gleixner } 949d0981a1bSThomas Gleixner 950734efb46Sjohn stultz /** 95192c7e002SThomas Gleixner * clocksource_change_rating - Change the rating of a registered clocksource 952b1b73d09SKusanagi Kouichi * @cs: clocksource to be changed 953b1b73d09SKusanagi Kouichi * @rating: new rating 954734efb46Sjohn stultz */ 95592c7e002SThomas Gleixner void clocksource_change_rating(struct clocksource *cs, int rating) 
956734efb46Sjohn stultz { 9572aae7bcfSPeter Zijlstra unsigned long flags; 9582aae7bcfSPeter Zijlstra 95975c5158fSMartin Schwidefsky mutex_lock(&clocksource_mutex); 9602aae7bcfSPeter Zijlstra clocksource_watchdog_lock(&flags); 961d0981a1bSThomas Gleixner __clocksource_change_rating(cs, rating); 9622aae7bcfSPeter Zijlstra clocksource_watchdog_unlock(&flags); 9632aae7bcfSPeter Zijlstra 964332962f2SThomas Gleixner clocksource_select(); 965bbf66d89SVitaly Kuznetsov clocksource_select_watchdog(false); 96639232ed5SBaolin Wang clocksource_suspend_select(false); 96775c5158fSMartin Schwidefsky mutex_unlock(&clocksource_mutex); 968734efb46Sjohn stultz } 969fb63a0ebSMartin Schwidefsky EXPORT_SYMBOL(clocksource_change_rating); 970734efb46Sjohn stultz 9717eaeb343SThomas Gleixner /* 9727eaeb343SThomas Gleixner * Unbind clocksource @cs. Called with clocksource_mutex held 9737eaeb343SThomas Gleixner */ 9747eaeb343SThomas Gleixner static int clocksource_unbind(struct clocksource *cs) 9757eaeb343SThomas Gleixner { 9762aae7bcfSPeter Zijlstra unsigned long flags; 9772aae7bcfSPeter Zijlstra 978bbf66d89SVitaly Kuznetsov if (clocksource_is_watchdog(cs)) { 979bbf66d89SVitaly Kuznetsov /* Select and try to install a replacement watchdog. */ 980bbf66d89SVitaly Kuznetsov clocksource_select_watchdog(true); 9817eaeb343SThomas Gleixner if (clocksource_is_watchdog(cs)) 9827eaeb343SThomas Gleixner return -EBUSY; 983bbf66d89SVitaly Kuznetsov } 9847eaeb343SThomas Gleixner 9857eaeb343SThomas Gleixner if (cs == curr_clocksource) { 9867eaeb343SThomas Gleixner /* Select and try to install a replacement clock source */ 9877eaeb343SThomas Gleixner clocksource_select_fallback(); 9887eaeb343SThomas Gleixner if (curr_clocksource == cs) 9897eaeb343SThomas Gleixner return -EBUSY; 9907eaeb343SThomas Gleixner } 9912aae7bcfSPeter Zijlstra 99239232ed5SBaolin Wang if (clocksource_is_suspend(cs)) { 99339232ed5SBaolin Wang /* 99439232ed5SBaolin Wang * Select and try to install a replacement suspend clocksource. 
99539232ed5SBaolin Wang * If no replacement suspend clocksource, we will just let the 99639232ed5SBaolin Wang * clocksource go and have no suspend clocksource. 99739232ed5SBaolin Wang */ 99839232ed5SBaolin Wang clocksource_suspend_select(true); 99939232ed5SBaolin Wang } 100039232ed5SBaolin Wang 10012aae7bcfSPeter Zijlstra clocksource_watchdog_lock(&flags); 10027eaeb343SThomas Gleixner clocksource_dequeue_watchdog(cs); 10037eaeb343SThomas Gleixner list_del_init(&cs->list); 10042aae7bcfSPeter Zijlstra clocksource_watchdog_unlock(&flags); 10052aae7bcfSPeter Zijlstra 10067eaeb343SThomas Gleixner return 0; 10077eaeb343SThomas Gleixner } 10087eaeb343SThomas Gleixner 10094713e22cSThomas Gleixner /** 10104713e22cSThomas Gleixner * clocksource_unregister - remove a registered clocksource 1011b1b73d09SKusanagi Kouichi * @cs: clocksource to be unregistered 10124713e22cSThomas Gleixner */ 1013a89c7edbSThomas Gleixner int clocksource_unregister(struct clocksource *cs) 10144713e22cSThomas Gleixner { 1015a89c7edbSThomas Gleixner int ret = 0; 1016a89c7edbSThomas Gleixner 101775c5158fSMartin Schwidefsky mutex_lock(&clocksource_mutex); 1018a89c7edbSThomas Gleixner if (!list_empty(&cs->list)) 1019a89c7edbSThomas Gleixner ret = clocksource_unbind(cs); 102075c5158fSMartin Schwidefsky mutex_unlock(&clocksource_mutex); 1021a89c7edbSThomas Gleixner return ret; 10224713e22cSThomas Gleixner } 1023fb63a0ebSMartin Schwidefsky EXPORT_SYMBOL(clocksource_unregister); 10244713e22cSThomas Gleixner 10252b013700SDaniel Walker #ifdef CONFIG_SYSFS 1026734efb46Sjohn stultz /** 1027e87821d1SBaolin Wang * current_clocksource_show - sysfs interface for current clocksource 1028734efb46Sjohn stultz * @dev: unused 1029b1b73d09SKusanagi Kouichi * @attr: unused 1030734efb46Sjohn stultz * @buf: char buffer to be filled with clocksource list 1031734efb46Sjohn stultz * 1032734efb46Sjohn stultz * Provides sysfs interface for listing current clocksource. 
1033734efb46Sjohn stultz */ 1034e87821d1SBaolin Wang static ssize_t current_clocksource_show(struct device *dev, 1035e87821d1SBaolin Wang struct device_attribute *attr, 1036e87821d1SBaolin Wang char *buf) 1037734efb46Sjohn stultz { 10385e2cb101SMiao Xie ssize_t count = 0; 1039734efb46Sjohn stultz 104075c5158fSMartin Schwidefsky mutex_lock(&clocksource_mutex); 10415e2cb101SMiao Xie count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name); 104275c5158fSMartin Schwidefsky mutex_unlock(&clocksource_mutex); 1043734efb46Sjohn stultz 10445e2cb101SMiao Xie return count; 1045734efb46Sjohn stultz } 1046734efb46Sjohn stultz 1047891292a7SPatrick Palka ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt) 104829b54078SThomas Gleixner { 104929b54078SThomas Gleixner size_t ret = cnt; 105029b54078SThomas Gleixner 105129b54078SThomas Gleixner /* strings from sysfs write are not 0 terminated! */ 105229b54078SThomas Gleixner if (!cnt || cnt >= CS_NAME_LEN) 105329b54078SThomas Gleixner return -EINVAL; 105429b54078SThomas Gleixner 105529b54078SThomas Gleixner /* strip of \n: */ 105629b54078SThomas Gleixner if (buf[cnt-1] == '\n') 105729b54078SThomas Gleixner cnt--; 105829b54078SThomas Gleixner if (cnt > 0) 105929b54078SThomas Gleixner memcpy(dst, buf, cnt); 106029b54078SThomas Gleixner dst[cnt] = 0; 106129b54078SThomas Gleixner return ret; 106229b54078SThomas Gleixner } 106329b54078SThomas Gleixner 1064734efb46Sjohn stultz /** 1065e87821d1SBaolin Wang * current_clocksource_store - interface for manually overriding clocksource 1066734efb46Sjohn stultz * @dev: unused 1067b1b73d09SKusanagi Kouichi * @attr: unused 1068734efb46Sjohn stultz * @buf: name of override clocksource 1069734efb46Sjohn stultz * @count: length of buffer 1070734efb46Sjohn stultz * 1071734efb46Sjohn stultz * Takes input from sysfs interface for manually overriding the default 1072b71a8eb0SUwe Kleine-König * clocksource selection. 
1073734efb46Sjohn stultz */ 1074e87821d1SBaolin Wang static ssize_t current_clocksource_store(struct device *dev, 1075d369a5d8SKay Sievers struct device_attribute *attr, 1076734efb46Sjohn stultz const char *buf, size_t count) 1077734efb46Sjohn stultz { 1078233bcb41SElad Wexler ssize_t ret; 1079734efb46Sjohn stultz 108075c5158fSMartin Schwidefsky mutex_lock(&clocksource_mutex); 1081734efb46Sjohn stultz 108203e13cf5SThomas Gleixner ret = sysfs_get_uname(buf, override_name, count); 108329b54078SThomas Gleixner if (ret >= 0) 1084f1b82746SMartin Schwidefsky clocksource_select(); 1085734efb46Sjohn stultz 108675c5158fSMartin Schwidefsky mutex_unlock(&clocksource_mutex); 1087734efb46Sjohn stultz 1088734efb46Sjohn stultz return ret; 1089734efb46Sjohn stultz } 1090e87821d1SBaolin Wang static DEVICE_ATTR_RW(current_clocksource); 1091734efb46Sjohn stultz 1092734efb46Sjohn stultz /** 1093e87821d1SBaolin Wang * unbind_clocksource_store - interface for manually unbinding clocksource 10947eaeb343SThomas Gleixner * @dev: unused 10957eaeb343SThomas Gleixner * @attr: unused 10967eaeb343SThomas Gleixner * @buf: unused 10977eaeb343SThomas Gleixner * @count: length of buffer 10987eaeb343SThomas Gleixner * 10997eaeb343SThomas Gleixner * Takes input from sysfs interface for manually unbinding a clocksource. 
11007eaeb343SThomas Gleixner */ 1101e87821d1SBaolin Wang static ssize_t unbind_clocksource_store(struct device *dev, 11027eaeb343SThomas Gleixner struct device_attribute *attr, 11037eaeb343SThomas Gleixner const char *buf, size_t count) 11047eaeb343SThomas Gleixner { 11057eaeb343SThomas Gleixner struct clocksource *cs; 11067eaeb343SThomas Gleixner char name[CS_NAME_LEN]; 1107233bcb41SElad Wexler ssize_t ret; 11087eaeb343SThomas Gleixner 110903e13cf5SThomas Gleixner ret = sysfs_get_uname(buf, name, count); 11107eaeb343SThomas Gleixner if (ret < 0) 11117eaeb343SThomas Gleixner return ret; 11127eaeb343SThomas Gleixner 11137eaeb343SThomas Gleixner ret = -ENODEV; 11147eaeb343SThomas Gleixner mutex_lock(&clocksource_mutex); 11157eaeb343SThomas Gleixner list_for_each_entry(cs, &clocksource_list, list) { 11167eaeb343SThomas Gleixner if (strcmp(cs->name, name)) 11177eaeb343SThomas Gleixner continue; 11187eaeb343SThomas Gleixner ret = clocksource_unbind(cs); 11197eaeb343SThomas Gleixner break; 11207eaeb343SThomas Gleixner } 11217eaeb343SThomas Gleixner mutex_unlock(&clocksource_mutex); 11227eaeb343SThomas Gleixner 11237eaeb343SThomas Gleixner return ret ? 
ret : count; 11247eaeb343SThomas Gleixner } 1125e87821d1SBaolin Wang static DEVICE_ATTR_WO(unbind_clocksource); 11267eaeb343SThomas Gleixner 11277eaeb343SThomas Gleixner /** 1128e87821d1SBaolin Wang * available_clocksource_show - sysfs interface for listing clocksource 1129734efb46Sjohn stultz * @dev: unused 1130b1b73d09SKusanagi Kouichi * @attr: unused 1131734efb46Sjohn stultz * @buf: char buffer to be filled with clocksource list 1132734efb46Sjohn stultz * 1133734efb46Sjohn stultz * Provides sysfs interface for listing registered clocksources 1134734efb46Sjohn stultz */ 1135e87821d1SBaolin Wang static ssize_t available_clocksource_show(struct device *dev, 1136d369a5d8SKay Sievers struct device_attribute *attr, 11374a0b2b4dSAndi Kleen char *buf) 1138734efb46Sjohn stultz { 11392e197586SMatthias Kaehlcke struct clocksource *src; 11405e2cb101SMiao Xie ssize_t count = 0; 1141734efb46Sjohn stultz 114275c5158fSMartin Schwidefsky mutex_lock(&clocksource_mutex); 11432e197586SMatthias Kaehlcke list_for_each_entry(src, &clocksource_list, list) { 1144cd6d95d8SThomas Gleixner /* 1145cd6d95d8SThomas Gleixner * Don't show non-HRES clocksource if the tick code is 1146cd6d95d8SThomas Gleixner * in one shot mode (highres=on or nohz=on) 1147cd6d95d8SThomas Gleixner */ 1148cd6d95d8SThomas Gleixner if (!tick_oneshot_mode_active() || 11493f68535aSjohn stultz (src->flags & CLOCK_SOURCE_VALID_FOR_HRES)) 11505e2cb101SMiao Xie count += snprintf(buf + count, 11515e2cb101SMiao Xie max((ssize_t)PAGE_SIZE - count, (ssize_t)0), 11525e2cb101SMiao Xie "%s ", src->name); 1153734efb46Sjohn stultz } 115475c5158fSMartin Schwidefsky mutex_unlock(&clocksource_mutex); 1155734efb46Sjohn stultz 11565e2cb101SMiao Xie count += snprintf(buf + count, 11575e2cb101SMiao Xie max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n"); 1158734efb46Sjohn stultz 11595e2cb101SMiao Xie return count; 1160734efb46Sjohn stultz } 1161e87821d1SBaolin Wang static DEVICE_ATTR_RO(available_clocksource); 1162734efb46Sjohn stultz 
116327263e8dSBaolin Wang static struct attribute *clocksource_attrs[] = { 116427263e8dSBaolin Wang &dev_attr_current_clocksource.attr, 116527263e8dSBaolin Wang &dev_attr_unbind_clocksource.attr, 116627263e8dSBaolin Wang &dev_attr_available_clocksource.attr, 116727263e8dSBaolin Wang NULL 116827263e8dSBaolin Wang }; 116927263e8dSBaolin Wang ATTRIBUTE_GROUPS(clocksource); 117027263e8dSBaolin Wang 1171d369a5d8SKay Sievers static struct bus_type clocksource_subsys = { 1172af5ca3f4SKay Sievers .name = "clocksource", 1173d369a5d8SKay Sievers .dev_name = "clocksource", 1174734efb46Sjohn stultz }; 1175734efb46Sjohn stultz 1176d369a5d8SKay Sievers static struct device device_clocksource = { 1177734efb46Sjohn stultz .id = 0, 1178d369a5d8SKay Sievers .bus = &clocksource_subsys, 117927263e8dSBaolin Wang .groups = clocksource_groups, 1180734efb46Sjohn stultz }; 1181734efb46Sjohn stultz 1182ad596171Sjohn stultz static int __init init_clocksource_sysfs(void) 1183734efb46Sjohn stultz { 1184d369a5d8SKay Sievers int error = subsys_system_register(&clocksource_subsys, NULL); 1185734efb46Sjohn stultz 1186734efb46Sjohn stultz if (!error) 1187d369a5d8SKay Sievers error = device_register(&device_clocksource); 118827263e8dSBaolin Wang 1189734efb46Sjohn stultz return error; 1190734efb46Sjohn stultz } 1191734efb46Sjohn stultz 1192734efb46Sjohn stultz device_initcall(init_clocksource_sysfs); 11932b013700SDaniel Walker #endif /* CONFIG_SYSFS */ 1194734efb46Sjohn stultz 1195734efb46Sjohn stultz /** 1196734efb46Sjohn stultz * boot_override_clocksource - boot clock override 1197734efb46Sjohn stultz * @str: override name 1198734efb46Sjohn stultz * 1199734efb46Sjohn stultz * Takes a clocksource= boot argument and uses it 1200734efb46Sjohn stultz * as the clocksource override name. 
1201734efb46Sjohn stultz */ 1202734efb46Sjohn stultz static int __init boot_override_clocksource(char* str) 1203734efb46Sjohn stultz { 120475c5158fSMartin Schwidefsky mutex_lock(&clocksource_mutex); 1205734efb46Sjohn stultz if (str) 1206734efb46Sjohn stultz strlcpy(override_name, str, sizeof(override_name)); 120775c5158fSMartin Schwidefsky mutex_unlock(&clocksource_mutex); 1208734efb46Sjohn stultz return 1; 1209734efb46Sjohn stultz } 1210734efb46Sjohn stultz 1211734efb46Sjohn stultz __setup("clocksource=", boot_override_clocksource); 1212734efb46Sjohn stultz 1213734efb46Sjohn stultz /** 1214734efb46Sjohn stultz * boot_override_clock - Compatibility layer for deprecated boot option 1215734efb46Sjohn stultz * @str: override name 1216734efb46Sjohn stultz * 1217734efb46Sjohn stultz * DEPRECATED! Takes a clock= boot argument and uses it 1218734efb46Sjohn stultz * as the clocksource override name 1219734efb46Sjohn stultz */ 1220734efb46Sjohn stultz static int __init boot_override_clock(char* str) 1221734efb46Sjohn stultz { 12225d0cf410Sjohn stultz if (!strcmp(str, "pmtmr")) { 122345bbfe64SJoe Perches pr_warn("clock=pmtmr is deprecated - use clocksource=acpi_pm\n"); 12245d0cf410Sjohn stultz return boot_override_clocksource("acpi_pm"); 12255d0cf410Sjohn stultz } 122645bbfe64SJoe Perches pr_warn("clock= boot option is deprecated - use clocksource=xyz\n"); 1227734efb46Sjohn stultz return boot_override_clocksource(str); 1228734efb46Sjohn stultz } 1229734efb46Sjohn stultz 1230734efb46Sjohn stultz __setup("clock=", boot_override_clock); 1231