1734efb46Sjohn stultz /* 2734efb46Sjohn stultz * linux/kernel/time/clocksource.c 3734efb46Sjohn stultz * 4734efb46Sjohn stultz * This file contains the functions which manage clocksource drivers. 5734efb46Sjohn stultz * 6734efb46Sjohn stultz * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com) 7734efb46Sjohn stultz * 8734efb46Sjohn stultz * This program is free software; you can redistribute it and/or modify 9734efb46Sjohn stultz * it under the terms of the GNU General Public License as published by 10734efb46Sjohn stultz * the Free Software Foundation; either version 2 of the License, or 11734efb46Sjohn stultz * (at your option) any later version. 12734efb46Sjohn stultz * 13734efb46Sjohn stultz * This program is distributed in the hope that it will be useful, 14734efb46Sjohn stultz * but WITHOUT ANY WARRANTY; without even the implied warranty of 15734efb46Sjohn stultz * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16734efb46Sjohn stultz * GNU General Public License for more details. 17734efb46Sjohn stultz * 18734efb46Sjohn stultz * You should have received a copy of the GNU General Public License 19734efb46Sjohn stultz * along with this program; if not, write to the Free Software 20734efb46Sjohn stultz * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
21734efb46Sjohn stultz * 22734efb46Sjohn stultz * TODO WishList: 23734efb46Sjohn stultz * o Allow clocksource drivers to be unregistered 24734efb46Sjohn stultz */ 25734efb46Sjohn stultz 26d369a5d8SKay Sievers #include <linux/device.h> 27734efb46Sjohn stultz #include <linux/clocksource.h> 28734efb46Sjohn stultz #include <linux/init.h> 29734efb46Sjohn stultz #include <linux/module.h> 30dc29a365SMathieu Desnoyers #include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */ 3179bf2bb3SThomas Gleixner #include <linux/tick.h> 3201548f4dSMartin Schwidefsky #include <linux/kthread.h> 33734efb46Sjohn stultz 34c1797bafSThomas Gleixner #include "tick-internal.h" 353a978377SThomas Gleixner #include "timekeeping_internal.h" 3603e13cf5SThomas Gleixner 377d2f944aSThomas Gleixner /** 387d2f944aSThomas Gleixner * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks 397d2f944aSThomas Gleixner * @mult: pointer to mult variable 407d2f944aSThomas Gleixner * @shift: pointer to shift variable 417d2f944aSThomas Gleixner * @from: frequency to convert from 427d2f944aSThomas Gleixner * @to: frequency to convert to 435fdade95SNicolas Pitre * @maxsec: guaranteed runtime conversion range in seconds 447d2f944aSThomas Gleixner * 457d2f944aSThomas Gleixner * The function evaluates the shift/mult pair for the scaled math 467d2f944aSThomas Gleixner * operations of clocksources and clockevents. 477d2f944aSThomas Gleixner * 487d2f944aSThomas Gleixner * @to and @from are frequency values in HZ. For clock sources @to is 497d2f944aSThomas Gleixner * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock 507d2f944aSThomas Gleixner * event @to is the counter frequency and @from is NSEC_PER_SEC. 
 *
 * The @maxsec conversion range argument controls the time frame in
 * seconds which must be covered by the runtime conversion with the
 * calculated mult and shift factors. This guarantees that no 64bit
 * overflow happens when the input value of the conversion is
 * multiplied with the calculated mult factor. Larger ranges may
 * reduce the conversion accuracy by choosing smaller mult and shift
 * factors.
 */
void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
{
	u64 tmp;
	u32 sft, sftacc= 32;

	/*
	 * Calculate the shift factor which is limiting the conversion
	 * range:
	 */
	tmp = ((u64)maxsec * from) >> 32;
	while (tmp) {
		tmp >>=1;
		sftacc--;
	}

	/*
	 * Find the conversion shift/mult pair which has the best
	 * accuracy and fits the maxsec conversion range:
	 */
	for (sft = 32; sft > 0; sft--) {
		tmp = (u64) to << sft;
		/* Round to nearest on the division below: */
		tmp += from / 2;
		do_div(tmp, from);
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = tmp;
	*shift = sft;
}

/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[CS_NAME_LEN];
static int finished_booting;

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);
static void clocksource_select(void);

static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
static int watchdog_running;
static atomic_t watchdog_reset_pending;

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

/*
 * Interval: 0.5sec
 Threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)

static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * If kthread_run fails the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}

/*
 * Mark @cs unstable: drop its highres/watchdog flags and, once booting
 * has finished, kick the watchdog work to re-rate and reselect.
 * Caller must hold watchdog_lock.
 */
static void __clocksource_unstable(struct clocksource *cs)
{
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;
	if (finished_booting)
		schedule_work(&watchdog_work);
}

/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs:		clocksource to be marked unstable
 *
 * This function is called instead of clocksource_change_rating from
 * cpu hotplug code to avoid a deadlock between the clocksource mutex
 * and the cpu hotplug mutex. It defers the update of the clocksource
 * to the watchdog thread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		if (list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

/*
 * Watchdog timer callback: compare the elapsed time of every watched
 * clocksource against the watchdog clocksource and mark any clocksource
 * whose skew exceeds WATCHDOG_THRESHOLD as unstable. The timer is then
 * re-armed on the next online CPU so all CPUs get cross-checked.
 */
static void clocksource_watchdog(unsigned long data)
{
	struct clocksource *cs;
	cycle_t csnow, wdnow, cslast, wdlast, delta;
	int64_t wd_nsec, cs_nsec;
	int next_cpu, reset_pending;

	spin_lock(&watchdog_lock);
	if (!watchdog_running)
		goto out;

	reset_pending = atomic_read(&watchdog_reset_pending);

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			if (finished_booting)
				schedule_work(&watchdog_work);
			continue;
		}

		/* Read both counters back to back, with interrupts off. */
		local_irq_disable();
		csnow = cs->read(cs);
		wdnow = watchdog->read(watchdog);
		local_irq_enable();

		/* Clocksource initialized ? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
		    atomic_read(&watchdog_reset_pending)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = wdnow;
			cs->cs_last = csnow;
			continue;
		}

		delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
		wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
					     watchdog->shift);

		delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
		wdlast = cs->wd_last; /* save these in case we print them */
		cslast = cs->cs_last;
		cs->cs_last = csnow;
		cs->wd_last = wdnow;

		/* A reset arrived mid-scan: skip the skew check this round. */
		if (atomic_read(&watchdog_reset_pending))
			continue;

		/* Check the deviation from the watchdog clocksource. */
		if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) {
			pr_warn("timekeeping watchdog: Marking clocksource '%s' as unstable, because the skew is too large:\n", cs->name);
			pr_warn(" '%s' wd_now: %llx wd_last: %llx mask: %llx\n",
				watchdog->name, wdnow, wdlast, watchdog->mask);
			pr_warn(" '%s' cs_now: %llx cs_last: %llx mask: %llx\n",
				cs->name, csnow, cslast, cs->mask);
			__clocksource_unstable(cs);
			continue;
		}

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			/* Mark it valid for high-res. */
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;

			/*
			 * clocksource_done_booting() will sort it if
			 * finished_booting is not set yet.
			 */
			if (!finished_booting)
				continue;

			/*
			 * If this is not the current clocksource let
			 * the watchdog thread reselect it. Due to the
			 * change to high res this clocksource might
			 * be preferred now. If it is the current
			 * clocksource let the tick code know about
			 * that change.
			 */
			if (cs != curr_clocksource) {
				cs->flags |= CLOCK_SOURCE_RESELECT;
				schedule_work(&watchdog_work);
			} else {
				tick_clock_notify();
			}
		}
	}

	/*
	 * We only clear the watchdog_reset_pending, when we did a
	 * full cycle through all clocksources.
	 */
	if (reset_pending)
		atomic_dec(&watchdog_reset_pending);

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);
	watchdog_timer.expires += WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, next_cpu);
out:
	spin_unlock(&watchdog_lock);
}

/*
 * Arm the watchdog timer if there is a watchdog, something to watch,
 * and it is not already running. Caller must hold watchdog_lock.
 */
static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
	init_timer(&watchdog_timer);
	watchdog_timer.function = clocksource_watchdog;
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}

/*
 * Stop the watchdog timer when it is running but nothing is left to
 * watch. Caller must hold watchdog_lock.
 */
static inline void clocksource_stop_watchdog(void)
{
	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
		return;
	del_timer(&watchdog_timer);
	watchdog_running = 0;
}

/*
 * Clear the per-clocksource WATCHDOG flag so the next watchdog pass
 * re-initializes its reference cycle values.
 */
static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}

/* Request a watchdog state reset after resume (processed in the timer). */
static void clocksource_resume_watchdog(void)
{
	atomic_inc(&watchdog_reset_pending);
}

/*
 * Register @cs with the watchdog machinery: either as a clocksource to
 * be verified (CLOCK_SOURCE_MUST_VERIFY) or as a watchdog candidate.
 */
static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating) {
			watchdog = cs;
			/* Reset watchdog cycles */
			clocksource_reset_watchdog();
		}
	}
	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

/* Remove a watched clocksource from the watchdog list on unregistration. */
static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs != watchdog) {
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
			/* cs is a watched clocksource. */
			list_del_init(&cs->wd_list);
			/* Check if the watchdog timer needs to be stopped. */
			clocksource_stop_watchdog();
		}
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

/*
 * Move all clocksources marked unstable off the watchdog list and drop
 * their rating to 0. Returns nonzero when a clocksource reselection is
 * required (something went unstable or was flagged for reselect).
 */
static int __clocksource_watchdog_kthread(void)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	LIST_HEAD(unstable);
	int select = 0;

	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			list_add(&cs->wd_list, &unstable);
			select = 1;
		}
		if (cs->flags & CLOCK_SOURCE_RESELECT) {
			cs->flags &= ~CLOCK_SOURCE_RESELECT;
			select = 1;
		}
	}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	/* Needs to be done outside of watchdog lock */
	list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
		list_del_init(&cs->wd_list);
		__clocksource_change_rating(cs, 0);
	}
	return select;
}

/* Kthread entry point: demote unstable clocksources, then reselect. */
static int clocksource_watchdog_kthread(void *data)
{
	mutex_lock(&clocksource_mutex);
	if (__clocksource_watchdog_kthread())
		clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}

static bool clocksource_is_watchdog(struct clocksource *cs)
{
	return cs == watchdog;
}

#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int __clocksource_watchdog_kthread(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
void clocksource_mark_unstable(struct clocksource *cs) { }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */

/**
 * clocksource_suspend - suspend the clocksource(s)
 */
void clocksource_suspend(void)
{
	struct clocksource *cs;

	/* Reverse order: suspend the most recently registered first. */
	list_for_each_entry_reverse(cs, &clocksource_list, list)
		if (cs->suspend)
			cs->suspend(cs);
}

/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume(cs);

	clocksource_resume_watchdog();
}

/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog. This might fail when the kernel
 * was stopped in code which holds watchdog_lock.
 */
void clocksource_touch_watchdog(void)
{
	clocksource_resume_watchdog();
}

/**
 * clocksource_max_adjustment- Returns max adjustment amount
 * @cs:         Pointer to clocksource
 *
 */
static u32 clocksource_max_adjustment(struct clocksource *cs)
{
	u64 ret;
	/*
	 * We won't try to correct for more than 11% adjustments (110,000 ppm),
	 */
	ret = (u64)cs->mult * 11;
	do_div(ret,100);
	return (u32)ret;
}

/**
 * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
 * @mult:	cycle to nanosecond multiplier
 * @shift:	cycle to nanosecond divisor (power of two)
 * @maxadj:	maximum adjustment value to mult (~11%)
 * @mask:	bitmask for two's complement subtraction of non 64 bit counters
 * @max_cyc:	maximum cycle value before potential overflow (does not include
 *		any safety margin)
 *
 * NOTE: This function includes a safety margin of 50%, in other words, we
 * return half the number of nanoseconds the hardware counter can technically
 * cover. This is done so that we can potentially detect problems caused by
 * delayed timers or bad hardware, which might result in time intervals that
 * are larger then what the math used can handle without overflows.
 */
u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
{
	u64 max_nsecs, max_cycles;

	/*
	 * Calculate the maximum number of cycles that we can pass to the
	 * cyc2ns() function without overflowing a 64-bit result.
	 */
	max_cycles = ULLONG_MAX;
	do_div(max_cycles, mult+maxadj);

	/*
	 * The actual maximum number of cycles we can defer the clocksource is
	 * determined by the minimum of max_cycles and mask.
	 * Note: Here we subtract the maxadj to make sure we don't sleep for
	 * too long if there's a large negative adjustment.
	 */
	max_cycles = min(max_cycles, mask);
	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);

	/* return the max_cycles value as well if requested */
	if (max_cyc)
		*max_cyc = max_cycles;

	/* Return 50% of the actual maximum, so we can detect bad values */
	max_nsecs >>= 1;

	return max_nsecs;
}

/**
 * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles
 * @cs:         Pointer to clocksource to be updated
 *
 */
static inline void clocksource_update_max_deferment(struct clocksource *cs)
{
	cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
						cs->maxadj, cs->mask,
						&cs->max_cycles);
}

#ifndef CONFIG_ARCH_USES_GETTIMEOFFSET

/*
 * Return the best rated registered clocksource, honouring the oneshot
 * (highres/nohz) constraint and optionally skipping the current one.
 * Returns NULL before boot has finished or when the list is empty.
 */
static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
{
	struct clocksource *cs;

	if (!finished_booting || list_empty(&clocksource_list))
		return NULL;

	/*
	 * We pick the clocksource with the highest rating. If oneshot
	 * mode is active, we pick the highres valid clocksource with
	 * the best rating.
	 */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			continue;
		return cs;
	}
	return NULL;
}

/*
 * Select the clocksource to switch to: the user override if it is
 * usable, otherwise the best rated one; then notify timekeeping.
 */
static void __clocksource_select(bool skipcur)
{
	bool oneshot = tick_oneshot_mode_active();
	struct clocksource *best, *cs;

	/* Find the best suitable clocksource */
	best = clocksource_find_best(oneshot, skipcur);
	if (!best)
		return;

	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz)
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
			/* Override clocksource cannot be used. */
			printk(KERN_WARNING "Override clocksource %s is not "
			       "HRT compatible. Cannot switch while in "
			       "HRT/NOHZ mode\n", cs->name);
			override_name[0] = 0;
		} else
			/* Override clocksource can be used. */
			best = cs;
		break;
	}

	if (curr_clocksource != best && !timekeeping_notify(best)) {
		pr_info("Switched to clocksource %s\n", best->name);
		curr_clocksource = best;
	}
}

/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource,
 * which is selected by userspace override.
593f5a2e343SThomas Gleixner */ 594f5a2e343SThomas Gleixner static void clocksource_select(void) 595f5a2e343SThomas Gleixner { 596f5a2e343SThomas Gleixner return __clocksource_select(false); 597f5a2e343SThomas Gleixner } 598f5a2e343SThomas Gleixner 5997eaeb343SThomas Gleixner static void clocksource_select_fallback(void) 6007eaeb343SThomas Gleixner { 6017eaeb343SThomas Gleixner return __clocksource_select(true); 6027eaeb343SThomas Gleixner } 6037eaeb343SThomas Gleixner 604592913ecSJohn Stultz #else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */ 60554a6bc0bSThomas Gleixner 60654a6bc0bSThomas Gleixner static inline void clocksource_select(void) { } 6071eaff672SThomas Gleixner static inline void clocksource_select_fallback(void) { } 60854a6bc0bSThomas Gleixner 60954a6bc0bSThomas Gleixner #endif 61054a6bc0bSThomas Gleixner 61175c5158fSMartin Schwidefsky /* 61275c5158fSMartin Schwidefsky * clocksource_done_booting - Called near the end of core bootup 61375c5158fSMartin Schwidefsky * 61475c5158fSMartin Schwidefsky * Hack to avoid lots of clocksource churn at boot time. 61575c5158fSMartin Schwidefsky * We use fs_initcall because we want this to start before 61675c5158fSMartin Schwidefsky * device_initcall but after subsys_initcall. 
61775c5158fSMartin Schwidefsky */ 61875c5158fSMartin Schwidefsky static int __init clocksource_done_booting(void) 61975c5158fSMartin Schwidefsky { 620ad6759fbSjohn stultz mutex_lock(&clocksource_mutex); 621ad6759fbSjohn stultz curr_clocksource = clocksource_default_clock(); 62275c5158fSMartin Schwidefsky finished_booting = 1; 62354a6bc0bSThomas Gleixner /* 62454a6bc0bSThomas Gleixner * Run the watchdog first to eliminate unstable clock sources 62554a6bc0bSThomas Gleixner */ 626332962f2SThomas Gleixner __clocksource_watchdog_kthread(); 62775c5158fSMartin Schwidefsky clocksource_select(); 628e6c73305SThomas Gleixner mutex_unlock(&clocksource_mutex); 62975c5158fSMartin Schwidefsky return 0; 63075c5158fSMartin Schwidefsky } 63175c5158fSMartin Schwidefsky fs_initcall(clocksource_done_booting); 632f1b82746SMartin Schwidefsky 63392c7e002SThomas Gleixner /* 63492c7e002SThomas Gleixner * Enqueue the clocksource sorted by rating 635734efb46Sjohn stultz */ 636f1b82746SMartin Schwidefsky static void clocksource_enqueue(struct clocksource *cs) 637734efb46Sjohn stultz { 638f1b82746SMartin Schwidefsky struct list_head *entry = &clocksource_list; 639f1b82746SMartin Schwidefsky struct clocksource *tmp; 640734efb46Sjohn stultz 641f1b82746SMartin Schwidefsky list_for_each_entry(tmp, &clocksource_list, list) 64292c7e002SThomas Gleixner /* Keep track of the place, where to insert */ 643f1b82746SMartin Schwidefsky if (tmp->rating >= cs->rating) 644f1b82746SMartin Schwidefsky entry = &tmp->list; 645f1b82746SMartin Schwidefsky list_add(&cs->list, entry); 646734efb46Sjohn stultz } 647734efb46Sjohn stultz 648d7e81c26SJohn Stultz /** 649fba9e072SJohn Stultz * __clocksource_update_freq_scale - Used update clocksource with new freq 650b1b73d09SKusanagi Kouichi * @cs: clocksource to be registered 651852db46dSJohn Stultz * @scale: Scale factor multiplied against freq to get clocksource hz 652852db46dSJohn Stultz * @freq: clocksource frequency (cycles per second) divided by scale 653852db46dSJohn 
 *
 * This should only be called from the clocksource->enable() method.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * __clocksource_update_freq_hz() or __clocksource_update_freq_khz() helper
 * functions.
 */
void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	u64 sec;

	/*
	 * Default clocksources are *special* and self-define their mult/shift.
	 * But, you're not special, so you should specify a freq value.
	 */
	if (freq) {
		/*
		 * Calc the maximum number of seconds which we can run before
		 * wrapping around. For clocksources which have a mask > 32-bit
		 * we need to limit the max sleep time to have a good
		 * conversion precision. 10 minutes is still a reasonable
		 * amount. That results in a shift value of 24 for a
		 * clocksource with mask >= 40-bit and f >= 4GHz. That maps to
		 * ~ 0.06ppm granularity for NTP.
		 */
		sec = cs->mask;
		do_div(sec, freq);
		do_div(sec, scale);
		if (!sec)
			sec = 1;
		else if (sec > 600 && cs->mask > UINT_MAX)
			sec = 600;

		clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
				       NSEC_PER_SEC / scale, sec * scale);
	}
	/*
	 * Ensure clocksources that have large 'mult' values don't overflow
	 * when adjusted.
	 */
	cs->maxadj = clocksource_max_adjustment(cs);
	while (freq && ((cs->mult + cs->maxadj < cs->mult)
		|| (cs->mult - cs->maxadj > cs->mult))) {
		/*
		 * Halve mult (and decrement shift to compensate) until an
		 * 11% adjustment can no longer wrap the 32-bit mult value.
		 */
		cs->mult >>= 1;
		cs->shift--;
		cs->maxadj = clocksource_max_adjustment(cs);
	}

	/*
	 * Only warn for *special* clocksources that self-define
	 * their mult/shift values and don't specify a freq.
	 */
	WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
		"timekeeping: Clocksource %s might overflow on 11%% adjustment\n",
		cs->name);

	clocksource_update_max_deferment(cs);

	pr_info("clocksource %s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n",
			cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
}
EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);

/**
 * __clocksource_register_scale - Used to install new clocksources
 * @cs:		clocksource to be registered
 * @scale:	Scale factor multiplied against freq to get clocksource hz
 * @freq:	clocksource frequency (cycles per second) divided by scale
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * clocksource_register_hz() or clocksource_register_khz helper functions.
726d7e81c26SJohn Stultz */ 727d7e81c26SJohn Stultz int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) 728d7e81c26SJohn Stultz { 729d7e81c26SJohn Stultz 730b595076aSUwe Kleine-König /* Initialize mult/shift and max_idle_ns */ 731fba9e072SJohn Stultz __clocksource_update_freq_scale(cs, scale, freq); 732d7e81c26SJohn Stultz 733be278e98SJames Hartley /* Add clocksource to the clocksource list */ 734d7e81c26SJohn Stultz mutex_lock(&clocksource_mutex); 735d7e81c26SJohn Stultz clocksource_enqueue(cs); 736d7e81c26SJohn Stultz clocksource_enqueue_watchdog(cs); 737e05b2efbSjohn stultz clocksource_select(); 738d7e81c26SJohn Stultz mutex_unlock(&clocksource_mutex); 739d7e81c26SJohn Stultz return 0; 740d7e81c26SJohn Stultz } 741d7e81c26SJohn Stultz EXPORT_SYMBOL_GPL(__clocksource_register_scale); 742d7e81c26SJohn Stultz 743d0981a1bSThomas Gleixner static void __clocksource_change_rating(struct clocksource *cs, int rating) 744d0981a1bSThomas Gleixner { 745d0981a1bSThomas Gleixner list_del(&cs->list); 746d0981a1bSThomas Gleixner cs->rating = rating; 747d0981a1bSThomas Gleixner clocksource_enqueue(cs); 748d0981a1bSThomas Gleixner } 749d0981a1bSThomas Gleixner 750734efb46Sjohn stultz /** 75192c7e002SThomas Gleixner * clocksource_change_rating - Change the rating of a registered clocksource 752b1b73d09SKusanagi Kouichi * @cs: clocksource to be changed 753b1b73d09SKusanagi Kouichi * @rating: new rating 754734efb46Sjohn stultz */ 75592c7e002SThomas Gleixner void clocksource_change_rating(struct clocksource *cs, int rating) 756734efb46Sjohn stultz { 75775c5158fSMartin Schwidefsky mutex_lock(&clocksource_mutex); 758d0981a1bSThomas Gleixner __clocksource_change_rating(cs, rating); 759332962f2SThomas Gleixner clocksource_select(); 76075c5158fSMartin Schwidefsky mutex_unlock(&clocksource_mutex); 761734efb46Sjohn stultz } 762fb63a0ebSMartin Schwidefsky EXPORT_SYMBOL(clocksource_change_rating); 763734efb46Sjohn stultz 7647eaeb343SThomas Gleixner /* 
7657eaeb343SThomas Gleixner * Unbind clocksource @cs. Called with clocksource_mutex held 7667eaeb343SThomas Gleixner */ 7677eaeb343SThomas Gleixner static int clocksource_unbind(struct clocksource *cs) 7687eaeb343SThomas Gleixner { 7697eaeb343SThomas Gleixner /* 7707eaeb343SThomas Gleixner * I really can't convince myself to support this on hardware 7717eaeb343SThomas Gleixner * designed by lobotomized monkeys. 7727eaeb343SThomas Gleixner */ 7737eaeb343SThomas Gleixner if (clocksource_is_watchdog(cs)) 7747eaeb343SThomas Gleixner return -EBUSY; 7757eaeb343SThomas Gleixner 7767eaeb343SThomas Gleixner if (cs == curr_clocksource) { 7777eaeb343SThomas Gleixner /* Select and try to install a replacement clock source */ 7787eaeb343SThomas Gleixner clocksource_select_fallback(); 7797eaeb343SThomas Gleixner if (curr_clocksource == cs) 7807eaeb343SThomas Gleixner return -EBUSY; 7817eaeb343SThomas Gleixner } 7827eaeb343SThomas Gleixner clocksource_dequeue_watchdog(cs); 7837eaeb343SThomas Gleixner list_del_init(&cs->list); 7847eaeb343SThomas Gleixner return 0; 7857eaeb343SThomas Gleixner } 7867eaeb343SThomas Gleixner 7874713e22cSThomas Gleixner /** 7884713e22cSThomas Gleixner * clocksource_unregister - remove a registered clocksource 789b1b73d09SKusanagi Kouichi * @cs: clocksource to be unregistered 7904713e22cSThomas Gleixner */ 791a89c7edbSThomas Gleixner int clocksource_unregister(struct clocksource *cs) 7924713e22cSThomas Gleixner { 793a89c7edbSThomas Gleixner int ret = 0; 794a89c7edbSThomas Gleixner 79575c5158fSMartin Schwidefsky mutex_lock(&clocksource_mutex); 796a89c7edbSThomas Gleixner if (!list_empty(&cs->list)) 797a89c7edbSThomas Gleixner ret = clocksource_unbind(cs); 79875c5158fSMartin Schwidefsky mutex_unlock(&clocksource_mutex); 799a89c7edbSThomas Gleixner return ret; 8004713e22cSThomas Gleixner } 801fb63a0ebSMartin Schwidefsky EXPORT_SYMBOL(clocksource_unregister); 8024713e22cSThomas Gleixner 8032b013700SDaniel Walker #ifdef CONFIG_SYSFS 804734efb46Sjohn stultz 
/** 805734efb46Sjohn stultz * sysfs_show_current_clocksources - sysfs interface for current clocksource 806734efb46Sjohn stultz * @dev: unused 807b1b73d09SKusanagi Kouichi * @attr: unused 808734efb46Sjohn stultz * @buf: char buffer to be filled with clocksource list 809734efb46Sjohn stultz * 810734efb46Sjohn stultz * Provides sysfs interface for listing current clocksource. 811734efb46Sjohn stultz */ 812734efb46Sjohn stultz static ssize_t 813d369a5d8SKay Sievers sysfs_show_current_clocksources(struct device *dev, 814d369a5d8SKay Sievers struct device_attribute *attr, char *buf) 815734efb46Sjohn stultz { 8165e2cb101SMiao Xie ssize_t count = 0; 817734efb46Sjohn stultz 81875c5158fSMartin Schwidefsky mutex_lock(&clocksource_mutex); 8195e2cb101SMiao Xie count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name); 82075c5158fSMartin Schwidefsky mutex_unlock(&clocksource_mutex); 821734efb46Sjohn stultz 8225e2cb101SMiao Xie return count; 823734efb46Sjohn stultz } 824734efb46Sjohn stultz 825891292a7SPatrick Palka ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt) 82629b54078SThomas Gleixner { 82729b54078SThomas Gleixner size_t ret = cnt; 82829b54078SThomas Gleixner 82929b54078SThomas Gleixner /* strings from sysfs write are not 0 terminated! 
*/ 83029b54078SThomas Gleixner if (!cnt || cnt >= CS_NAME_LEN) 83129b54078SThomas Gleixner return -EINVAL; 83229b54078SThomas Gleixner 83329b54078SThomas Gleixner /* strip of \n: */ 83429b54078SThomas Gleixner if (buf[cnt-1] == '\n') 83529b54078SThomas Gleixner cnt--; 83629b54078SThomas Gleixner if (cnt > 0) 83729b54078SThomas Gleixner memcpy(dst, buf, cnt); 83829b54078SThomas Gleixner dst[cnt] = 0; 83929b54078SThomas Gleixner return ret; 84029b54078SThomas Gleixner } 84129b54078SThomas Gleixner 842734efb46Sjohn stultz /** 843734efb46Sjohn stultz * sysfs_override_clocksource - interface for manually overriding clocksource 844734efb46Sjohn stultz * @dev: unused 845b1b73d09SKusanagi Kouichi * @attr: unused 846734efb46Sjohn stultz * @buf: name of override clocksource 847734efb46Sjohn stultz * @count: length of buffer 848734efb46Sjohn stultz * 849734efb46Sjohn stultz * Takes input from sysfs interface for manually overriding the default 850b71a8eb0SUwe Kleine-König * clocksource selection. 851734efb46Sjohn stultz */ 852d369a5d8SKay Sievers static ssize_t sysfs_override_clocksource(struct device *dev, 853d369a5d8SKay Sievers struct device_attribute *attr, 854734efb46Sjohn stultz const char *buf, size_t count) 855734efb46Sjohn stultz { 856233bcb41SElad Wexler ssize_t ret; 857734efb46Sjohn stultz 85875c5158fSMartin Schwidefsky mutex_lock(&clocksource_mutex); 859734efb46Sjohn stultz 86003e13cf5SThomas Gleixner ret = sysfs_get_uname(buf, override_name, count); 86129b54078SThomas Gleixner if (ret >= 0) 862f1b82746SMartin Schwidefsky clocksource_select(); 863734efb46Sjohn stultz 86475c5158fSMartin Schwidefsky mutex_unlock(&clocksource_mutex); 865734efb46Sjohn stultz 866734efb46Sjohn stultz return ret; 867734efb46Sjohn stultz } 868734efb46Sjohn stultz 869734efb46Sjohn stultz /** 8707eaeb343SThomas Gleixner * sysfs_unbind_current_clocksource - interface for manually unbinding clocksource 8717eaeb343SThomas Gleixner * @dev: unused 8727eaeb343SThomas Gleixner * @attr: unused 
8737eaeb343SThomas Gleixner * @buf: unused 8747eaeb343SThomas Gleixner * @count: length of buffer 8757eaeb343SThomas Gleixner * 8767eaeb343SThomas Gleixner * Takes input from sysfs interface for manually unbinding a clocksource. 8777eaeb343SThomas Gleixner */ 8787eaeb343SThomas Gleixner static ssize_t sysfs_unbind_clocksource(struct device *dev, 8797eaeb343SThomas Gleixner struct device_attribute *attr, 8807eaeb343SThomas Gleixner const char *buf, size_t count) 8817eaeb343SThomas Gleixner { 8827eaeb343SThomas Gleixner struct clocksource *cs; 8837eaeb343SThomas Gleixner char name[CS_NAME_LEN]; 884233bcb41SElad Wexler ssize_t ret; 8857eaeb343SThomas Gleixner 88603e13cf5SThomas Gleixner ret = sysfs_get_uname(buf, name, count); 8877eaeb343SThomas Gleixner if (ret < 0) 8887eaeb343SThomas Gleixner return ret; 8897eaeb343SThomas Gleixner 8907eaeb343SThomas Gleixner ret = -ENODEV; 8917eaeb343SThomas Gleixner mutex_lock(&clocksource_mutex); 8927eaeb343SThomas Gleixner list_for_each_entry(cs, &clocksource_list, list) { 8937eaeb343SThomas Gleixner if (strcmp(cs->name, name)) 8947eaeb343SThomas Gleixner continue; 8957eaeb343SThomas Gleixner ret = clocksource_unbind(cs); 8967eaeb343SThomas Gleixner break; 8977eaeb343SThomas Gleixner } 8987eaeb343SThomas Gleixner mutex_unlock(&clocksource_mutex); 8997eaeb343SThomas Gleixner 9007eaeb343SThomas Gleixner return ret ? 
ret : count; 9017eaeb343SThomas Gleixner } 9027eaeb343SThomas Gleixner 9037eaeb343SThomas Gleixner /** 904734efb46Sjohn stultz * sysfs_show_available_clocksources - sysfs interface for listing clocksource 905734efb46Sjohn stultz * @dev: unused 906b1b73d09SKusanagi Kouichi * @attr: unused 907734efb46Sjohn stultz * @buf: char buffer to be filled with clocksource list 908734efb46Sjohn stultz * 909734efb46Sjohn stultz * Provides sysfs interface for listing registered clocksources 910734efb46Sjohn stultz */ 911734efb46Sjohn stultz static ssize_t 912d369a5d8SKay Sievers sysfs_show_available_clocksources(struct device *dev, 913d369a5d8SKay Sievers struct device_attribute *attr, 9144a0b2b4dSAndi Kleen char *buf) 915734efb46Sjohn stultz { 9162e197586SMatthias Kaehlcke struct clocksource *src; 9175e2cb101SMiao Xie ssize_t count = 0; 918734efb46Sjohn stultz 91975c5158fSMartin Schwidefsky mutex_lock(&clocksource_mutex); 9202e197586SMatthias Kaehlcke list_for_each_entry(src, &clocksource_list, list) { 921cd6d95d8SThomas Gleixner /* 922cd6d95d8SThomas Gleixner * Don't show non-HRES clocksource if the tick code is 923cd6d95d8SThomas Gleixner * in one shot mode (highres=on or nohz=on) 924cd6d95d8SThomas Gleixner */ 925cd6d95d8SThomas Gleixner if (!tick_oneshot_mode_active() || 9263f68535aSjohn stultz (src->flags & CLOCK_SOURCE_VALID_FOR_HRES)) 9275e2cb101SMiao Xie count += snprintf(buf + count, 9285e2cb101SMiao Xie max((ssize_t)PAGE_SIZE - count, (ssize_t)0), 9295e2cb101SMiao Xie "%s ", src->name); 930734efb46Sjohn stultz } 93175c5158fSMartin Schwidefsky mutex_unlock(&clocksource_mutex); 932734efb46Sjohn stultz 9335e2cb101SMiao Xie count += snprintf(buf + count, 9345e2cb101SMiao Xie max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n"); 935734efb46Sjohn stultz 9365e2cb101SMiao Xie return count; 937734efb46Sjohn stultz } 938734efb46Sjohn stultz 939734efb46Sjohn stultz /* 940734efb46Sjohn stultz * Sysfs setup bits: 941734efb46Sjohn stultz */ 942d369a5d8SKay Sievers static 
DEVICE_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources, 943734efb46Sjohn stultz sysfs_override_clocksource); 944734efb46Sjohn stultz 9457eaeb343SThomas Gleixner static DEVICE_ATTR(unbind_clocksource, 0200, NULL, sysfs_unbind_clocksource); 9467eaeb343SThomas Gleixner 947d369a5d8SKay Sievers static DEVICE_ATTR(available_clocksource, 0444, 948734efb46Sjohn stultz sysfs_show_available_clocksources, NULL); 949734efb46Sjohn stultz 950d369a5d8SKay Sievers static struct bus_type clocksource_subsys = { 951af5ca3f4SKay Sievers .name = "clocksource", 952d369a5d8SKay Sievers .dev_name = "clocksource", 953734efb46Sjohn stultz }; 954734efb46Sjohn stultz 955d369a5d8SKay Sievers static struct device device_clocksource = { 956734efb46Sjohn stultz .id = 0, 957d369a5d8SKay Sievers .bus = &clocksource_subsys, 958734efb46Sjohn stultz }; 959734efb46Sjohn stultz 960ad596171Sjohn stultz static int __init init_clocksource_sysfs(void) 961734efb46Sjohn stultz { 962d369a5d8SKay Sievers int error = subsys_system_register(&clocksource_subsys, NULL); 963734efb46Sjohn stultz 964734efb46Sjohn stultz if (!error) 965d369a5d8SKay Sievers error = device_register(&device_clocksource); 966734efb46Sjohn stultz if (!error) 967d369a5d8SKay Sievers error = device_create_file( 968734efb46Sjohn stultz &device_clocksource, 969d369a5d8SKay Sievers &dev_attr_current_clocksource); 970734efb46Sjohn stultz if (!error) 9717eaeb343SThomas Gleixner error = device_create_file(&device_clocksource, 9727eaeb343SThomas Gleixner &dev_attr_unbind_clocksource); 9737eaeb343SThomas Gleixner if (!error) 974d369a5d8SKay Sievers error = device_create_file( 975734efb46Sjohn stultz &device_clocksource, 976d369a5d8SKay Sievers &dev_attr_available_clocksource); 977734efb46Sjohn stultz return error; 978734efb46Sjohn stultz } 979734efb46Sjohn stultz 980734efb46Sjohn stultz device_initcall(init_clocksource_sysfs); 9812b013700SDaniel Walker #endif /* CONFIG_SYSFS */ 982734efb46Sjohn stultz 983734efb46Sjohn stultz /** 
984734efb46Sjohn stultz * boot_override_clocksource - boot clock override 985734efb46Sjohn stultz * @str: override name 986734efb46Sjohn stultz * 987734efb46Sjohn stultz * Takes a clocksource= boot argument and uses it 988734efb46Sjohn stultz * as the clocksource override name. 989734efb46Sjohn stultz */ 990734efb46Sjohn stultz static int __init boot_override_clocksource(char* str) 991734efb46Sjohn stultz { 99275c5158fSMartin Schwidefsky mutex_lock(&clocksource_mutex); 993734efb46Sjohn stultz if (str) 994734efb46Sjohn stultz strlcpy(override_name, str, sizeof(override_name)); 99575c5158fSMartin Schwidefsky mutex_unlock(&clocksource_mutex); 996734efb46Sjohn stultz return 1; 997734efb46Sjohn stultz } 998734efb46Sjohn stultz 999734efb46Sjohn stultz __setup("clocksource=", boot_override_clocksource); 1000734efb46Sjohn stultz 1001734efb46Sjohn stultz /** 1002734efb46Sjohn stultz * boot_override_clock - Compatibility layer for deprecated boot option 1003734efb46Sjohn stultz * @str: override name 1004734efb46Sjohn stultz * 1005734efb46Sjohn stultz * DEPRECATED! Takes a clock= boot argument and uses it 1006734efb46Sjohn stultz * as the clocksource override name 1007734efb46Sjohn stultz */ 1008734efb46Sjohn stultz static int __init boot_override_clock(char* str) 1009734efb46Sjohn stultz { 10105d0cf410Sjohn stultz if (!strcmp(str, "pmtmr")) { 10115d0cf410Sjohn stultz printk("Warning: clock=pmtmr is deprecated. " 10125d0cf410Sjohn stultz "Use clocksource=acpi_pm.\n"); 10135d0cf410Sjohn stultz return boot_override_clocksource("acpi_pm"); 10145d0cf410Sjohn stultz } 10155d0cf410Sjohn stultz printk("Warning! clock= boot option is deprecated. " 10165d0cf410Sjohn stultz "Use clocksource=xyz\n"); 1017734efb46Sjohn stultz return boot_override_clocksource(str); 1018734efb46Sjohn stultz } 1019734efb46Sjohn stultz 1020734efb46Sjohn stultz __setup("clock=", boot_override_clock); 1021