xref: /openbmc/linux/kernel/time/clocksource.c (revision cd2af07d)
1734efb46Sjohn stultz /*
2734efb46Sjohn stultz  * linux/kernel/time/clocksource.c
3734efb46Sjohn stultz  *
4734efb46Sjohn stultz  * This file contains the functions which manage clocksource drivers.
5734efb46Sjohn stultz  *
6734efb46Sjohn stultz  * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
7734efb46Sjohn stultz  *
8734efb46Sjohn stultz  * This program is free software; you can redistribute it and/or modify
9734efb46Sjohn stultz  * it under the terms of the GNU General Public License as published by
10734efb46Sjohn stultz  * the Free Software Foundation; either version 2 of the License, or
11734efb46Sjohn stultz  * (at your option) any later version.
12734efb46Sjohn stultz  *
13734efb46Sjohn stultz  * This program is distributed in the hope that it will be useful,
14734efb46Sjohn stultz  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15734efb46Sjohn stultz  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16734efb46Sjohn stultz  * GNU General Public License for more details.
17734efb46Sjohn stultz  *
18734efb46Sjohn stultz  * You should have received a copy of the GNU General Public License
19734efb46Sjohn stultz  * along with this program; if not, write to the Free Software
20734efb46Sjohn stultz  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21734efb46Sjohn stultz  *
22734efb46Sjohn stultz  * TODO WishList:
23734efb46Sjohn stultz  *   o Allow clocksource drivers to be unregistered
24734efb46Sjohn stultz  */
25734efb46Sjohn stultz 
2645bbfe64SJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2745bbfe64SJoe Perches 
28d369a5d8SKay Sievers #include <linux/device.h>
29734efb46Sjohn stultz #include <linux/clocksource.h>
30734efb46Sjohn stultz #include <linux/init.h>
31734efb46Sjohn stultz #include <linux/module.h>
32dc29a365SMathieu Desnoyers #include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
3379bf2bb3SThomas Gleixner #include <linux/tick.h>
3401548f4dSMartin Schwidefsky #include <linux/kthread.h>
35734efb46Sjohn stultz 
36c1797bafSThomas Gleixner #include "tick-internal.h"
373a978377SThomas Gleixner #include "timekeeping_internal.h"
3803e13cf5SThomas Gleixner 
397d2f944aSThomas Gleixner /**
407d2f944aSThomas Gleixner  * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
417d2f944aSThomas Gleixner  * @mult:	pointer to mult variable
427d2f944aSThomas Gleixner  * @shift:	pointer to shift variable
437d2f944aSThomas Gleixner  * @from:	frequency to convert from
447d2f944aSThomas Gleixner  * @to:		frequency to convert to
455fdade95SNicolas Pitre  * @maxsec:	guaranteed runtime conversion range in seconds
467d2f944aSThomas Gleixner  *
477d2f944aSThomas Gleixner  * The function evaluates the shift/mult pair for the scaled math
487d2f944aSThomas Gleixner  * operations of clocksources and clockevents.
497d2f944aSThomas Gleixner  *
507d2f944aSThomas Gleixner  * @to and @from are frequency values in HZ. For clock sources @to is
517d2f944aSThomas Gleixner  * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
527d2f944aSThomas Gleixner  * event @to is the counter frequency and @from is NSEC_PER_SEC.
537d2f944aSThomas Gleixner  *
545fdade95SNicolas Pitre  * The @maxsec conversion range argument controls the time frame in
557d2f944aSThomas Gleixner  * seconds which must be covered by the runtime conversion with the
567d2f944aSThomas Gleixner  * calculated mult and shift factors. This guarantees that no 64bit
577d2f944aSThomas Gleixner  * overflow happens when the input value of the conversion is
587d2f944aSThomas Gleixner  * multiplied with the calculated mult factor. Larger ranges may
597d2f944aSThomas Gleixner  * reduce the conversion accuracy by chosing smaller mult and shift
607d2f944aSThomas Gleixner  * factors.
617d2f944aSThomas Gleixner  */
627d2f944aSThomas Gleixner void
635fdade95SNicolas Pitre clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
647d2f944aSThomas Gleixner {
657d2f944aSThomas Gleixner 	u64 tmp;
667d2f944aSThomas Gleixner 	u32 sft, sftacc= 32;
677d2f944aSThomas Gleixner 
687d2f944aSThomas Gleixner 	/*
697d2f944aSThomas Gleixner 	 * Calculate the shift factor which is limiting the conversion
707d2f944aSThomas Gleixner 	 * range:
717d2f944aSThomas Gleixner 	 */
	/*
	 * sftacc ends up as the number of result bits the scaled value
	 * may occupy: every bit that (maxsec * from) needs above 32
	 * reduces the headroom available for mult.
	 */
725fdade95SNicolas Pitre 	tmp = ((u64)maxsec * from) >> 32;
737d2f944aSThomas Gleixner 	while (tmp) {
747d2f944aSThomas Gleixner 		tmp >>=1;
757d2f944aSThomas Gleixner 		sftacc--;
767d2f944aSThomas Gleixner 	}
777d2f944aSThomas Gleixner 
787d2f944aSThomas Gleixner 	/*
797d2f944aSThomas Gleixner 	 * Find the conversion shift/mult pair which has the best
807d2f944aSThomas Gleixner 	 * accuracy and fits the maxsec conversion range:
817d2f944aSThomas Gleixner 	 */
827d2f944aSThomas Gleixner 	for (sft = 32; sft > 0; sft--) {
837d2f944aSThomas Gleixner 		tmp = (u64) to << sft;
		/* Round to nearest when dividing by @from below: */
847d2f944aSThomas Gleixner 		tmp += from / 2;
857d2f944aSThomas Gleixner 		do_div(tmp, from);
		/* Accept the largest shift whose mult fits in sftacc bits: */
867d2f944aSThomas Gleixner 		if ((tmp >> sftacc) == 0)
877d2f944aSThomas Gleixner 			break;
887d2f944aSThomas Gleixner 	}
897d2f944aSThomas Gleixner 	*mult = tmp;
907d2f944aSThomas Gleixner 	*shift = sft;
917d2f944aSThomas Gleixner }
925304121aSMurali Karicheri EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
937d2f944aSThomas Gleixner 
94734efb46Sjohn stultz /*[Clocksource internal variables]---------
95734efb46Sjohn stultz  * curr_clocksource:
96f1b82746SMartin Schwidefsky  *	currently selected clocksource.
97734efb46Sjohn stultz  * clocksource_list:
98734efb46Sjohn stultz  *	linked list with the registered clocksources
9975c5158fSMartin Schwidefsky  * clocksource_mutex:
10075c5158fSMartin Schwidefsky  *	protects manipulations to curr_clocksource and the clocksource_list
101734efb46Sjohn stultz  * override_name:
102734efb46Sjohn stultz  *	Name of the user-specified clocksource.
103734efb46Sjohn stultz  */
104f1b82746SMartin Schwidefsky static struct clocksource *curr_clocksource;
105734efb46Sjohn stultz static LIST_HEAD(clocksource_list);
10675c5158fSMartin Schwidefsky static DEFINE_MUTEX(clocksource_mutex);
10729b54078SThomas Gleixner static char override_name[CS_NAME_LEN];
10854a6bc0bSThomas Gleixner static int finished_booting;
109734efb46Sjohn stultz 
1105d8b34fdSThomas Gleixner #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
111f79e0258SMartin Schwidefsky static void clocksource_watchdog_work(struct work_struct *work);
112332962f2SThomas Gleixner static void clocksource_select(void);
113f79e0258SMartin Schwidefsky 
1145d8b34fdSThomas Gleixner static LIST_HEAD(watchdog_list);
1155d8b34fdSThomas Gleixner static struct clocksource *watchdog;
1165d8b34fdSThomas Gleixner static struct timer_list watchdog_timer;
117f79e0258SMartin Schwidefsky static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
1185d8b34fdSThomas Gleixner static DEFINE_SPINLOCK(watchdog_lock);
119fb63a0ebSMartin Schwidefsky static int watchdog_running;
1209fb60336SThomas Gleixner static atomic_t watchdog_reset_pending;
121b52f52a0SThomas Gleixner 
/* Take watchdog_lock with interrupts disabled; prior IRQ state is saved in *flags. */
1222aae7bcfSPeter Zijlstra static inline void clocksource_watchdog_lock(unsigned long *flags)
1232aae7bcfSPeter Zijlstra {
1242aae7bcfSPeter Zijlstra 	spin_lock_irqsave(&watchdog_lock, *flags);
1252aae7bcfSPeter Zijlstra }
1262aae7bcfSPeter Zijlstra 
/* Release watchdog_lock and restore the IRQ state saved in *flags. */
1272aae7bcfSPeter Zijlstra static inline void clocksource_watchdog_unlock(unsigned long *flags)
1282aae7bcfSPeter Zijlstra {
1292aae7bcfSPeter Zijlstra 	spin_unlock_irqrestore(&watchdog_lock, *flags);
1302aae7bcfSPeter Zijlstra }
1312aae7bcfSPeter Zijlstra 
13201548f4dSMartin Schwidefsky static int clocksource_watchdog_kthread(void *data);
133d0981a1bSThomas Gleixner static void __clocksource_change_rating(struct clocksource *cs, int rating);
134c55c87c8SMartin Schwidefsky 
1355d8b34fdSThomas Gleixner /*
13635c35d1aSDaniel Walker  * Interval: 0.5sec Threshold: 0.0625s
1375d8b34fdSThomas Gleixner  */
1385d8b34fdSThomas Gleixner #define WATCHDOG_INTERVAL (HZ >> 1)
13935c35d1aSDaniel Walker #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
1405d8b34fdSThomas Gleixner 
/*
 * Workqueue shim: hand the unstable-clocksource handling to
 * clocksource_watchdog_kthread(), which runs in process context and
 * takes clocksource_mutex (see clocksource_watchdog_kthread() below).
 */
14101548f4dSMartin Schwidefsky static void clocksource_watchdog_work(struct work_struct *work)
14201548f4dSMartin Schwidefsky {
14301548f4dSMartin Schwidefsky 	/*
14401548f4dSMartin Schwidefsky 	 * If kthread_run fails the next watchdog scan over the
14501548f4dSMartin Schwidefsky 	 * watchdog_list will find the unstable clock again.
14601548f4dSMartin Schwidefsky 	 */
14701548f4dSMartin Schwidefsky 	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
14801548f4dSMartin Schwidefsky }
14901548f4dSMartin Schwidefsky 
/*
 * Flag @cs as unstable and clear its highres validity. Both callers
 * visible in this file invoke this with watchdog_lock held.
 */
1507285dd7fSThomas Gleixner static void __clocksource_unstable(struct clocksource *cs)
1517285dd7fSThomas Gleixner {
1527285dd7fSThomas Gleixner 	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
1537285dd7fSThomas Gleixner 	cs->flags |= CLOCK_SOURCE_UNSTABLE;
15412907fbbSThomas Gleixner 
155cd2af07dSPeter Zijlstra 	/*
156cd2af07dSPeter Zijlstra 	 * If the clocksource is registered clocksource_watchdog_kthread() will
157cd2af07dSPeter Zijlstra 	 * re-rate and re-select.
158cd2af07dSPeter Zijlstra 	 */
	/* Unregistered (cs->list empty): just zero the rating and bail. */
159cd2af07dSPeter Zijlstra 	if (list_empty(&cs->list)) {
160cd2af07dSPeter Zijlstra 		cs->rating = 0;
1612aae7bcfSPeter Zijlstra 		return;
162cd2af07dSPeter Zijlstra 	}
1632aae7bcfSPeter Zijlstra 
16412907fbbSThomas Gleixner 	if (cs->mark_unstable)
16512907fbbSThomas Gleixner 		cs->mark_unstable(cs);
16612907fbbSThomas Gleixner 
167cd2af07dSPeter Zijlstra 	/* kick clocksource_watchdog_kthread() */
16854a6bc0bSThomas Gleixner 	if (finished_booting)
1697285dd7fSThomas Gleixner 		schedule_work(&watchdog_work);
1707285dd7fSThomas Gleixner }
1717285dd7fSThomas Gleixner 
1727285dd7fSThomas Gleixner /**
1737285dd7fSThomas Gleixner  * clocksource_mark_unstable - mark clocksource unstable via watchdog
1747285dd7fSThomas Gleixner  * @cs:		clocksource to be marked unstable
1757285dd7fSThomas Gleixner  *
1767285dd7fSThomas Gleixner  * This function is called instead of clocksource_change_rating from
1777285dd7fSThomas Gleixner  * cpu hotplug code to avoid a deadlock between the clocksource mutex
1787285dd7fSThomas Gleixner  * and the cpu hotplug mutex. It defers the update of the clocksource
1797285dd7fSThomas Gleixner  * to the watchdog thread.
1807285dd7fSThomas Gleixner  */
1817285dd7fSThomas Gleixner void clocksource_mark_unstable(struct clocksource *cs)
1827285dd7fSThomas Gleixner {
1837285dd7fSThomas Gleixner 	unsigned long flags;
1847285dd7fSThomas Gleixner 
1857285dd7fSThomas Gleixner 	spin_lock_irqsave(&watchdog_lock, flags);
1867285dd7fSThomas Gleixner 	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		/*
		 * Put a registered (cs->list non-empty) clocksource under
		 * watchdog surveillance unless it is already on the list.
		 */
1872aae7bcfSPeter Zijlstra 		if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
1887285dd7fSThomas Gleixner 			list_add(&cs->wd_list, &watchdog_list);
1897285dd7fSThomas Gleixner 		__clocksource_unstable(cs);
1907285dd7fSThomas Gleixner 	}
1917285dd7fSThomas Gleixner 	spin_unlock_irqrestore(&watchdog_lock, flags);
1927285dd7fSThomas Gleixner }
1935d8b34fdSThomas Gleixner 
/*
 * Periodic timer callback (re-armed every WATCHDOG_INTERVAL): compare
 * each watched clocksource against the watchdog clock and mark any
 * whose skew exceeds WATCHDOG_THRESHOLD as unstable.
 */
194e99e88a9SKees Cook static void clocksource_watchdog(struct timer_list *unused)
1955d8b34fdSThomas Gleixner {
196c55c87c8SMartin Schwidefsky 	struct clocksource *cs;
197a5a1d1c2SThomas Gleixner 	u64 csnow, wdnow, cslast, wdlast, delta;
1985d8b34fdSThomas Gleixner 	int64_t wd_nsec, cs_nsec;
1999fb60336SThomas Gleixner 	int next_cpu, reset_pending;
2005d8b34fdSThomas Gleixner 
2015d8b34fdSThomas Gleixner 	spin_lock(&watchdog_lock);
202fb63a0ebSMartin Schwidefsky 	if (!watchdog_running)
203fb63a0ebSMartin Schwidefsky 		goto out;
2045d8b34fdSThomas Gleixner 
2059fb60336SThomas Gleixner 	reset_pending = atomic_read(&watchdog_reset_pending);
2069fb60336SThomas Gleixner 
207c55c87c8SMartin Schwidefsky 	list_for_each_entry(cs, &watchdog_list, wd_list) {
208c55c87c8SMartin Schwidefsky 
209c55c87c8SMartin Schwidefsky 		/* Clocksource already marked unstable? */
21001548f4dSMartin Schwidefsky 		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
21154a6bc0bSThomas Gleixner 			if (finished_booting)
21201548f4dSMartin Schwidefsky 				schedule_work(&watchdog_work);
213c55c87c8SMartin Schwidefsky 			continue;
21401548f4dSMartin Schwidefsky 		}
215c55c87c8SMartin Schwidefsky 
		/*
		 * Read both clocks back-to-back with IRQs off so no
		 * interrupt widens the window between the two reads.
		 */
216b5199515SThomas Gleixner 		local_irq_disable();
2178e19608eSMagnus Damm 		csnow = cs->read(cs);
218b5199515SThomas Gleixner 		wdnow = watchdog->read(watchdog);
219b5199515SThomas Gleixner 		local_irq_enable();
220b52f52a0SThomas Gleixner 
2218cf4e750SMartin Schwidefsky 		/* Clocksource initialized ? */
2229fb60336SThomas Gleixner 		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
2239fb60336SThomas Gleixner 		    atomic_read(&watchdog_reset_pending)) {
2248cf4e750SMartin Schwidefsky 			cs->flags |= CLOCK_SOURCE_WATCHDOG;
225b5199515SThomas Gleixner 			cs->wd_last = wdnow;
226b5199515SThomas Gleixner 			cs->cs_last = csnow;
227b52f52a0SThomas Gleixner 			continue;
228b52f52a0SThomas Gleixner 		}
229b52f52a0SThomas Gleixner 
2303a978377SThomas Gleixner 		delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
2313a978377SThomas Gleixner 		wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
2323a978377SThomas Gleixner 					     watchdog->shift);
233b5199515SThomas Gleixner 
2343a978377SThomas Gleixner 		delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
2353a978377SThomas Gleixner 		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
2360b046b21SJohn Stultz 		wdlast = cs->wd_last; /* save these in case we print them */
2370b046b21SJohn Stultz 		cslast = cs->cs_last;
238b5199515SThomas Gleixner 		cs->cs_last = csnow;
239b5199515SThomas Gleixner 		cs->wd_last = wdnow;
240b5199515SThomas Gleixner 
		/* A reset became pending mid-scan: skip skew evaluation. */
2419fb60336SThomas Gleixner 		if (atomic_read(&watchdog_reset_pending))
2429fb60336SThomas Gleixner 			continue;
2439fb60336SThomas Gleixner 
244b5199515SThomas Gleixner 		/* Check the deviation from the watchdog clocksource. */
24579211c8eSAndrew Morton 		if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
246390dd67cSSeiichi Ikarashi 			pr_warn("timekeeping watchdog on CPU%d: Marking clocksource '%s' as unstable because the skew is too large:\n",
247390dd67cSSeiichi Ikarashi 				smp_processor_id(), cs->name);
2480b046b21SJohn Stultz 			pr_warn("                      '%s' wd_now: %llx wd_last: %llx mask: %llx\n",
2490b046b21SJohn Stultz 				watchdog->name, wdnow, wdlast, watchdog->mask);
2500b046b21SJohn Stultz 			pr_warn("                      '%s' cs_now: %llx cs_last: %llx mask: %llx\n",
2510b046b21SJohn Stultz 				cs->name, csnow, cslast, cs->mask);
2520b046b21SJohn Stultz 			__clocksource_unstable(cs);
2538cf4e750SMartin Schwidefsky 			continue;
2548cf4e750SMartin Schwidefsky 		}
2558cf4e750SMartin Schwidefsky 
256b421b22bSPeter Zijlstra 		if (cs == curr_clocksource && cs->tick_stable)
257b421b22bSPeter Zijlstra 			cs->tick_stable(cs);
258b421b22bSPeter Zijlstra 
2598cf4e750SMartin Schwidefsky 		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
2608cf4e750SMartin Schwidefsky 		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
2615d8b34fdSThomas Gleixner 		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
262332962f2SThomas Gleixner 			/* Mark it valid for high-res. */
2635d8b34fdSThomas Gleixner 			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
264332962f2SThomas Gleixner 
26579bf2bb3SThomas Gleixner 			/*
266332962f2SThomas Gleixner 			 * clocksource_done_booting() will sort it if
267332962f2SThomas Gleixner 			 * finished_booting is not set yet.
26879bf2bb3SThomas Gleixner 			 */
269332962f2SThomas Gleixner 			if (!finished_booting)
270332962f2SThomas Gleixner 				continue;
271332962f2SThomas Gleixner 
272332962f2SThomas Gleixner 			/*
273332962f2SThomas Gleixner 			 * If this is not the current clocksource let
274332962f2SThomas Gleixner 			 * the watchdog thread reselect it. Due to the
275332962f2SThomas Gleixner 			 * change to high res this clocksource might
276332962f2SThomas Gleixner 			 * be preferred now. If it is the current
277332962f2SThomas Gleixner 			 * clocksource let the tick code know about
278332962f2SThomas Gleixner 			 * that change.
279332962f2SThomas Gleixner 			 */
280332962f2SThomas Gleixner 			if (cs != curr_clocksource) {
281332962f2SThomas Gleixner 				cs->flags |= CLOCK_SOURCE_RESELECT;
282332962f2SThomas Gleixner 				schedule_work(&watchdog_work);
283332962f2SThomas Gleixner 			} else {
28479bf2bb3SThomas Gleixner 				tick_clock_notify();
2855d8b34fdSThomas Gleixner 			}
2865d8b34fdSThomas Gleixner 		}
287332962f2SThomas Gleixner 	}
2885d8b34fdSThomas Gleixner 
2896993fc5bSAndi Kleen 	/*
2909fb60336SThomas Gleixner 	 * We only clear the watchdog_reset_pending, when we did a
2919fb60336SThomas Gleixner 	 * full cycle through all clocksources.
2929fb60336SThomas Gleixner 	 */
2939fb60336SThomas Gleixner 	if (reset_pending)
2949fb60336SThomas Gleixner 		atomic_dec(&watchdog_reset_pending);
2959fb60336SThomas Gleixner 
2969fb60336SThomas Gleixner 	/*
297c55c87c8SMartin Schwidefsky 	 * Cycle through CPUs to check if the CPUs stay synchronized
298c55c87c8SMartin Schwidefsky 	 * to each other.
2996993fc5bSAndi Kleen 	 */
300c55c87c8SMartin Schwidefsky 	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
301cad0e458SMike Travis 	if (next_cpu >= nr_cpu_ids)
3026b954823SRusty Russell 		next_cpu = cpumask_first(cpu_online_mask);
3036993fc5bSAndi Kleen 	watchdog_timer.expires += WATCHDOG_INTERVAL;
3046993fc5bSAndi Kleen 	add_timer_on(&watchdog_timer, next_cpu);
305fb63a0ebSMartin Schwidefsky out:
3065d8b34fdSThomas Gleixner 	spin_unlock(&watchdog_lock);
3075d8b34fdSThomas Gleixner }
3080f8e8ef7SMartin Schwidefsky 
309fb63a0ebSMartin Schwidefsky static inline void clocksource_start_watchdog(void)
310fb63a0ebSMartin Schwidefsky {
	/* Needs a watchdog clock and at least one watched clocksource. */
311fb63a0ebSMartin Schwidefsky 	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
312fb63a0ebSMartin Schwidefsky 		return;
313e99e88a9SKees Cook 	timer_setup(&watchdog_timer, clocksource_watchdog, 0);
314fb63a0ebSMartin Schwidefsky 	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
315fb63a0ebSMartin Schwidefsky 	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
316fb63a0ebSMartin Schwidefsky 	watchdog_running = 1;
317fb63a0ebSMartin Schwidefsky }
318fb63a0ebSMartin Schwidefsky 
319fb63a0ebSMartin Schwidefsky static inline void clocksource_stop_watchdog(void)
320fb63a0ebSMartin Schwidefsky {
	/* Keep running while any clocksource is still being watched. */
321fb63a0ebSMartin Schwidefsky 	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
322fb63a0ebSMartin Schwidefsky 		return;
323fb63a0ebSMartin Schwidefsky 	del_timer(&watchdog_timer);
324fb63a0ebSMartin Schwidefsky 	watchdog_running = 0;
325fb63a0ebSMartin Schwidefsky }
326fb63a0ebSMartin Schwidefsky 
3270f8e8ef7SMartin Schwidefsky static inline void clocksource_reset_watchdog(void)
3280f8e8ef7SMartin Schwidefsky {
3290f8e8ef7SMartin Schwidefsky 	struct clocksource *cs;
3300f8e8ef7SMartin Schwidefsky 
	/*
	 * Clearing CLOCK_SOURCE_WATCHDOG makes the next watchdog run
	 * re-initialize wd_last/cs_last instead of comparing deltas.
	 * The visible caller holds watchdog_lock.
	 */
3310f8e8ef7SMartin Schwidefsky 	list_for_each_entry(cs, &watchdog_list, wd_list)
3320f8e8ef7SMartin Schwidefsky 		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
3330f8e8ef7SMartin Schwidefsky }
3340f8e8ef7SMartin Schwidefsky 
335b52f52a0SThomas Gleixner static void clocksource_resume_watchdog(void)
336b52f52a0SThomas Gleixner {
	/* Decremented by clocksource_watchdog() after a full scan cycle. */
3379fb60336SThomas Gleixner 	atomic_inc(&watchdog_reset_pending);
338b52f52a0SThomas Gleixner }
339b52f52a0SThomas Gleixner 
340fb63a0ebSMartin Schwidefsky static void clocksource_enqueue_watchdog(struct clocksource *cs)
3415d8b34fdSThomas Gleixner {
	/*
	 * Always initialize wd_list so list_empty(&cs->wd_list) checks
	 * (e.g. in clocksource_mark_unstable()) are valid even for
	 * clocksources that never get watched.
	 */
3425b9e886aSPeter Zijlstra 	INIT_LIST_HEAD(&cs->wd_list);
3435b9e886aSPeter Zijlstra 
3445d8b34fdSThomas Gleixner 	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
345fb63a0ebSMartin Schwidefsky 		/* cs is a clocksource to be watched. */
3465d8b34fdSThomas Gleixner 		list_add(&cs->wd_list, &watchdog_list);
347fb63a0ebSMartin Schwidefsky 		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
348948ac6d7SThomas Gleixner 	} else {
349fb63a0ebSMartin Schwidefsky 		/* cs is a watchdog. */
350948ac6d7SThomas Gleixner 		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
3515d8b34fdSThomas Gleixner 			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
352bbf66d89SVitaly Kuznetsov 	}
353bbf66d89SVitaly Kuznetsov }
354bbf66d89SVitaly Kuznetsov 
/*
 * (Re)pick the highest-rated clocksource that is not itself subject to
 * verification to serve as the watchdog. With @fallback, the current
 * watchdog is excluded; if no replacement is found it is restored.
 */
355bbf66d89SVitaly Kuznetsov static void clocksource_select_watchdog(bool fallback)
356bbf66d89SVitaly Kuznetsov {
357bbf66d89SVitaly Kuznetsov 	struct clocksource *cs, *old_wd;
358bbf66d89SVitaly Kuznetsov 	unsigned long flags;
359bbf66d89SVitaly Kuznetsov 
360bbf66d89SVitaly Kuznetsov 	spin_lock_irqsave(&watchdog_lock, flags);
361bbf66d89SVitaly Kuznetsov 	/* save current watchdog */
362bbf66d89SVitaly Kuznetsov 	old_wd = watchdog;
363bbf66d89SVitaly Kuznetsov 	if (fallback)
364bbf66d89SVitaly Kuznetsov 		watchdog = NULL;
365bbf66d89SVitaly Kuznetsov 
366bbf66d89SVitaly Kuznetsov 	list_for_each_entry(cs, &clocksource_list, list) {
367bbf66d89SVitaly Kuznetsov 		/* cs is a clocksource to be watched. */
368bbf66d89SVitaly Kuznetsov 		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
369bbf66d89SVitaly Kuznetsov 			continue;
370bbf66d89SVitaly Kuznetsov 
371bbf66d89SVitaly Kuznetsov 		/* Skip current if we were requested for a fallback. */
372bbf66d89SVitaly Kuznetsov 		if (fallback && cs == old_wd)
373bbf66d89SVitaly Kuznetsov 			continue;
374bbf66d89SVitaly Kuznetsov 
375fb63a0ebSMartin Schwidefsky 		/* Pick the best watchdog. */
376bbf66d89SVitaly Kuznetsov 		if (!watchdog || cs->rating > watchdog->rating)
3775d8b34fdSThomas Gleixner 			watchdog = cs;
378bbf66d89SVitaly Kuznetsov 	}
379bbf66d89SVitaly Kuznetsov 	/* If we failed to find a fallback restore the old one. */
380bbf66d89SVitaly Kuznetsov 	if (!watchdog)
381bbf66d89SVitaly Kuznetsov 		watchdog = old_wd;
382bbf66d89SVitaly Kuznetsov 
383bbf66d89SVitaly Kuznetsov 	/* If we changed the watchdog we need to reset cycles. */
384bbf66d89SVitaly Kuznetsov 	if (watchdog != old_wd)
3850f8e8ef7SMartin Schwidefsky 		clocksource_reset_watchdog();
386bbf66d89SVitaly Kuznetsov 
387fb63a0ebSMartin Schwidefsky 	/* Check if the watchdog timer needs to be started. */
388fb63a0ebSMartin Schwidefsky 	clocksource_start_watchdog();
3895d8b34fdSThomas Gleixner 	spin_unlock_irqrestore(&watchdog_lock, flags);
3905d8b34fdSThomas Gleixner }
391fb63a0ebSMartin Schwidefsky 
392fb63a0ebSMartin Schwidefsky static void clocksource_dequeue_watchdog(struct clocksource *cs)
393fb63a0ebSMartin Schwidefsky {
	/* The watchdog clock itself is never on watchdog_list. */
394a89c7edbSThomas Gleixner 	if (cs != watchdog) {
395fb63a0ebSMartin Schwidefsky 		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
396fb63a0ebSMartin Schwidefsky 			/* cs is a watched clocksource. */
397fb63a0ebSMartin Schwidefsky 			list_del_init(&cs->wd_list);
398fb63a0ebSMartin Schwidefsky 			/* Check if the watchdog timer needs to be stopped. */
399fb63a0ebSMartin Schwidefsky 			clocksource_stop_watchdog();
400a89c7edbSThomas Gleixner 		}
401a89c7edbSThomas Gleixner 	}
402fb63a0ebSMartin Schwidefsky }
403fb63a0ebSMartin Schwidefsky 
/*
 * Demote clocksources flagged unstable to rating 0 and consume pending
 * reselect requests. Returns nonzero when the caller should run
 * clocksource_select().
 */
404332962f2SThomas Gleixner static int __clocksource_watchdog_kthread(void)
405c55c87c8SMartin Schwidefsky {
406c55c87c8SMartin Schwidefsky 	struct clocksource *cs, *tmp;
407c55c87c8SMartin Schwidefsky 	unsigned long flags;
408332962f2SThomas Gleixner 	int select = 0;
409c55c87c8SMartin Schwidefsky 
410c55c87c8SMartin Schwidefsky 	spin_lock_irqsave(&watchdog_lock, flags);
	/* _safe variant: unstable entries are unlinked while iterating. */
411332962f2SThomas Gleixner 	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
412c55c87c8SMartin Schwidefsky 		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
413c55c87c8SMartin Schwidefsky 			list_del_init(&cs->wd_list);
4142aae7bcfSPeter Zijlstra 			__clocksource_change_rating(cs, 0);
415332962f2SThomas Gleixner 			select = 1;
416332962f2SThomas Gleixner 		}
417332962f2SThomas Gleixner 		if (cs->flags & CLOCK_SOURCE_RESELECT) {
418332962f2SThomas Gleixner 			cs->flags &= ~CLOCK_SOURCE_RESELECT;
419332962f2SThomas Gleixner 			select = 1;
420332962f2SThomas Gleixner 		}
421c55c87c8SMartin Schwidefsky 	}
422c55c87c8SMartin Schwidefsky 	/* Check if the watchdog timer needs to be stopped. */
423c55c87c8SMartin Schwidefsky 	clocksource_stop_watchdog();
4246ea41d25SThomas Gleixner 	spin_unlock_irqrestore(&watchdog_lock, flags);
4256ea41d25SThomas Gleixner 
426332962f2SThomas Gleixner 	return select;
427332962f2SThomas Gleixner }
428332962f2SThomas Gleixner 
429332962f2SThomas Gleixner static int clocksource_watchdog_kthread(void *data)
430332962f2SThomas Gleixner {
	/* Process context: safe to take clocksource_mutex here. */
431332962f2SThomas Gleixner 	mutex_lock(&clocksource_mutex);
432332962f2SThomas Gleixner 	if (__clocksource_watchdog_kthread())
433332962f2SThomas Gleixner 		clocksource_select();
434d0981a1bSThomas Gleixner 	mutex_unlock(&clocksource_mutex);
43501548f4dSMartin Schwidefsky 	return 0;
436c55c87c8SMartin Schwidefsky }
437c55c87c8SMartin Schwidefsky 
/* True when @cs is currently acting as the watchdog reference clock. */
4387eaeb343SThomas Gleixner static bool clocksource_is_watchdog(struct clocksource *cs)
4397eaeb343SThomas Gleixner {
4407eaeb343SThomas Gleixner 	return cs == watchdog;
4417eaeb343SThomas Gleixner }
4427eaeb343SThomas Gleixner 
443fb63a0ebSMartin Schwidefsky #else /* CONFIG_CLOCKSOURCE_WATCHDOG */
444fb63a0ebSMartin Schwidefsky 
445fb63a0ebSMartin Schwidefsky static void clocksource_enqueue_watchdog(struct clocksource *cs)
4465d8b34fdSThomas Gleixner {
	/* No watchdog: trust continuous clocksources for highres directly. */
4475d8b34fdSThomas Gleixner 	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
4485d8b34fdSThomas Gleixner 		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
4495d8b34fdSThomas Gleixner }
450b52f52a0SThomas Gleixner 
/* No-op stubs for !CONFIG_CLOCKSOURCE_WATCHDOG builds. */
451bbf66d89SVitaly Kuznetsov static void clocksource_select_watchdog(bool fallback) { }
452fb63a0ebSMartin Schwidefsky static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
453b52f52a0SThomas Gleixner static inline void clocksource_resume_watchdog(void) { }
454332962f2SThomas Gleixner static inline int __clocksource_watchdog_kthread(void) { return 0; }
4557eaeb343SThomas Gleixner static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
456397bbf6dSPrarit Bhargava void clocksource_mark_unstable(struct clocksource *cs) { }
457fb63a0ebSMartin Schwidefsky 
/* Specifier order fixed: "inline" belongs between storage class and type. */
4582aae7bcfSPeter Zijlstra static inline void clocksource_watchdog_lock(unsigned long *flags) { }
4592aae7bcfSPeter Zijlstra static inline void clocksource_watchdog_unlock(unsigned long *flags) { }
4602aae7bcfSPeter Zijlstra 
461fb63a0ebSMartin Schwidefsky #endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
4625d8b34fdSThomas Gleixner 
463734efb46Sjohn stultz /**
464c54a42b1SMagnus Damm  * clocksource_suspend - suspend the clocksource(s)
465c54a42b1SMagnus Damm  */
466c54a42b1SMagnus Damm void clocksource_suspend(void)
467c54a42b1SMagnus Damm {
468c54a42b1SMagnus Damm 	struct clocksource *cs;
469c54a42b1SMagnus Damm 
	/* Walk in reverse so suspend order is the opposite of resume order. */
470c54a42b1SMagnus Damm 	list_for_each_entry_reverse(cs, &clocksource_list, list)
471c54a42b1SMagnus Damm 		if (cs->suspend)
472c54a42b1SMagnus Damm 			cs->suspend(cs);
473c54a42b1SMagnus Damm }
474c54a42b1SMagnus Damm 
475c54a42b1SMagnus Damm /**
476b52f52a0SThomas Gleixner  * clocksource_resume - resume the clocksource(s)
477b52f52a0SThomas Gleixner  */
478b52f52a0SThomas Gleixner void clocksource_resume(void)
479b52f52a0SThomas Gleixner {
4802e197586SMatthias Kaehlcke 	struct clocksource *cs;
481b52f52a0SThomas Gleixner 
48275c5158fSMartin Schwidefsky 	list_for_each_entry(cs, &clocksource_list, list)
483b52f52a0SThomas Gleixner 		if (cs->resume)
48417622339SMagnus Damm 			cs->resume(cs);
485b52f52a0SThomas Gleixner 
	/* Request a watchdog reset so stale wd/cs deltas are not compared. */
486b52f52a0SThomas Gleixner 	clocksource_resume_watchdog();
487b52f52a0SThomas Gleixner }
488b52f52a0SThomas Gleixner 
489b52f52a0SThomas Gleixner /**
4907c3078b6SJason Wessel  * clocksource_touch_watchdog - Update watchdog
4917c3078b6SJason Wessel  *
4927c3078b6SJason Wessel  * Update the watchdog after exception contexts such as kgdb so as not
4937b7422a5SThomas Gleixner  * to incorrectly trip the watchdog. This might fail when the kernel
4947b7422a5SThomas Gleixner  * was stopped in code which holds watchdog_lock.
4957c3078b6SJason Wessel  */
4967c3078b6SJason Wessel void clocksource_touch_watchdog(void)
4977c3078b6SJason Wessel {
	/* Same mechanism as resume: request a watchdog cycle reset. */
4987c3078b6SJason Wessel 	clocksource_resume_watchdog();
4997c3078b6SJason Wessel }
5007c3078b6SJason Wessel 
501734efb46Sjohn stultz /**
502d65670a7SJohn Stultz  * clocksource_max_adjustment- Returns max adjustment amount
503d65670a7SJohn Stultz  * @cs:         Pointer to clocksource
504d65670a7SJohn Stultz  *
505d65670a7SJohn Stultz  */
506d65670a7SJohn Stultz static u32 clocksource_max_adjustment(struct clocksource *cs)
507d65670a7SJohn Stultz {
508d65670a7SJohn Stultz 	u64 ret;
509d65670a7SJohn Stultz 	/*
51088b28adfSJim Cromie 	 * We won't try to correct for more than 11% adjustments (110,000 ppm),
511d65670a7SJohn Stultz 	 */
	/* ret = mult * 11 / 100, computed in 64 bit to avoid overflow. */
512d65670a7SJohn Stultz 	ret = (u64)cs->mult * 11;
513d65670a7SJohn Stultz 	do_div(ret,100);
514d65670a7SJohn Stultz 	return (u32)ret;
515d65670a7SJohn Stultz }
516d65670a7SJohn Stultz 
517d65670a7SJohn Stultz /**
51887d8b9ebSStephen Boyd  * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
51987d8b9ebSStephen Boyd  * @mult:	cycle to nanosecond multiplier
52087d8b9ebSStephen Boyd  * @shift:	cycle to nanosecond divisor (power of two)
52187d8b9ebSStephen Boyd  * @maxadj:	maximum adjustment value to mult (~11%)
52287d8b9ebSStephen Boyd  * @mask:	bitmask for two's complement subtraction of non 64 bit counters
523fb82fe2fSJohn Stultz  * @max_cyc:	maximum cycle value before potential overflow (does not include
524fb82fe2fSJohn Stultz  *		any safety margin)
525362fde04SJohn Stultz  *
5268e56f33fSJohn Stultz  * NOTE: This function includes a safety margin of 50%, in other words, we
5278e56f33fSJohn Stultz  * return half the number of nanoseconds the hardware counter can technically
5288e56f33fSJohn Stultz  * cover. This is done so that we can potentially detect problems caused by
5298e56f33fSJohn Stultz  * delayed timers or bad hardware, which might result in time intervals that
530571af55aSZhen Lei  * are larger than what the math used can handle without overflows.
53198962465SJon Hunter  */
532fb82fe2fSJohn Stultz u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
53398962465SJon Hunter {
53498962465SJon Hunter 	u64 max_nsecs, max_cycles;
53598962465SJon Hunter 
53698962465SJon Hunter 	/*
53798962465SJon Hunter 	 * Calculate the maximum number of cycles that we can pass to the
5386086e346SJohn Stultz 	 * cyc2ns() function without overflowing a 64-bit result.
53998962465SJon Hunter 	 */
5406086e346SJohn Stultz 	max_cycles = ULLONG_MAX;
	/* mult+maxadj is the worst-case (largest possible) multiplier. */
5416086e346SJohn Stultz 	do_div(max_cycles, mult+maxadj);
54298962465SJon Hunter 
54398962465SJon Hunter 	/*
54498962465SJon Hunter 	 * The actual maximum number of cycles we can defer the clocksource is
54587d8b9ebSStephen Boyd 	 * determined by the minimum of max_cycles and mask.
546d65670a7SJohn Stultz 	 * Note: Here we subtract the maxadj to make sure we don't sleep for
547d65670a7SJohn Stultz 	 * too long if there's a large negative adjustment.
54898962465SJon Hunter 	 */
54987d8b9ebSStephen Boyd 	max_cycles = min(max_cycles, mask);
55087d8b9ebSStephen Boyd 	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);
55198962465SJon Hunter 
552fb82fe2fSJohn Stultz 	/* return the max_cycles value as well if requested */
553fb82fe2fSJohn Stultz 	if (max_cyc)
554fb82fe2fSJohn Stultz 		*max_cyc = max_cycles;
555fb82fe2fSJohn Stultz 
556362fde04SJohn Stultz 	/* Return 50% of the actual maximum, so we can detect bad values */
557362fde04SJohn Stultz 	max_nsecs >>= 1;
558362fde04SJohn Stultz 
55987d8b9ebSStephen Boyd 	return max_nsecs;
56087d8b9ebSStephen Boyd }
56187d8b9ebSStephen Boyd 
56287d8b9ebSStephen Boyd /**
563fb82fe2fSJohn Stultz  * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles
564fb82fe2fSJohn Stultz  * @cs:         Pointer to clocksource to be updated
56587d8b9ebSStephen Boyd  *
56687d8b9ebSStephen Boyd  */
567fb82fe2fSJohn Stultz static inline void clocksource_update_max_deferment(struct clocksource *cs)
56887d8b9ebSStephen Boyd {
	/* Recompute cached max_idle_ns/max_cycles from current mult/shift. */
569fb82fe2fSJohn Stultz 	cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
570fb82fe2fSJohn Stultz 						cs->maxadj, cs->mask,
571fb82fe2fSJohn Stultz 						&cs->max_cycles);
57298962465SJon Hunter }
57398962465SJon Hunter 
574592913ecSJohn Stultz #ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
575734efb46Sjohn stultz 
576f5a2e343SThomas Gleixner static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
5775d33b883SThomas Gleixner {
5785d33b883SThomas Gleixner 	struct clocksource *cs;
5795d33b883SThomas Gleixner 
5805d33b883SThomas Gleixner 	if (!finished_booting || list_empty(&clocksource_list))
5815d33b883SThomas Gleixner 		return NULL;
5825d33b883SThomas Gleixner 
5835d33b883SThomas Gleixner 	/*
5845d33b883SThomas Gleixner 	 * We pick the clocksource with the highest rating. If oneshot
5855d33b883SThomas Gleixner 	 * mode is active, we pick the highres valid clocksource with
5865d33b883SThomas Gleixner 	 * the best rating.
5875d33b883SThomas Gleixner 	 */
5885d33b883SThomas Gleixner 	list_for_each_entry(cs, &clocksource_list, list) {
589f5a2e343SThomas Gleixner 		if (skipcur && cs == curr_clocksource)
590f5a2e343SThomas Gleixner 			continue;
5915d33b883SThomas Gleixner 		if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES))
5925d33b883SThomas Gleixner 			continue;
5935d33b883SThomas Gleixner 		return cs;
5945d33b883SThomas Gleixner 	}
5955d33b883SThomas Gleixner 	return NULL;
5965d33b883SThomas Gleixner }
5975d33b883SThomas Gleixner 
/*
 * __clocksource_select - core of the clocksource selection logic
 * @skipcur:	when true, never re-select curr_clocksource (unbind path)
 *
 * Caller must hold clocksource_mutex (see clocksource_select()).
 * Picks the best rated clocksource, honouring a userspace/boot
 * "override_name" request when one is set and compatible with the
 * current tick mode.
 */
static void __clocksource_select(bool skipcur)
{
	bool oneshot = tick_oneshot_mode_active();
	struct clocksource *best, *cs;

	/* Find the best suitable clocksource */
	best = clocksource_find_best(oneshot, skipcur);
	if (!best)
		return;

	/* No override requested: use the rating winner directly. */
	if (!strlen(override_name))
		goto found;

	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz)
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
			/* Override clocksource cannot be used. */
			if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
				pr_warn("Override clocksource %s is unstable and not HRT compatible - cannot switch while in HRT/NOHZ mode\n",
					cs->name);
				/* Drop the override permanently. */
				override_name[0] = 0;
			} else {
				/*
				 * The override cannot be currently verified.
				 * Deferring to let the watchdog check.
				 */
				pr_info("Override clocksource %s is not currently HRT compatible - deferring\n",
					cs->name);
			}
		} else
			/* Override clocksource can be used. */
			best = cs;
		/* Names are unique; only one entry can match. */
		break;
	}

found:
	/* Only switch if timekeeping accepts the new source. */
	if (curr_clocksource != best && !timekeeping_notify(best)) {
		pr_info("Switched to clocksource %s\n", best->name);
		curr_clocksource = best;
	}
}
64875c5158fSMartin Schwidefsky 
/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource,
 * which is selected by userspace override.
 */
static void clocksource_select(void)
{
	/* false: the current clocksource may be picked again. */
	__clocksource_select(false);
}
661f5a2e343SThomas Gleixner 
/*
 * Like clocksource_select(), but never re-selects curr_clocksource;
 * used when the current source is being unbound. Must hold
 * clocksource_mutex when called.
 */
static void clocksource_select_fallback(void)
{
	__clocksource_select(true);
}
6667eaeb343SThomas Gleixner 
#else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */
/*
 * No-op stubs: with CONFIG_ARCH_USES_GETTIMEOFFSET the architecture
 * supplies its own time source, so there is nothing to select.
 */
static inline void clocksource_select(void) { }
static inline void clocksource_select_fallback(void) { }

#endif
67254a6bc0bSThomas Gleixner 
/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	mutex_lock(&clocksource_mutex);
	curr_clocksource = clocksource_default_clock();
	/* From now on clocksource_find_best() may return entries. */
	finished_booting = 1;
	/*
	 * Run the watchdog first to eliminate unstable clock sources
	 */
	__clocksource_watchdog_kthread();
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
fs_initcall(clocksource_done_booting);
694f1b82746SMartin Schwidefsky 
69592c7e002SThomas Gleixner /*
69692c7e002SThomas Gleixner  * Enqueue the clocksource sorted by rating
697734efb46Sjohn stultz  */
698f1b82746SMartin Schwidefsky static void clocksource_enqueue(struct clocksource *cs)
699734efb46Sjohn stultz {
700f1b82746SMartin Schwidefsky 	struct list_head *entry = &clocksource_list;
701f1b82746SMartin Schwidefsky 	struct clocksource *tmp;
702734efb46Sjohn stultz 
7030fb71d34SMinfei Huang 	list_for_each_entry(tmp, &clocksource_list, list) {
70492c7e002SThomas Gleixner 		/* Keep track of the place, where to insert */
7050fb71d34SMinfei Huang 		if (tmp->rating < cs->rating)
7060fb71d34SMinfei Huang 			break;
707f1b82746SMartin Schwidefsky 		entry = &tmp->list;
7080fb71d34SMinfei Huang 	}
709f1b82746SMartin Schwidefsky 	list_add(&cs->list, entry);
710734efb46Sjohn stultz }
711734efb46Sjohn stultz 
/**
 * __clocksource_update_freq_scale - Used update clocksource with new freq
 * @cs:		clocksource to be registered
 * @scale:	Scale factor multiplied against freq to get clocksource hz
 * @freq:	clocksource frequency (cycles per second) divided by scale
 *
 * This should only be called from the clocksource->enable() method.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * __clocksource_update_freq_hz() or __clocksource_update_freq_khz() helper
 * functions.
 */
void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	u64 sec;

	/*
	 * Default clocksources are *special* and self-define their mult/shift.
	 * But, you're not special, so you should specify a freq value.
	 */
	if (freq) {
		/*
		 * Calc the maximum number of seconds which we can run before
		 * wrapping around. For clocksources which have a mask > 32-bit
		 * we need to limit the max sleep time to have a good
		 * conversion precision. 10 minutes is still a reasonable
		 * amount. That results in a shift value of 24 for a
		 * clocksource with mask >= 40-bit and f >= 4GHz. That maps to
		 * ~ 0.06ppm granularity for NTP.
		 */
		sec = cs->mask;
		do_div(sec, freq);
		do_div(sec, scale);
		if (!sec)
			sec = 1;
		else if (sec > 600 && cs->mask > UINT_MAX)
			sec = 600;

		clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
				       NSEC_PER_SEC / scale, sec * scale);
	}
	/*
	 * Ensure clocksources that have large 'mult' values don't overflow
	 * when adjusted.
	 */
	cs->maxadj = clocksource_max_adjustment(cs);
	/*
	 * Halve mult (and compensate via shift) until mult +/- maxadj can
	 * no longer wrap around. Only done when a freq was supplied.
	 */
	while (freq && ((cs->mult + cs->maxadj < cs->mult)
		|| (cs->mult - cs->maxadj > cs->mult))) {
		cs->mult >>= 1;
		cs->shift--;
		cs->maxadj = clocksource_max_adjustment(cs);
	}

	/*
	 * Only warn for *special* clocksources that self-define
	 * their mult/shift values and don't specify a freq.
	 */
	WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
		"timekeeping: Clocksource %s might overflow on 11%% adjustment\n",
		cs->name);

	/* Refresh max_idle_ns/max_cycles for the new mult/shift. */
	clocksource_update_max_deferment(cs);

	pr_info("%s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n",
		cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
}
EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
779852db46dSJohn Stultz 
/**
 * __clocksource_register_scale - Used to install new clocksources
 * @cs:		clocksource to be registered
 * @scale:	Scale factor multiplied against freq to get clocksource hz
 * @freq:	clocksource frequency (cycles per second) divided by scale
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * clocksource_register_hz() or clocksource_register_khz helper functions.
 */
int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	unsigned long flags;

	/* Initialize mult/shift and max_idle_ns */
	__clocksource_update_freq_scale(cs, scale, freq);

	/* Add clocksource to the clocksource list */
	mutex_lock(&clocksource_mutex);

	/* List manipulation must also be protected against the watchdog. */
	clocksource_watchdog_lock(&flags);
	clocksource_enqueue(cs);
	clocksource_enqueue_watchdog(cs);
	clocksource_watchdog_unlock(&flags);

	/* Re-evaluate the best clocksource and the watchdog source. */
	clocksource_select();
	clocksource_select_watchdog(false);
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(__clocksource_register_scale);
812d7e81c26SJohn Stultz 
813d0981a1bSThomas Gleixner static void __clocksource_change_rating(struct clocksource *cs, int rating)
814d0981a1bSThomas Gleixner {
815d0981a1bSThomas Gleixner 	list_del(&cs->list);
816d0981a1bSThomas Gleixner 	cs->rating = rating;
817d0981a1bSThomas Gleixner 	clocksource_enqueue(cs);
818d0981a1bSThomas Gleixner }
819d0981a1bSThomas Gleixner 
/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 * @cs:		clocksource to be changed
 * @rating:	new rating
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	unsigned long flags;

	mutex_lock(&clocksource_mutex);
	/* The list re-sort must not race with the watchdog. */
	clocksource_watchdog_lock(&flags);
	__clocksource_change_rating(cs, rating);
	clocksource_watchdog_unlock(&flags);

	/* The new rating may change which source/watchdog is best. */
	clocksource_select();
	clocksource_select_watchdog(false);
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);
839734efb46Sjohn stultz 
/*
 * Unbind clocksource @cs. Called with clocksource_mutex held
 *
 * Returns -EBUSY when @cs is still in use as the watchdog or the
 * current clocksource and no replacement could be installed.
 */
static int clocksource_unbind(struct clocksource *cs)
{
	unsigned long flags;

	if (clocksource_is_watchdog(cs)) {
		/* Select and try to install a replacement watchdog. */
		clocksource_select_watchdog(true);
		if (clocksource_is_watchdog(cs))
			return -EBUSY;
	}

	if (cs == curr_clocksource) {
		/* Select and try to install a replacement clock source */
		clocksource_select_fallback();
		if (curr_clocksource == cs)
			return -EBUSY;
	}

	/* Safe to drop @cs now; remove it under the watchdog lock. */
	clocksource_watchdog_lock(&flags);
	clocksource_dequeue_watchdog(cs);
	list_del_init(&cs->list);
	clocksource_watchdog_unlock(&flags);

	return 0;
}
8687eaeb343SThomas Gleixner 
8694713e22cSThomas Gleixner /**
8704713e22cSThomas Gleixner  * clocksource_unregister - remove a registered clocksource
871b1b73d09SKusanagi Kouichi  * @cs:	clocksource to be unregistered
8724713e22cSThomas Gleixner  */
873a89c7edbSThomas Gleixner int clocksource_unregister(struct clocksource *cs)
8744713e22cSThomas Gleixner {
875a89c7edbSThomas Gleixner 	int ret = 0;
876a89c7edbSThomas Gleixner 
87775c5158fSMartin Schwidefsky 	mutex_lock(&clocksource_mutex);
878a89c7edbSThomas Gleixner 	if (!list_empty(&cs->list))
879a89c7edbSThomas Gleixner 		ret = clocksource_unbind(cs);
88075c5158fSMartin Schwidefsky 	mutex_unlock(&clocksource_mutex);
881a89c7edbSThomas Gleixner 	return ret;
8824713e22cSThomas Gleixner }
883fb63a0ebSMartin Schwidefsky EXPORT_SYMBOL(clocksource_unregister);
8844713e22cSThomas Gleixner 
8852b013700SDaniel Walker #ifdef CONFIG_SYSFS
886734efb46Sjohn stultz /**
887e87821d1SBaolin Wang  * current_clocksource_show - sysfs interface for current clocksource
888734efb46Sjohn stultz  * @dev:	unused
889b1b73d09SKusanagi Kouichi  * @attr:	unused
890734efb46Sjohn stultz  * @buf:	char buffer to be filled with clocksource list
891734efb46Sjohn stultz  *
892734efb46Sjohn stultz  * Provides sysfs interface for listing current clocksource.
893734efb46Sjohn stultz  */
894e87821d1SBaolin Wang static ssize_t current_clocksource_show(struct device *dev,
895e87821d1SBaolin Wang 					struct device_attribute *attr,
896e87821d1SBaolin Wang 					char *buf)
897734efb46Sjohn stultz {
8985e2cb101SMiao Xie 	ssize_t count = 0;
899734efb46Sjohn stultz 
90075c5158fSMartin Schwidefsky 	mutex_lock(&clocksource_mutex);
9015e2cb101SMiao Xie 	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
90275c5158fSMartin Schwidefsky 	mutex_unlock(&clocksource_mutex);
903734efb46Sjohn stultz 
9045e2cb101SMiao Xie 	return count;
905734efb46Sjohn stultz }
906734efb46Sjohn stultz 
907891292a7SPatrick Palka ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
90829b54078SThomas Gleixner {
90929b54078SThomas Gleixner 	size_t ret = cnt;
91029b54078SThomas Gleixner 
91129b54078SThomas Gleixner 	/* strings from sysfs write are not 0 terminated! */
91229b54078SThomas Gleixner 	if (!cnt || cnt >= CS_NAME_LEN)
91329b54078SThomas Gleixner 		return -EINVAL;
91429b54078SThomas Gleixner 
91529b54078SThomas Gleixner 	/* strip of \n: */
91629b54078SThomas Gleixner 	if (buf[cnt-1] == '\n')
91729b54078SThomas Gleixner 		cnt--;
91829b54078SThomas Gleixner 	if (cnt > 0)
91929b54078SThomas Gleixner 		memcpy(dst, buf, cnt);
92029b54078SThomas Gleixner 	dst[cnt] = 0;
92129b54078SThomas Gleixner 	return ret;
92229b54078SThomas Gleixner }
92329b54078SThomas Gleixner 
/**
 * current_clocksource_store - interface for manually overriding clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	name of override clocksource
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t current_clocksource_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	ssize_t ret;

	mutex_lock(&clocksource_mutex);

	/* Record the override name, then re-run the selection with it. */
	ret = sysfs_get_uname(buf, override_name, count);
	if (ret >= 0)
		clocksource_select();

	mutex_unlock(&clocksource_mutex);

	return ret;
}
static DEVICE_ATTR_RW(current_clocksource);
951734efb46Sjohn stultz 
/**
 * unbind_clocksource_store - interface for manually unbinding clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	unused
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually unbinding a clocksource.
 */
static ssize_t unbind_clocksource_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct clocksource *cs;
	char name[CS_NAME_LEN];
	ssize_t ret;

	/* Copy and NUL-terminate the requested name before taking the lock. */
	ret = sysfs_get_uname(buf, name, count);
	if (ret < 0)
		return ret;

	/* -ENODEV if no registered clocksource matches the name. */
	ret = -ENODEV;
	mutex_lock(&clocksource_mutex);
	list_for_each_entry(cs, &clocksource_list, list) {
		if (strcmp(cs->name, name))
			continue;
		ret = clocksource_unbind(cs);
		break;
	}
	mutex_unlock(&clocksource_mutex);

	/* Sysfs convention: return the consumed byte count on success. */
	return ret ? ret : count;
}
static DEVICE_ATTR_WO(unbind_clocksource);
9867eaeb343SThomas Gleixner 
9877eaeb343SThomas Gleixner /**
988e87821d1SBaolin Wang  * available_clocksource_show - sysfs interface for listing clocksource
989734efb46Sjohn stultz  * @dev:	unused
990b1b73d09SKusanagi Kouichi  * @attr:	unused
991734efb46Sjohn stultz  * @buf:	char buffer to be filled with clocksource list
992734efb46Sjohn stultz  *
993734efb46Sjohn stultz  * Provides sysfs interface for listing registered clocksources
994734efb46Sjohn stultz  */
995e87821d1SBaolin Wang static ssize_t available_clocksource_show(struct device *dev,
996d369a5d8SKay Sievers 					  struct device_attribute *attr,
9974a0b2b4dSAndi Kleen 					  char *buf)
998734efb46Sjohn stultz {
9992e197586SMatthias Kaehlcke 	struct clocksource *src;
10005e2cb101SMiao Xie 	ssize_t count = 0;
1001734efb46Sjohn stultz 
100275c5158fSMartin Schwidefsky 	mutex_lock(&clocksource_mutex);
10032e197586SMatthias Kaehlcke 	list_for_each_entry(src, &clocksource_list, list) {
1004cd6d95d8SThomas Gleixner 		/*
1005cd6d95d8SThomas Gleixner 		 * Don't show non-HRES clocksource if the tick code is
1006cd6d95d8SThomas Gleixner 		 * in one shot mode (highres=on or nohz=on)
1007cd6d95d8SThomas Gleixner 		 */
1008cd6d95d8SThomas Gleixner 		if (!tick_oneshot_mode_active() ||
10093f68535aSjohn stultz 		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
10105e2cb101SMiao Xie 			count += snprintf(buf + count,
10115e2cb101SMiao Xie 				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
10125e2cb101SMiao Xie 				  "%s ", src->name);
1013734efb46Sjohn stultz 	}
101475c5158fSMartin Schwidefsky 	mutex_unlock(&clocksource_mutex);
1015734efb46Sjohn stultz 
10165e2cb101SMiao Xie 	count += snprintf(buf + count,
10175e2cb101SMiao Xie 			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");
1018734efb46Sjohn stultz 
10195e2cb101SMiao Xie 	return count;
1020734efb46Sjohn stultz }
1021e87821d1SBaolin Wang static DEVICE_ATTR_RO(available_clocksource);
1022734efb46Sjohn stultz 
/* Attributes exposed by the clocksource sysfs device defined below. */
static struct attribute *clocksource_attrs[] = {
	&dev_attr_current_clocksource.attr,
	&dev_attr_unbind_clocksource.attr,
	&dev_attr_available_clocksource.attr,
	NULL
};
ATTRIBUTE_GROUPS(clocksource);
103027263e8dSBaolin Wang 
/* Pseudo bus/subsystem carrying the clocksource sysfs files. */
static struct bus_type clocksource_subsys = {
	.name = "clocksource",
	.dev_name = "clocksource",
};

/* The single device instance exposing the clocksource attribute groups. */
static struct device device_clocksource = {
	.id	= 0,
	.bus	= &clocksource_subsys,
	.groups	= clocksource_groups,
};
1041734efb46Sjohn stultz 
1042ad596171Sjohn stultz static int __init init_clocksource_sysfs(void)
1043734efb46Sjohn stultz {
1044d369a5d8SKay Sievers 	int error = subsys_system_register(&clocksource_subsys, NULL);
1045734efb46Sjohn stultz 
1046734efb46Sjohn stultz 	if (!error)
1047d369a5d8SKay Sievers 		error = device_register(&device_clocksource);
104827263e8dSBaolin Wang 
1049734efb46Sjohn stultz 	return error;
1050734efb46Sjohn stultz }
1051734efb46Sjohn stultz 
1052734efb46Sjohn stultz device_initcall(init_clocksource_sysfs);
10532b013700SDaniel Walker #endif /* CONFIG_SYSFS */
1054734efb46Sjohn stultz 
/**
 * boot_override_clocksource - boot clock override
 * @str:	override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char* str)
{
	mutex_lock(&clocksource_mutex);
	if (str)
		strlcpy(override_name, str, sizeof(override_name));
	mutex_unlock(&clocksource_mutex);
	/* __setup handlers return 1 to mark the option as handled. */
	return 1;
}

__setup("clocksource=", boot_override_clocksource);
1072734efb46Sjohn stultz 
/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str:	override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name
 */
static int __init boot_override_clock(char* str)
{
	/* Translate the legacy "pmtmr" alias to its clocksource name. */
	if (!strcmp(str, "pmtmr")) {
		pr_warn("clock=pmtmr is deprecated - use clocksource=acpi_pm\n");
		return boot_override_clocksource("acpi_pm");
	}
	pr_warn("clock= boot option is deprecated - use clocksource=xyz\n");
	return boot_override_clocksource(str);
}

__setup("clock=", boot_override_clock);
1091