xref: /openbmc/linux/kernel/time/clocksource.c (revision e99e88a9)
1734efb46Sjohn stultz /*
2734efb46Sjohn stultz  * linux/kernel/time/clocksource.c
3734efb46Sjohn stultz  *
4734efb46Sjohn stultz  * This file contains the functions which manage clocksource drivers.
5734efb46Sjohn stultz  *
6734efb46Sjohn stultz  * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
7734efb46Sjohn stultz  *
8734efb46Sjohn stultz  * This program is free software; you can redistribute it and/or modify
9734efb46Sjohn stultz  * it under the terms of the GNU General Public License as published by
10734efb46Sjohn stultz  * the Free Software Foundation; either version 2 of the License, or
11734efb46Sjohn stultz  * (at your option) any later version.
12734efb46Sjohn stultz  *
13734efb46Sjohn stultz  * This program is distributed in the hope that it will be useful,
14734efb46Sjohn stultz  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15734efb46Sjohn stultz  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16734efb46Sjohn stultz  * GNU General Public License for more details.
17734efb46Sjohn stultz  *
18734efb46Sjohn stultz  * You should have received a copy of the GNU General Public License
19734efb46Sjohn stultz  * along with this program; if not, write to the Free Software
20734efb46Sjohn stultz  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21734efb46Sjohn stultz  *
22734efb46Sjohn stultz  * TODO WishList:
23734efb46Sjohn stultz  *   o Allow clocksource drivers to be unregistered
24734efb46Sjohn stultz  */
25734efb46Sjohn stultz 
2645bbfe64SJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2745bbfe64SJoe Perches 
28d369a5d8SKay Sievers #include <linux/device.h>
29734efb46Sjohn stultz #include <linux/clocksource.h>
30734efb46Sjohn stultz #include <linux/init.h>
31734efb46Sjohn stultz #include <linux/module.h>
32dc29a365SMathieu Desnoyers #include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
3379bf2bb3SThomas Gleixner #include <linux/tick.h>
3401548f4dSMartin Schwidefsky #include <linux/kthread.h>
35734efb46Sjohn stultz 
36c1797bafSThomas Gleixner #include "tick-internal.h"
373a978377SThomas Gleixner #include "timekeeping_internal.h"
3803e13cf5SThomas Gleixner 
397d2f944aSThomas Gleixner /**
407d2f944aSThomas Gleixner  * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
417d2f944aSThomas Gleixner  * @mult:	pointer to mult variable
427d2f944aSThomas Gleixner  * @shift:	pointer to shift variable
437d2f944aSThomas Gleixner  * @from:	frequency to convert from
447d2f944aSThomas Gleixner  * @to:		frequency to convert to
455fdade95SNicolas Pitre  * @maxsec:	guaranteed runtime conversion range in seconds
467d2f944aSThomas Gleixner  *
477d2f944aSThomas Gleixner  * The function evaluates the shift/mult pair for the scaled math
487d2f944aSThomas Gleixner  * operations of clocksources and clockevents.
497d2f944aSThomas Gleixner  *
507d2f944aSThomas Gleixner  * @to and @from are frequency values in HZ. For clock sources @to is
517d2f944aSThomas Gleixner  * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
527d2f944aSThomas Gleixner  * event @to is the counter frequency and @from is NSEC_PER_SEC.
537d2f944aSThomas Gleixner  *
545fdade95SNicolas Pitre  * The @maxsec conversion range argument controls the time frame in
557d2f944aSThomas Gleixner  * seconds which must be covered by the runtime conversion with the
567d2f944aSThomas Gleixner  * calculated mult and shift factors. This guarantees that no 64bit
577d2f944aSThomas Gleixner  * overflow happens when the input value of the conversion is
587d2f944aSThomas Gleixner  * multiplied with the calculated mult factor. Larger ranges may
597d2f944aSThomas Gleixner  * reduce the conversion accuracy by chosing smaller mult and shift
607d2f944aSThomas Gleixner  * factors.
617d2f944aSThomas Gleixner  */
627d2f944aSThomas Gleixner void
635fdade95SNicolas Pitre clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
647d2f944aSThomas Gleixner {
657d2f944aSThomas Gleixner 	u64 tmp;
667d2f944aSThomas Gleixner 	u32 sft, sftacc= 32;
677d2f944aSThomas Gleixner 
687d2f944aSThomas Gleixner 	/*
697d2f944aSThomas Gleixner 	 * Calculate the shift factor which is limiting the conversion
707d2f944aSThomas Gleixner 	 * range:
717d2f944aSThomas Gleixner 	 */
725fdade95SNicolas Pitre 	tmp = ((u64)maxsec * from) >> 32;
737d2f944aSThomas Gleixner 	while (tmp) {
747d2f944aSThomas Gleixner 		tmp >>=1;
757d2f944aSThomas Gleixner 		sftacc--;
767d2f944aSThomas Gleixner 	}
777d2f944aSThomas Gleixner 
787d2f944aSThomas Gleixner 	/*
797d2f944aSThomas Gleixner 	 * Find the conversion shift/mult pair which has the best
807d2f944aSThomas Gleixner 	 * accuracy and fits the maxsec conversion range:
817d2f944aSThomas Gleixner 	 */
827d2f944aSThomas Gleixner 	for (sft = 32; sft > 0; sft--) {
837d2f944aSThomas Gleixner 		tmp = (u64) to << sft;
84b5776c4aSjohn stultz 		tmp += from / 2;
857d2f944aSThomas Gleixner 		do_div(tmp, from);
867d2f944aSThomas Gleixner 		if ((tmp >> sftacc) == 0)
877d2f944aSThomas Gleixner 			break;
887d2f944aSThomas Gleixner 	}
897d2f944aSThomas Gleixner 	*mult = tmp;
907d2f944aSThomas Gleixner 	*shift = sft;
917d2f944aSThomas Gleixner }
925304121aSMurali Karicheri EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
937d2f944aSThomas Gleixner 
/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 * finished_booting:
 *	nonzero once boot has progressed far enough for clocksource
 *	selection and deferred watchdog work (set outside this chunk).
 */
static struct clocksource *curr_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[CS_NAME_LEN];
static int finished_booting;
109734efb46Sjohn stultz 
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
/* Forward declarations for the watchdog machinery below. */
static void clocksource_watchdog_work(struct work_struct *work);
static void clocksource_select(void);

/* Clocksources currently under verification by the watchdog. */
static LIST_HEAD(watchdog_list);
/* The clocksource acting as the reference against which others are checked. */
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
/* Protects all of the watchdog state above; also taken from timer context. */
static DEFINE_SPINLOCK(watchdog_lock);
static int watchdog_running;
/* Counts outstanding requests to re-initialize watchdog cycle values. */
static atomic_t watchdog_reset_pending;

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

/*
 * Interval: 0.5sec Threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
1305d8b34fdSThomas Gleixner 
/*
 * Work handler: spawn the kthread that rates unstable clocksources
 * down and triggers reselection.
 */
static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * If kthread_run fails the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}
13901548f4dSMartin Schwidefsky 
/* Mark @cs unstable. Both visible callers hold watchdog_lock. */
static void __clocksource_unstable(struct clocksource *cs)
{
	/* Drop high-res eligibility and the watchdog init flag. */
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;

	/* Let the clocksource react to being marked unstable, if it cares. */
	if (cs->mark_unstable)
		cs->mark_unstable(cs);

	/* Defer the rating change to the kthread, once booting finished. */
	if (finished_booting)
		schedule_work(&watchdog_work);
}
1517285dd7fSThomas Gleixner 
/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs:		clocksource to be marked unstable
 *
 * This function is called instead of clocksource_change_rating from
 * cpu hotplug code to avoid a deadlock between the clocksource mutex
 * and the cpu hotplug mutex. It defers the update of the clocksource
 * to the watchdog thread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	/* Already marked unstable? Then the deferral is already queued. */
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		/* Ensure the watchdog kthread finds @cs on its list. */
		if (list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
1735d8b34fdSThomas Gleixner 
/*
 * Periodic timer callback: compare each watched clocksource against
 * the watchdog reference over the last interval and mark it unstable
 * when the two disagree by more than WATCHDOG_THRESHOLD. The timer
 * hops to a different CPU every interval.
 */
static void clocksource_watchdog(struct timer_list *unused)
{
	struct clocksource *cs;
	u64 csnow, wdnow, cslast, wdlast, delta;
	int64_t wd_nsec, cs_nsec;
	int next_cpu, reset_pending;

	spin_lock(&watchdog_lock);
	if (!watchdog_running)
		goto out;

	reset_pending = atomic_read(&watchdog_reset_pending);

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			/* Re-kick the deferred rating change if possible. */
			if (finished_booting)
				schedule_work(&watchdog_work);
			continue;
		}

		/* Read both clocks back to back to minimize drift between them. */
		local_irq_disable();
		csnow = cs->read(cs);
		wdnow = watchdog->read(watchdog);
		local_irq_enable();

		/* Clocksource initialized ? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
		    atomic_read(&watchdog_reset_pending)) {
			/* First cycle (or reset requested): just record values. */
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = wdnow;
			cs->cs_last = csnow;
			continue;
		}

		/* Convert both deltas since the last scan to nanoseconds. */
		delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
		wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
					     watchdog->shift);

		delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
		wdlast = cs->wd_last; /* save these in case we print them */
		cslast = cs->cs_last;
		cs->cs_last = csnow;
		cs->wd_last = wdnow;

		/* A reset arrived mid-scan: skip judging against stale values. */
		if (atomic_read(&watchdog_reset_pending))
			continue;

		/* Check the deviation from the watchdog clocksource. */
		if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
			pr_warn("timekeeping watchdog on CPU%d: Marking clocksource '%s' as unstable because the skew is too large:\n",
				smp_processor_id(), cs->name);
			pr_warn("                      '%s' wd_now: %llx wd_last: %llx mask: %llx\n",
				watchdog->name, wdnow, wdlast, watchdog->mask);
			pr_warn("                      '%s' cs_now: %llx cs_last: %llx mask: %llx\n",
				cs->name, csnow, cslast, cs->mask);
			__clocksource_unstable(cs);
			continue;
		}

		/* Tell the currently selected clocksource it passed this check. */
		if (cs == curr_clocksource && cs->tick_stable)
			cs->tick_stable(cs);

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			/* Mark it valid for high-res. */
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;

			/*
			 * clocksource_done_booting() will sort it if
			 * finished_booting is not set yet.
			 */
			if (!finished_booting)
				continue;

			/*
			 * If this is not the current clocksource let
			 * the watchdog thread reselect it. Due to the
			 * change to high res this clocksource might
			 * be preferred now. If it is the current
			 * clocksource let the tick code know about
			 * that change.
			 */
			if (cs != curr_clocksource) {
				cs->flags |= CLOCK_SOURCE_RESELECT;
				schedule_work(&watchdog_work);
			} else {
				tick_clock_notify();
			}
		}
	}

	/*
	 * We only clear the watchdog_reset_pending, when we did a
	 * full cycle through all clocksources.
	 */
	if (reset_pending)
		atomic_dec(&watchdog_reset_pending);

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);
	watchdog_timer.expires += WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, next_cpu);
out:
	spin_unlock(&watchdog_lock);
}
2880f8e8ef7SMartin Schwidefsky 
/* Start the watchdog timer if needed. Callers hold watchdog_lock. */
static inline void clocksource_start_watchdog(void)
{
	/* Nothing to do if already running, or nothing/nobody to watch. */
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
	timer_setup(&watchdog_timer, clocksource_watchdog, 0);
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}
298fb63a0ebSMartin Schwidefsky 
/* Stop the watchdog timer once nothing is left to verify. */
static inline void clocksource_stop_watchdog(void)
{
	/* Keep running while a watchdog and watched clocksources exist. */
	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
		return;
	del_timer(&watchdog_timer);
	watchdog_running = 0;
}
306fb63a0ebSMartin Schwidefsky 
/* Force re-initialization of all watchdog comparison baselines. */
static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	/* Clearing CLOCK_SOURCE_WATCHDOG makes the next scan resync values. */
	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}
3140f8e8ef7SMartin Schwidefsky 
/* Request a watchdog reset; honored lazily by clocksource_watchdog(). */
static void clocksource_resume_watchdog(void)
{
	atomic_inc(&watchdog_reset_pending);
}
319b52f52a0SThomas Gleixner 
/*
 * Classify a newly registered clocksource: either queue it for
 * verification or treat it as a potential watchdog reference.
 */
static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
336bbf66d89SVitaly Kuznetsov 
/*
 * Pick the best rated clocksource that does not itself require
 * verification as the watchdog reference. With @fallback the current
 * watchdog is excluded, so a replacement can be found when it goes
 * away; if no replacement exists the old watchdog is kept.
 */
static void clocksource_select_watchdog(bool fallback)
{
	struct clocksource *cs, *old_wd;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	/* save current watchdog */
	old_wd = watchdog;
	if (fallback)
		watchdog = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* cs is a clocksource to be watched. */
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
			continue;

		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_wd)
			continue;

		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating)
			watchdog = cs;
	}
	/* If we failed to find a fallback restore the old one. */
	if (!watchdog)
		watchdog = old_wd;

	/* If we changed the watchdog we need to reset cycles. */
	if (watchdog != old_wd)
		clocksource_reset_watchdog();

	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
373fb63a0ebSMartin Schwidefsky 
374fb63a0ebSMartin Schwidefsky static void clocksource_dequeue_watchdog(struct clocksource *cs)
375fb63a0ebSMartin Schwidefsky {
376fb63a0ebSMartin Schwidefsky 	unsigned long flags;
377fb63a0ebSMartin Schwidefsky 
378fb63a0ebSMartin Schwidefsky 	spin_lock_irqsave(&watchdog_lock, flags);
379a89c7edbSThomas Gleixner 	if (cs != watchdog) {
380fb63a0ebSMartin Schwidefsky 		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
381fb63a0ebSMartin Schwidefsky 			/* cs is a watched clocksource. */
382fb63a0ebSMartin Schwidefsky 			list_del_init(&cs->wd_list);
383fb63a0ebSMartin Schwidefsky 			/* Check if the watchdog timer needs to be stopped. */
384fb63a0ebSMartin Schwidefsky 			clocksource_stop_watchdog();
385a89c7edbSThomas Gleixner 		}
386a89c7edbSThomas Gleixner 	}
387fb63a0ebSMartin Schwidefsky 	spin_unlock_irqrestore(&watchdog_lock, flags);
388fb63a0ebSMartin Schwidefsky }
389fb63a0ebSMartin Schwidefsky 
/*
 * Process the watchdog verdicts: move unstable clocksources off the
 * watchdog list and change their rating to 0, and collect pending
 * reselect requests. Returns nonzero when clocksource_select()
 * should run. The visible caller holds clocksource_mutex.
 */
static int __clocksource_watchdog_kthread(void)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	LIST_HEAD(unstable);
	int select = 0;

	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			/* Collect on a local list; rating change needs no lock. */
			list_del_init(&cs->wd_list);
			list_add(&cs->wd_list, &unstable);
			select = 1;
		}
		if (cs->flags & CLOCK_SOURCE_RESELECT) {
			cs->flags &= ~CLOCK_SOURCE_RESELECT;
			select = 1;
		}
	}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	/* Needs to be done outside of watchdog lock */
	list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
		list_del_init(&cs->wd_list);
		__clocksource_change_rating(cs, 0);
	}
	return select;
}
420332962f2SThomas Gleixner 
/* Kthread body: apply watchdog verdicts, then reselect if required. */
static int clocksource_watchdog_kthread(void *data)
{
	mutex_lock(&clocksource_mutex);
	if (__clocksource_watchdog_kthread())
		clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
429c55c87c8SMartin Schwidefsky 
4307eaeb343SThomas Gleixner static bool clocksource_is_watchdog(struct clocksource *cs)
4317eaeb343SThomas Gleixner {
4327eaeb343SThomas Gleixner 	return cs == watchdog;
4337eaeb343SThomas Gleixner }
4347eaeb343SThomas Gleixner 
435fb63a0ebSMartin Schwidefsky #else /* CONFIG_CLOCKSOURCE_WATCHDOG */
436fb63a0ebSMartin Schwidefsky 
/* Without a watchdog, continuous clocksources are trusted for high-res. */
static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}
442b52f52a0SThomas Gleixner 
/* No-op stubs used when CONFIG_CLOCKSOURCE_WATCHDOG is disabled. */
static void clocksource_select_watchdog(bool fallback) { }
static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int __clocksource_watchdog_kthread(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
void clocksource_mark_unstable(struct clocksource *cs) { }
449fb63a0ebSMartin Schwidefsky 
450fb63a0ebSMartin Schwidefsky #endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
4515d8b34fdSThomas Gleixner 
/**
 * clocksource_suspend - suspend the clocksource(s)
 *
 * Walks the list in reverse, i.e. in the opposite order of the
 * resume walk in clocksource_resume().
 */
void clocksource_suspend(void)
{
	struct clocksource *cs;

	list_for_each_entry_reverse(cs, &clocksource_list, list)
		if (cs->suspend)
			cs->suspend(cs);
}
463c54a42b1SMagnus Damm 
/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume(cs);

	/* Let the watchdog know that cycle values may have jumped. */
	clocksource_resume_watchdog();
}
477b52f52a0SThomas Gleixner 
/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog. This might fail when the kernel
 * was stopped in code which holds watchdog_lock.
 */
void clocksource_touch_watchdog(void)
{
	/* Same mechanism as resume: request a lazy watchdog reset. */
	clocksource_resume_watchdog();
}
490734efb46Sjohn stultz /**
491d65670a7SJohn Stultz  * clocksource_max_adjustment- Returns max adjustment amount
492d65670a7SJohn Stultz  * @cs:         Pointer to clocksource
493d65670a7SJohn Stultz  *
494d65670a7SJohn Stultz  */
495d65670a7SJohn Stultz static u32 clocksource_max_adjustment(struct clocksource *cs)
496d65670a7SJohn Stultz {
497d65670a7SJohn Stultz 	u64 ret;
498d65670a7SJohn Stultz 	/*
49988b28adfSJim Cromie 	 * We won't try to correct for more than 11% adjustments (110,000 ppm),
500d65670a7SJohn Stultz 	 */
501d65670a7SJohn Stultz 	ret = (u64)cs->mult * 11;
502d65670a7SJohn Stultz 	do_div(ret,100);
503d65670a7SJohn Stultz 	return (u32)ret;
504d65670a7SJohn Stultz }
505d65670a7SJohn Stultz 
/**
 * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
 * @mult:	cycle to nanosecond multiplier
 * @shift:	cycle to nanosecond divisor (power of two)
 * @maxadj:	maximum adjustment value to mult (~11%)
 * @mask:	bitmask for two's complement subtraction of non 64 bit counters
 * @max_cyc:	maximum cycle value before potential overflow (does not include
 *		any safety margin)
 *
 * NOTE: This function includes a safety margin of 50%, in other words, we
 * return half the number of nanoseconds the hardware counter can technically
 * cover. This is done so that we can potentially detect problems caused by
 * delayed timers or bad hardware, which might result in time intervals that
 * are larger than what the math used can handle without overflows.
 */
u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
{
	u64 max_nsecs, max_cycles;

	/*
	 * Calculate the maximum number of cycles that we can pass to the
	 * cyc2ns() function without overflowing a 64-bit result.
	 */
	max_cycles = ULLONG_MAX;
	do_div(max_cycles, mult+maxadj);

	/*
	 * The actual maximum number of cycles we can defer the clocksource is
	 * determined by the minimum of max_cycles and mask.
	 * Note: Here we subtract the maxadj to make sure we don't sleep for
	 * too long if there's a large negative adjustment.
	 */
	max_cycles = min(max_cycles, mask);
	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);

	/* return the max_cycles value as well if requested */
	if (max_cyc)
		*max_cyc = max_cycles;

	/* Return 50% of the actual maximum, so we can detect bad values */
	max_nsecs >>= 1;

	return max_nsecs;
}
55087d8b9ebSStephen Boyd 
/**
 * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles
 * @cs:         Pointer to clocksource to be updated
 *
 * Recomputes the cached deferment limits of @cs from its current
 * mult/shift/maxadj/mask values.
 */
static inline void clocksource_update_max_deferment(struct clocksource *cs)
{
	cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
						cs->maxadj, cs->mask,
						&cs->max_cycles);
}
56298962465SJon Hunter 
563592913ecSJohn Stultz #ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
564734efb46Sjohn stultz 
565f5a2e343SThomas Gleixner static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
5665d33b883SThomas Gleixner {
5675d33b883SThomas Gleixner 	struct clocksource *cs;
5685d33b883SThomas Gleixner 
5695d33b883SThomas Gleixner 	if (!finished_booting || list_empty(&clocksource_list))
5705d33b883SThomas Gleixner 		return NULL;
5715d33b883SThomas Gleixner 
5725d33b883SThomas Gleixner 	/*
5735d33b883SThomas Gleixner 	 * We pick the clocksource with the highest rating. If oneshot
5745d33b883SThomas Gleixner 	 * mode is active, we pick the highres valid clocksource with
5755d33b883SThomas Gleixner 	 * the best rating.
5765d33b883SThomas Gleixner 	 */
5775d33b883SThomas Gleixner 	list_for_each_entry(cs, &clocksource_list, list) {
578f5a2e343SThomas Gleixner 		if (skipcur && cs == curr_clocksource)
579f5a2e343SThomas Gleixner 			continue;
5805d33b883SThomas Gleixner 		if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES))
5815d33b883SThomas Gleixner 			continue;
5825d33b883SThomas Gleixner 		return cs;
5835d33b883SThomas Gleixner 	}
5845d33b883SThomas Gleixner 	return NULL;
5855d33b883SThomas Gleixner }
5865d33b883SThomas Gleixner 
/*
 * __clocksource_select - core of the clocksource selection logic
 * @skipcur:	when true, never pick the currently installed clocksource
 *		(used to find a fallback while unbinding it)
 *
 * Picks the best rated (and, in oneshot mode, highres capable)
 * clocksource, honoring a userspace override by name when usable.
 * Must hold clocksource_mutex when called.
 */
static void __clocksource_select(bool skipcur)
{
	bool oneshot = tick_oneshot_mode_active();
	struct clocksource *best, *cs;

	/* Find the best suitable clocksource */
	best = clocksource_find_best(oneshot, skipcur);
	if (!best)
		return;

	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz)
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
			/* Override clocksource cannot be used. */
			if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
				pr_warn("Override clocksource %s is unstable and not HRT compatible - cannot switch while in HRT/NOHZ mode\n",
					cs->name);
				/* Clear the override so it is not retried */
				override_name[0] = 0;
			} else {
				/*
				 * The override cannot be currently verified.
				 * Deferring to let the watchdog check.
				 */
				pr_info("Override clocksource %s is not currently HRT compatible - deferring\n",
					cs->name);
			}
		} else
			/* Override clocksource can be used. */
			best = cs;
		break;
	}

	/* Install @best unless it is already current or the core rejects it */
	if (curr_clocksource != best && !timekeeping_notify(best)) {
		pr_info("Switched to clocksource %s\n", best->name);
		curr_clocksource = best;
	}
}
63375c5158fSMartin Schwidefsky 
/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource,
 * which is selected by userspace override.
 */
static void clocksource_select(void)
{
	/* false: the currently installed clocksource remains a candidate */
	__clocksource_select(false);
}
646f5a2e343SThomas Gleixner 
/*
 * clocksource_select_fallback - Select the best clocksource other than
 * the current one. Must hold clocksource_mutex when called. Used by
 * clocksource_unbind() to find a replacement before removal.
 */
static void clocksource_select_fallback(void)
{
	__clocksource_select(true);
}
6517eaeb343SThomas Gleixner 
#else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */
/* Clocksource selection is not available in this config: no-op stubs */
static inline void clocksource_select(void) { }
static inline void clocksource_select_fallback(void) { }

#endif
65754a6bc0bSThomas Gleixner 
/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	mutex_lock(&clocksource_mutex);
	/* Establish a baseline clock before running the real selection */
	curr_clocksource = clocksource_default_clock();
	finished_booting = 1;
	/*
	 * Run the watchdog first to eliminate unstable clock sources
	 */
	__clocksource_watchdog_kthread();
	/* finished_booting is now set, so selection can actually occur */
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
fs_initcall(clocksource_done_booting);
679f1b82746SMartin Schwidefsky 
68092c7e002SThomas Gleixner /*
68192c7e002SThomas Gleixner  * Enqueue the clocksource sorted by rating
682734efb46Sjohn stultz  */
683f1b82746SMartin Schwidefsky static void clocksource_enqueue(struct clocksource *cs)
684734efb46Sjohn stultz {
685f1b82746SMartin Schwidefsky 	struct list_head *entry = &clocksource_list;
686f1b82746SMartin Schwidefsky 	struct clocksource *tmp;
687734efb46Sjohn stultz 
6880fb71d34SMinfei Huang 	list_for_each_entry(tmp, &clocksource_list, list) {
68992c7e002SThomas Gleixner 		/* Keep track of the place, where to insert */
6900fb71d34SMinfei Huang 		if (tmp->rating < cs->rating)
6910fb71d34SMinfei Huang 			break;
692f1b82746SMartin Schwidefsky 		entry = &tmp->list;
6930fb71d34SMinfei Huang 	}
694f1b82746SMartin Schwidefsky 	list_add(&cs->list, entry);
695734efb46Sjohn stultz }
696734efb46Sjohn stultz 
/**
 * __clocksource_update_freq_scale - Used update clocksource with new freq
 * @cs:		clocksource to be registered
 * @scale:	Scale factor multiplied against freq to get clocksource hz
 * @freq:	clocksource frequency (cycles per second) divided by scale
 *
 * Recomputes mult/shift, maxadj and the deferment limits of @cs. If
 * @freq is zero, the clocksource's self-defined mult/shift are kept.
 *
 * This should only be called from the clocksource->enable() method.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * __clocksource_update_freq_hz() or __clocksource_update_freq_khz() helper
 * functions.
 */
void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	u64 sec;

	/*
	 * Default clocksources are *special* and self-define their mult/shift.
	 * But, you're not special, so you should specify a freq value.
	 */
	if (freq) {
		/*
		 * Calc the maximum number of seconds which we can run before
		 * wrapping around. For clocksources which have a mask > 32-bit
		 * we need to limit the max sleep time to have a good
		 * conversion precision. 10 minutes is still a reasonable
		 * amount. That results in a shift value of 24 for a
		 * clocksource with mask >= 40-bit and f >= 4GHz. That maps to
		 * ~ 0.06ppm granularity for NTP.
		 */
		sec = cs->mask;
		do_div(sec, freq);
		do_div(sec, scale);
		if (!sec)
			sec = 1;
		else if (sec > 600 && cs->mask > UINT_MAX)
			sec = 600;

		clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
				       NSEC_PER_SEC / scale, sec * scale);
	}
	/*
	 * Ensure clocksources that have large 'mult' values don't overflow
	 * when adjusted.
	 */
	cs->maxadj = clocksource_max_adjustment(cs);
	/* Shrink mult/shift until applying +/- maxadj cannot overflow */
	while (freq && ((cs->mult + cs->maxadj < cs->mult)
		|| (cs->mult - cs->maxadj > cs->mult))) {
		cs->mult >>= 1;
		cs->shift--;
		cs->maxadj = clocksource_max_adjustment(cs);
	}

	/*
	 * Only warn for *special* clocksources that self-define
	 * their mult/shift values and don't specify a freq.
	 */
	WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
		"timekeeping: Clocksource %s might overflow on 11%% adjustment\n",
		cs->name);

	/* Refresh max_idle_ns/max_cycles for the new mult/shift */
	clocksource_update_max_deferment(cs);

	pr_info("%s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n",
		cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
}
EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
764852db46dSJohn Stultz 
/**
 * __clocksource_register_scale - Used to install new clocksources
 * @cs:		clocksource to be registered
 * @scale:	Scale factor multiplied against freq to get clocksource hz
 * @freq:	clocksource frequency (cycles per second) divided by scale
 *
 * Returns zero. Note: despite the historical -EBUSY documentation, the
 * current implementation has no failure path and always returns 0.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * clocksource_register_hz() or clocksource_register_khz helper functions.
 */
int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
{

	/* Initialize mult/shift and max_idle_ns */
	__clocksource_update_freq_scale(cs, scale, freq);

	/* Add clocksource to the clocksource list */
	mutex_lock(&clocksource_mutex);
	clocksource_enqueue(cs);
	clocksource_enqueue_watchdog(cs);
	/* Re-evaluate best clocksource and watchdog with the new entry */
	clocksource_select();
	clocksource_select_watchdog(false);
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(__clocksource_register_scale);
792d7e81c26SJohn Stultz 
/*
 * Re-sort @cs within the rating-ordered clocksource_list after changing
 * its rating. Caller must hold clocksource_mutex.
 */
static void __clocksource_change_rating(struct clocksource *cs, int rating)
{
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
}
799d0981a1bSThomas Gleixner 
/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 * @cs:		clocksource to be changed
 * @rating:	new rating
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	mutex_lock(&clocksource_mutex);
	__clocksource_change_rating(cs, rating);
	/* The new rating may change which clocksource/watchdog is best */
	clocksource_select();
	clocksource_select_watchdog(false);
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);
814734efb46Sjohn stultz 
/*
 * Unbind clocksource @cs. Called with clocksource_mutex held
 *
 * Returns 0 on success, -EBUSY when no replacement watchdog or
 * clocksource could be installed.
 */
static int clocksource_unbind(struct clocksource *cs)
{
	if (clocksource_is_watchdog(cs)) {
		/* Select and try to install a replacement watchdog. */
		clocksource_select_watchdog(true);
		/* Still the watchdog? Then it could not be replaced. */
		if (clocksource_is_watchdog(cs))
			return -EBUSY;
	}

	if (cs == curr_clocksource) {
		/* Select and try to install a replacement clock source */
		clocksource_select_fallback();
		/* Still current? Then no usable replacement exists. */
		if (curr_clocksource == cs)
			return -EBUSY;
	}
	clocksource_dequeue_watchdog(cs);
	list_del_init(&cs->list);
	return 0;
}
8377eaeb343SThomas Gleixner 
8384713e22cSThomas Gleixner /**
8394713e22cSThomas Gleixner  * clocksource_unregister - remove a registered clocksource
840b1b73d09SKusanagi Kouichi  * @cs:	clocksource to be unregistered
8414713e22cSThomas Gleixner  */
842a89c7edbSThomas Gleixner int clocksource_unregister(struct clocksource *cs)
8434713e22cSThomas Gleixner {
844a89c7edbSThomas Gleixner 	int ret = 0;
845a89c7edbSThomas Gleixner 
84675c5158fSMartin Schwidefsky 	mutex_lock(&clocksource_mutex);
847a89c7edbSThomas Gleixner 	if (!list_empty(&cs->list))
848a89c7edbSThomas Gleixner 		ret = clocksource_unbind(cs);
84975c5158fSMartin Schwidefsky 	mutex_unlock(&clocksource_mutex);
850a89c7edbSThomas Gleixner 	return ret;
8514713e22cSThomas Gleixner }
852fb63a0ebSMartin Schwidefsky EXPORT_SYMBOL(clocksource_unregister);
8534713e22cSThomas Gleixner 
8542b013700SDaniel Walker #ifdef CONFIG_SYSFS
855734efb46Sjohn stultz /**
856734efb46Sjohn stultz  * sysfs_show_current_clocksources - sysfs interface for current clocksource
857734efb46Sjohn stultz  * @dev:	unused
858b1b73d09SKusanagi Kouichi  * @attr:	unused
859734efb46Sjohn stultz  * @buf:	char buffer to be filled with clocksource list
860734efb46Sjohn stultz  *
861734efb46Sjohn stultz  * Provides sysfs interface for listing current clocksource.
862734efb46Sjohn stultz  */
863734efb46Sjohn stultz static ssize_t
864d369a5d8SKay Sievers sysfs_show_current_clocksources(struct device *dev,
865d369a5d8SKay Sievers 				struct device_attribute *attr, char *buf)
866734efb46Sjohn stultz {
8675e2cb101SMiao Xie 	ssize_t count = 0;
868734efb46Sjohn stultz 
86975c5158fSMartin Schwidefsky 	mutex_lock(&clocksource_mutex);
8705e2cb101SMiao Xie 	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
87175c5158fSMartin Schwidefsky 	mutex_unlock(&clocksource_mutex);
872734efb46Sjohn stultz 
8735e2cb101SMiao Xie 	return count;
874734efb46Sjohn stultz }
875734efb46Sjohn stultz 
876891292a7SPatrick Palka ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
87729b54078SThomas Gleixner {
87829b54078SThomas Gleixner 	size_t ret = cnt;
87929b54078SThomas Gleixner 
88029b54078SThomas Gleixner 	/* strings from sysfs write are not 0 terminated! */
88129b54078SThomas Gleixner 	if (!cnt || cnt >= CS_NAME_LEN)
88229b54078SThomas Gleixner 		return -EINVAL;
88329b54078SThomas Gleixner 
88429b54078SThomas Gleixner 	/* strip of \n: */
88529b54078SThomas Gleixner 	if (buf[cnt-1] == '\n')
88629b54078SThomas Gleixner 		cnt--;
88729b54078SThomas Gleixner 	if (cnt > 0)
88829b54078SThomas Gleixner 		memcpy(dst, buf, cnt);
88929b54078SThomas Gleixner 	dst[cnt] = 0;
89029b54078SThomas Gleixner 	return ret;
89129b54078SThomas Gleixner }
89229b54078SThomas Gleixner 
/**
 * sysfs_override_clocksource - interface for manually overriding clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	name of override clocksource
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t sysfs_override_clocksource(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	ssize_t ret;

	mutex_lock(&clocksource_mutex);

	/* Copy the name into override_name; if valid, re-run selection */
	ret = sysfs_get_uname(buf, override_name, count);
	if (ret >= 0)
		clocksource_select();

	mutex_unlock(&clocksource_mutex);

	return ret;
}
919734efb46Sjohn stultz 
/**
 * sysfs_unbind_current_clocksource - interface for manually unbinding clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	name of the clocksource to unbind
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually unbinding a clocksource.
 *
 * Returns @count on success, -ENODEV when no clocksource of that name is
 * registered, or a negative error from sysfs_get_uname() /
 * clocksource_unbind().
 */
static ssize_t sysfs_unbind_clocksource(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct clocksource *cs;
	char name[CS_NAME_LEN];
	ssize_t ret;

	ret = sysfs_get_uname(buf, name, count);
	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clocksource_mutex);
	list_for_each_entry(cs, &clocksource_list, list) {
		if (strcmp(cs->name, name))
			continue;
		ret = clocksource_unbind(cs);
		break;
	}
	mutex_unlock(&clocksource_mutex);

	return ret ? ret : count;
}
9537eaeb343SThomas Gleixner 
/**
 * sysfs_show_available_clocksources - sysfs interface for listing clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t
sysfs_show_available_clocksources(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct clocksource *src;
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	list_for_each_entry(src, &clocksource_list, list) {
		/*
		 * Don't show non-HRES clocksource if the tick code is
		 * in one shot mode (highres=on or nohz=on)
		 */
		if (!tick_oneshot_mode_active() ||
		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			/* Clamp remaining space to 0 so snprintf() never
			 * sees a negative size once PAGE_SIZE is exceeded */
			count += snprintf(buf + count,
				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
				  "%s ", src->name);
	}
	mutex_unlock(&clocksource_mutex);

	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

	return count;
}
989734efb46Sjohn stultz 
/*
 * Sysfs setup bits:
 */
/* current_clocksource: rw - show the current clocksource / write override */
static DEVICE_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources,
		   sysfs_override_clocksource);

/* unbind_clocksource: write-only - unbind the named clocksource */
static DEVICE_ATTR(unbind_clocksource, 0200, NULL, sysfs_unbind_clocksource);

/* available_clocksource: read-only - list registered clocksources */
static DEVICE_ATTR(available_clocksource, 0444,
		   sysfs_show_available_clocksources, NULL);
1000734efb46Sjohn stultz 
/* Bus type backing the clocksource sysfs subsystem */
static struct bus_type clocksource_subsys = {
	.name = "clocksource",
	.dev_name = "clocksource",
};

/* Single device the clocksource attribute files are attached to */
static struct device device_clocksource = {
	.id	= 0,
	.bus	= &clocksource_subsys,
};
1010734efb46Sjohn stultz 
1011ad596171Sjohn stultz static int __init init_clocksource_sysfs(void)
1012734efb46Sjohn stultz {
1013d369a5d8SKay Sievers 	int error = subsys_system_register(&clocksource_subsys, NULL);
1014734efb46Sjohn stultz 
1015734efb46Sjohn stultz 	if (!error)
1016d369a5d8SKay Sievers 		error = device_register(&device_clocksource);
1017734efb46Sjohn stultz 	if (!error)
1018d369a5d8SKay Sievers 		error = device_create_file(
1019734efb46Sjohn stultz 				&device_clocksource,
1020d369a5d8SKay Sievers 				&dev_attr_current_clocksource);
1021734efb46Sjohn stultz 	if (!error)
10227eaeb343SThomas Gleixner 		error = device_create_file(&device_clocksource,
10237eaeb343SThomas Gleixner 					   &dev_attr_unbind_clocksource);
10247eaeb343SThomas Gleixner 	if (!error)
1025d369a5d8SKay Sievers 		error = device_create_file(
1026734efb46Sjohn stultz 				&device_clocksource,
1027d369a5d8SKay Sievers 				&dev_attr_available_clocksource);
1028734efb46Sjohn stultz 	return error;
1029734efb46Sjohn stultz }
1030734efb46Sjohn stultz 
1031734efb46Sjohn stultz device_initcall(init_clocksource_sysfs);
10322b013700SDaniel Walker #endif /* CONFIG_SYSFS */
1033734efb46Sjohn stultz 
/**
 * boot_override_clocksource - boot clock override
 * @str:	override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 *
 * Always returns 1 to mark the boot option as handled.
 */
static int __init boot_override_clocksource(char* str)
{
	mutex_lock(&clocksource_mutex);
	if (str)
		/* strlcpy() NUL-terminates and silently truncates long names */
		strlcpy(override_name, str, sizeof(override_name));
	mutex_unlock(&clocksource_mutex);
	return 1;
}

__setup("clocksource=", boot_override_clocksource);
1051734efb46Sjohn stultz 
/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str:	override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name
 */
static int __init boot_override_clock(char* str)
{
	/* The legacy "pmtmr" name maps to the acpi_pm clocksource */
	if (!strcmp(str, "pmtmr")) {
		pr_warn("clock=pmtmr is deprecated - use clocksource=acpi_pm\n");
		return boot_override_clocksource("acpi_pm");
	}
	pr_warn("clock= boot option is deprecated - use clocksource=xyz\n");
	return boot_override_clocksource(str);
}

__setup("clock=", boot_override_clock);
1070