xref: /openbmc/linux/kernel/time/clocksource.c (revision 362fde04)
1734efb46Sjohn stultz /*
2734efb46Sjohn stultz  * linux/kernel/time/clocksource.c
3734efb46Sjohn stultz  *
4734efb46Sjohn stultz  * This file contains the functions which manage clocksource drivers.
5734efb46Sjohn stultz  *
6734efb46Sjohn stultz  * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
7734efb46Sjohn stultz  *
8734efb46Sjohn stultz  * This program is free software; you can redistribute it and/or modify
9734efb46Sjohn stultz  * it under the terms of the GNU General Public License as published by
10734efb46Sjohn stultz  * the Free Software Foundation; either version 2 of the License, or
11734efb46Sjohn stultz  * (at your option) any later version.
12734efb46Sjohn stultz  *
13734efb46Sjohn stultz  * This program is distributed in the hope that it will be useful,
14734efb46Sjohn stultz  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15734efb46Sjohn stultz  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16734efb46Sjohn stultz  * GNU General Public License for more details.
17734efb46Sjohn stultz  *
18734efb46Sjohn stultz  * You should have received a copy of the GNU General Public License
19734efb46Sjohn stultz  * along with this program; if not, write to the Free Software
20734efb46Sjohn stultz  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21734efb46Sjohn stultz  *
22734efb46Sjohn stultz  * TODO WishList:
23734efb46Sjohn stultz  *   o Allow clocksource drivers to be unregistered
24734efb46Sjohn stultz  */
25734efb46Sjohn stultz 
26d369a5d8SKay Sievers #include <linux/device.h>
27734efb46Sjohn stultz #include <linux/clocksource.h>
28734efb46Sjohn stultz #include <linux/init.h>
29734efb46Sjohn stultz #include <linux/module.h>
30dc29a365SMathieu Desnoyers #include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
3179bf2bb3SThomas Gleixner #include <linux/tick.h>
3201548f4dSMartin Schwidefsky #include <linux/kthread.h>
33734efb46Sjohn stultz 
3403e13cf5SThomas Gleixner #include "tick-internal.h"
353a978377SThomas Gleixner #include "timekeeping_internal.h"
3603e13cf5SThomas Gleixner 
377d2f944aSThomas Gleixner /**
387d2f944aSThomas Gleixner  * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
397d2f944aSThomas Gleixner  * @mult:	pointer to mult variable
407d2f944aSThomas Gleixner  * @shift:	pointer to shift variable
417d2f944aSThomas Gleixner  * @from:	frequency to convert from
427d2f944aSThomas Gleixner  * @to:		frequency to convert to
435fdade95SNicolas Pitre  * @maxsec:	guaranteed runtime conversion range in seconds
447d2f944aSThomas Gleixner  *
457d2f944aSThomas Gleixner  * The function evaluates the shift/mult pair for the scaled math
467d2f944aSThomas Gleixner  * operations of clocksources and clockevents.
477d2f944aSThomas Gleixner  *
487d2f944aSThomas Gleixner  * @to and @from are frequency values in HZ. For clock sources @to is
497d2f944aSThomas Gleixner  * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
507d2f944aSThomas Gleixner  * events @to is the counter frequency and @from is NSEC_PER_SEC.
517d2f944aSThomas Gleixner  *
525fdade95SNicolas Pitre  * The @maxsec conversion range argument controls the time frame in
537d2f944aSThomas Gleixner  * seconds which must be covered by the runtime conversion with the
547d2f944aSThomas Gleixner  * calculated mult and shift factors. This guarantees that no 64bit
557d2f944aSThomas Gleixner  * overflow happens when the input value of the conversion is
567d2f944aSThomas Gleixner  * multiplied with the calculated mult factor. Larger ranges may
577d2f944aSThomas Gleixner  * reduce the conversion accuracy by choosing smaller mult and shift
587d2f944aSThomas Gleixner  * factors.
597d2f944aSThomas Gleixner  */
607d2f944aSThomas Gleixner void
615fdade95SNicolas Pitre clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
627d2f944aSThomas Gleixner {
637d2f944aSThomas Gleixner 	u64 tmp;
647d2f944aSThomas Gleixner 	u32 sft, sftacc= 32;
657d2f944aSThomas Gleixner 
667d2f944aSThomas Gleixner 	/*
677d2f944aSThomas Gleixner 	 * Calculate the shift factor which is limiting the conversion
687d2f944aSThomas Gleixner 	 * range:
697d2f944aSThomas Gleixner 	 */
705fdade95SNicolas Pitre 	tmp = ((u64)maxsec * from) >> 32;
717d2f944aSThomas Gleixner 	while (tmp) {
727d2f944aSThomas Gleixner 		tmp >>=1;
737d2f944aSThomas Gleixner 		sftacc--;
747d2f944aSThomas Gleixner 	}
757d2f944aSThomas Gleixner 
767d2f944aSThomas Gleixner 	/*
777d2f944aSThomas Gleixner 	 * Find the conversion shift/mult pair which has the best
787d2f944aSThomas Gleixner 	 * accuracy and fits the maxsec conversion range:
797d2f944aSThomas Gleixner 	 */
807d2f944aSThomas Gleixner 	for (sft = 32; sft > 0; sft--) {
817d2f944aSThomas Gleixner 		tmp = (u64) to << sft;
82b5776c4aSjohn stultz 		tmp += from / 2;
837d2f944aSThomas Gleixner 		do_div(tmp, from);
847d2f944aSThomas Gleixner 		if ((tmp >> sftacc) == 0)
857d2f944aSThomas Gleixner 			break;
867d2f944aSThomas Gleixner 	}
877d2f944aSThomas Gleixner 	*mult = tmp;
887d2f944aSThomas Gleixner 	*shift = sft;
897d2f944aSThomas Gleixner }
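
/*
 * Illustrative sketch, not part of the original file: how a caller would
 * typically combine clocks_calc_mult_shift() with clocksource_cyc2ns().
 * The function name example_calc_factors() and the 19.2 MHz counter rate
 * are made up for this example.
 */
static __maybe_unused void example_calc_factors(void)
{
	u32 mult, shift;

	/*
	 * Factors converting cycles of a 19.2 MHz counter to nanoseconds,
	 * guaranteed not to overflow 64 bits for deltas of up to 600s.
	 */
	clocks_calc_mult_shift(&mult, &shift, 19200000, NSEC_PER_SEC, 600);

	/* 1000 cycles at 19.2 MHz are roughly 52083 ns */
	pr_info("1000 cycles = %lld ns\n",
		(long long)clocksource_cyc2ns(1000, mult, shift));
}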
907d2f944aSThomas Gleixner 
91734efb46Sjohn stultz /*[Clocksource internal variables]---------
92734efb46Sjohn stultz  * curr_clocksource:
93f1b82746SMartin Schwidefsky  *	currently selected clocksource.
94734efb46Sjohn stultz  * clocksource_list:
95734efb46Sjohn stultz  *	linked list with the registered clocksources
9675c5158fSMartin Schwidefsky  * clocksource_mutex:
9775c5158fSMartin Schwidefsky  *	protects manipulations to curr_clocksource and the clocksource_list
98734efb46Sjohn stultz  * override_name:
99734efb46Sjohn stultz  *	Name of the user-specified clocksource.
100734efb46Sjohn stultz  */
101f1b82746SMartin Schwidefsky static struct clocksource *curr_clocksource;
102734efb46Sjohn stultz static LIST_HEAD(clocksource_list);
10375c5158fSMartin Schwidefsky static DEFINE_MUTEX(clocksource_mutex);
10429b54078SThomas Gleixner static char override_name[CS_NAME_LEN];
10554a6bc0bSThomas Gleixner static int finished_booting;
106734efb46Sjohn stultz 
1075d8b34fdSThomas Gleixner #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
108f79e0258SMartin Schwidefsky static void clocksource_watchdog_work(struct work_struct *work);
109332962f2SThomas Gleixner static void clocksource_select(void);
110f79e0258SMartin Schwidefsky 
1115d8b34fdSThomas Gleixner static LIST_HEAD(watchdog_list);
1125d8b34fdSThomas Gleixner static struct clocksource *watchdog;
1135d8b34fdSThomas Gleixner static struct timer_list watchdog_timer;
114f79e0258SMartin Schwidefsky static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
1155d8b34fdSThomas Gleixner static DEFINE_SPINLOCK(watchdog_lock);
116fb63a0ebSMartin Schwidefsky static int watchdog_running;
1179fb60336SThomas Gleixner static atomic_t watchdog_reset_pending;
118b52f52a0SThomas Gleixner 
11901548f4dSMartin Schwidefsky static int clocksource_watchdog_kthread(void *data);
120d0981a1bSThomas Gleixner static void __clocksource_change_rating(struct clocksource *cs, int rating);
121c55c87c8SMartin Schwidefsky 
1225d8b34fdSThomas Gleixner /*
12335c35d1aSDaniel Walker  * Interval: 0.5sec Threshold: 0.0625s
1245d8b34fdSThomas Gleixner  */
1255d8b34fdSThomas Gleixner #define WATCHDOG_INTERVAL (HZ >> 1)
12635c35d1aSDaniel Walker #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
1275d8b34fdSThomas Gleixner 
12801548f4dSMartin Schwidefsky static void clocksource_watchdog_work(struct work_struct *work)
12901548f4dSMartin Schwidefsky {
13001548f4dSMartin Schwidefsky 	/*
13101548f4dSMartin Schwidefsky 	 * If kthread_run fails the next watchdog scan over the
13201548f4dSMartin Schwidefsky 	 * watchdog_list will find the unstable clock again.
13301548f4dSMartin Schwidefsky 	 */
13401548f4dSMartin Schwidefsky 	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
13501548f4dSMartin Schwidefsky }
13601548f4dSMartin Schwidefsky 
1377285dd7fSThomas Gleixner static void __clocksource_unstable(struct clocksource *cs)
1387285dd7fSThomas Gleixner {
1397285dd7fSThomas Gleixner 	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
1407285dd7fSThomas Gleixner 	cs->flags |= CLOCK_SOURCE_UNSTABLE;
14154a6bc0bSThomas Gleixner 	if (finished_booting)
1427285dd7fSThomas Gleixner 		schedule_work(&watchdog_work);
1437285dd7fSThomas Gleixner }
1447285dd7fSThomas Gleixner 
1458cf4e750SMartin Schwidefsky static void clocksource_unstable(struct clocksource *cs, int64_t delta)
1465d8b34fdSThomas Gleixner {
1475d8b34fdSThomas Gleixner 	printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n",
1485d8b34fdSThomas Gleixner 	       cs->name, delta);
1497285dd7fSThomas Gleixner 	__clocksource_unstable(cs);
1507285dd7fSThomas Gleixner }
1517285dd7fSThomas Gleixner 
1527285dd7fSThomas Gleixner /**
1537285dd7fSThomas Gleixner  * clocksource_mark_unstable - mark clocksource unstable via watchdog
1547285dd7fSThomas Gleixner  * @cs:		clocksource to be marked unstable
1557285dd7fSThomas Gleixner  *
1567285dd7fSThomas Gleixner  * This function is called instead of clocksource_change_rating from
1577285dd7fSThomas Gleixner  * cpu hotplug code to avoid a deadlock between the clocksource mutex
1587285dd7fSThomas Gleixner  * and the cpu hotplug mutex. It defers the update of the clocksource
1597285dd7fSThomas Gleixner  * to the watchdog thread.
1607285dd7fSThomas Gleixner  */
1617285dd7fSThomas Gleixner void clocksource_mark_unstable(struct clocksource *cs)
1627285dd7fSThomas Gleixner {
1637285dd7fSThomas Gleixner 	unsigned long flags;
1647285dd7fSThomas Gleixner 
1657285dd7fSThomas Gleixner 	spin_lock_irqsave(&watchdog_lock, flags);
1667285dd7fSThomas Gleixner 	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
1677285dd7fSThomas Gleixner 		if (list_empty(&cs->wd_list))
1687285dd7fSThomas Gleixner 			list_add(&cs->wd_list, &watchdog_list);
1697285dd7fSThomas Gleixner 		__clocksource_unstable(cs);
1707285dd7fSThomas Gleixner 	}
1717285dd7fSThomas Gleixner 	spin_unlock_irqrestore(&watchdog_lock, flags);
1725d8b34fdSThomas Gleixner }
1735d8b34fdSThomas Gleixner 
1745d8b34fdSThomas Gleixner static void clocksource_watchdog(unsigned long data)
1755d8b34fdSThomas Gleixner {
176c55c87c8SMartin Schwidefsky 	struct clocksource *cs;
1773a978377SThomas Gleixner 	cycle_t csnow, wdnow, delta;
1785d8b34fdSThomas Gleixner 	int64_t wd_nsec, cs_nsec;
1799fb60336SThomas Gleixner 	int next_cpu, reset_pending;
1805d8b34fdSThomas Gleixner 
1815d8b34fdSThomas Gleixner 	spin_lock(&watchdog_lock);
182fb63a0ebSMartin Schwidefsky 	if (!watchdog_running)
183fb63a0ebSMartin Schwidefsky 		goto out;
1845d8b34fdSThomas Gleixner 
1859fb60336SThomas Gleixner 	reset_pending = atomic_read(&watchdog_reset_pending);
1869fb60336SThomas Gleixner 
187c55c87c8SMartin Schwidefsky 	list_for_each_entry(cs, &watchdog_list, wd_list) {
188c55c87c8SMartin Schwidefsky 
189c55c87c8SMartin Schwidefsky 		/* Clocksource already marked unstable? */
19001548f4dSMartin Schwidefsky 		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
19154a6bc0bSThomas Gleixner 			if (finished_booting)
19201548f4dSMartin Schwidefsky 				schedule_work(&watchdog_work);
193c55c87c8SMartin Schwidefsky 			continue;
19401548f4dSMartin Schwidefsky 		}
195c55c87c8SMartin Schwidefsky 
196b5199515SThomas Gleixner 		local_irq_disable();
1978e19608eSMagnus Damm 		csnow = cs->read(cs);
198b5199515SThomas Gleixner 		wdnow = watchdog->read(watchdog);
199b5199515SThomas Gleixner 		local_irq_enable();
200b52f52a0SThomas Gleixner 
2018cf4e750SMartin Schwidefsky 		/* Clocksource initialized? */
2029fb60336SThomas Gleixner 		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
2039fb60336SThomas Gleixner 		    atomic_read(&watchdog_reset_pending)) {
2048cf4e750SMartin Schwidefsky 			cs->flags |= CLOCK_SOURCE_WATCHDOG;
205b5199515SThomas Gleixner 			cs->wd_last = wdnow;
206b5199515SThomas Gleixner 			cs->cs_last = csnow;
207b52f52a0SThomas Gleixner 			continue;
208b52f52a0SThomas Gleixner 		}
209b52f52a0SThomas Gleixner 
2103a978377SThomas Gleixner 		delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
2113a978377SThomas Gleixner 		wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
2123a978377SThomas Gleixner 					     watchdog->shift);
213b5199515SThomas Gleixner 
2143a978377SThomas Gleixner 		delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
2153a978377SThomas Gleixner 		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
216b5199515SThomas Gleixner 		cs->cs_last = csnow;
217b5199515SThomas Gleixner 		cs->wd_last = wdnow;
218b5199515SThomas Gleixner 
2199fb60336SThomas Gleixner 		if (atomic_read(&watchdog_reset_pending))
2209fb60336SThomas Gleixner 			continue;
2219fb60336SThomas Gleixner 
222b5199515SThomas Gleixner 		/* Check the deviation from the watchdog clocksource. */
2239fb60336SThomas Gleixner 		if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) {
2248cf4e750SMartin Schwidefsky 			clocksource_unstable(cs, cs_nsec - wd_nsec);
2258cf4e750SMartin Schwidefsky 			continue;
2268cf4e750SMartin Schwidefsky 		}
2278cf4e750SMartin Schwidefsky 
2288cf4e750SMartin Schwidefsky 		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
2298cf4e750SMartin Schwidefsky 		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
2305d8b34fdSThomas Gleixner 		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
231332962f2SThomas Gleixner 			/* Mark it valid for high-res. */
2325d8b34fdSThomas Gleixner 			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
233332962f2SThomas Gleixner 
23479bf2bb3SThomas Gleixner 			/*
235332962f2SThomas Gleixner 			 * clocksource_done_booting() will sort it if
236332962f2SThomas Gleixner 			 * finished_booting is not set yet.
23779bf2bb3SThomas Gleixner 			 */
238332962f2SThomas Gleixner 			if (!finished_booting)
239332962f2SThomas Gleixner 				continue;
240332962f2SThomas Gleixner 
241332962f2SThomas Gleixner 			/*
242332962f2SThomas Gleixner 			 * If this is not the current clocksource let
243332962f2SThomas Gleixner 			 * the watchdog thread reselect it. Due to the
244332962f2SThomas Gleixner 			 * change to high res this clocksource might
245332962f2SThomas Gleixner 			 * be preferred now. If it is the current
246332962f2SThomas Gleixner 			 * clocksource let the tick code know about
247332962f2SThomas Gleixner 			 * that change.
248332962f2SThomas Gleixner 			 */
249332962f2SThomas Gleixner 			if (cs != curr_clocksource) {
250332962f2SThomas Gleixner 				cs->flags |= CLOCK_SOURCE_RESELECT;
251332962f2SThomas Gleixner 				schedule_work(&watchdog_work);
252332962f2SThomas Gleixner 			} else {
25379bf2bb3SThomas Gleixner 				tick_clock_notify();
2545d8b34fdSThomas Gleixner 			}
2555d8b34fdSThomas Gleixner 		}
256332962f2SThomas Gleixner 	}
2575d8b34fdSThomas Gleixner 
2586993fc5bSAndi Kleen 	/*
2599fb60336SThomas Gleixner 	 * Only clear watchdog_reset_pending once we have cycled through
2609fb60336SThomas Gleixner 	 * all the clocksources.
2619fb60336SThomas Gleixner 	 */
2629fb60336SThomas Gleixner 	if (reset_pending)
2639fb60336SThomas Gleixner 		atomic_dec(&watchdog_reset_pending);
2649fb60336SThomas Gleixner 
2659fb60336SThomas Gleixner 	/*
266c55c87c8SMartin Schwidefsky 	 * Cycle through CPUs to check if the CPUs stay synchronized
267c55c87c8SMartin Schwidefsky 	 * to each other.
2686993fc5bSAndi Kleen 	 */
269c55c87c8SMartin Schwidefsky 	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
270cad0e458SMike Travis 	if (next_cpu >= nr_cpu_ids)
2716b954823SRusty Russell 		next_cpu = cpumask_first(cpu_online_mask);
2726993fc5bSAndi Kleen 	watchdog_timer.expires += WATCHDOG_INTERVAL;
2736993fc5bSAndi Kleen 	add_timer_on(&watchdog_timer, next_cpu);
274fb63a0ebSMartin Schwidefsky out:
2755d8b34fdSThomas Gleixner 	spin_unlock(&watchdog_lock);
2765d8b34fdSThomas Gleixner }
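
/*
 * Worked example (illustrative): the watchdog timer fires every
 * WATCHDOG_INTERVAL, i.e. 0.5s. If during one interval the watched
 * clocksource advanced by 563.1ms worth of nanoseconds while the watchdog
 * clocksource advanced by 500.0ms, the deviation of 63.1ms exceeds
 * WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4 = 62.5ms) and the watched
 * clocksource is marked unstable and later rerated by the watchdog kthread.
 */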
2770f8e8ef7SMartin Schwidefsky 
278fb63a0ebSMartin Schwidefsky static inline void clocksource_start_watchdog(void)
279fb63a0ebSMartin Schwidefsky {
280fb63a0ebSMartin Schwidefsky 	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
281fb63a0ebSMartin Schwidefsky 		return;
282fb63a0ebSMartin Schwidefsky 	init_timer(&watchdog_timer);
283fb63a0ebSMartin Schwidefsky 	watchdog_timer.function = clocksource_watchdog;
284fb63a0ebSMartin Schwidefsky 	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
285fb63a0ebSMartin Schwidefsky 	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
286fb63a0ebSMartin Schwidefsky 	watchdog_running = 1;
287fb63a0ebSMartin Schwidefsky }
288fb63a0ebSMartin Schwidefsky 
289fb63a0ebSMartin Schwidefsky static inline void clocksource_stop_watchdog(void)
290fb63a0ebSMartin Schwidefsky {
291fb63a0ebSMartin Schwidefsky 	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
292fb63a0ebSMartin Schwidefsky 		return;
293fb63a0ebSMartin Schwidefsky 	del_timer(&watchdog_timer);
294fb63a0ebSMartin Schwidefsky 	watchdog_running = 0;
295fb63a0ebSMartin Schwidefsky }
296fb63a0ebSMartin Schwidefsky 
2970f8e8ef7SMartin Schwidefsky static inline void clocksource_reset_watchdog(void)
2980f8e8ef7SMartin Schwidefsky {
2990f8e8ef7SMartin Schwidefsky 	struct clocksource *cs;
3000f8e8ef7SMartin Schwidefsky 
3010f8e8ef7SMartin Schwidefsky 	list_for_each_entry(cs, &watchdog_list, wd_list)
3020f8e8ef7SMartin Schwidefsky 		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
3030f8e8ef7SMartin Schwidefsky }
3040f8e8ef7SMartin Schwidefsky 
305b52f52a0SThomas Gleixner static void clocksource_resume_watchdog(void)
306b52f52a0SThomas Gleixner {
3079fb60336SThomas Gleixner 	atomic_inc(&watchdog_reset_pending);
308b52f52a0SThomas Gleixner }
309b52f52a0SThomas Gleixner 
310fb63a0ebSMartin Schwidefsky static void clocksource_enqueue_watchdog(struct clocksource *cs)
3115d8b34fdSThomas Gleixner {
3125d8b34fdSThomas Gleixner 	unsigned long flags;
3135d8b34fdSThomas Gleixner 
3145d8b34fdSThomas Gleixner 	spin_lock_irqsave(&watchdog_lock, flags);
3155d8b34fdSThomas Gleixner 	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
316fb63a0ebSMartin Schwidefsky 		/* cs is a clocksource to be watched. */
3175d8b34fdSThomas Gleixner 		list_add(&cs->wd_list, &watchdog_list);
318fb63a0ebSMartin Schwidefsky 		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
319948ac6d7SThomas Gleixner 	} else {
320fb63a0ebSMartin Schwidefsky 		/* cs is a watchdog. */
321948ac6d7SThomas Gleixner 		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
3225d8b34fdSThomas Gleixner 			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
323fb63a0ebSMartin Schwidefsky 		/* Pick the best watchdog. */
3245d8b34fdSThomas Gleixner 		if (!watchdog || cs->rating > watchdog->rating) {
3255d8b34fdSThomas Gleixner 			watchdog = cs;
3265d8b34fdSThomas Gleixner 			/* Reset watchdog cycles */
3270f8e8ef7SMartin Schwidefsky 			clocksource_reset_watchdog();
3285d8b34fdSThomas Gleixner 		}
3295d8b34fdSThomas Gleixner 	}
330fb63a0ebSMartin Schwidefsky 	/* Check if the watchdog timer needs to be started. */
331fb63a0ebSMartin Schwidefsky 	clocksource_start_watchdog();
3325d8b34fdSThomas Gleixner 	spin_unlock_irqrestore(&watchdog_lock, flags);
3335d8b34fdSThomas Gleixner }
334fb63a0ebSMartin Schwidefsky 
335fb63a0ebSMartin Schwidefsky static void clocksource_dequeue_watchdog(struct clocksource *cs)
336fb63a0ebSMartin Schwidefsky {
337fb63a0ebSMartin Schwidefsky 	unsigned long flags;
338fb63a0ebSMartin Schwidefsky 
339fb63a0ebSMartin Schwidefsky 	spin_lock_irqsave(&watchdog_lock, flags);
340a89c7edbSThomas Gleixner 	if (cs != watchdog) {
341fb63a0ebSMartin Schwidefsky 		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
342fb63a0ebSMartin Schwidefsky 			/* cs is a watched clocksource. */
343fb63a0ebSMartin Schwidefsky 			list_del_init(&cs->wd_list);
344fb63a0ebSMartin Schwidefsky 			/* Check if the watchdog timer needs to be stopped. */
345fb63a0ebSMartin Schwidefsky 			clocksource_stop_watchdog();
346a89c7edbSThomas Gleixner 		}
347a89c7edbSThomas Gleixner 	}
348fb63a0ebSMartin Schwidefsky 	spin_unlock_irqrestore(&watchdog_lock, flags);
349fb63a0ebSMartin Schwidefsky }
350fb63a0ebSMartin Schwidefsky 
351332962f2SThomas Gleixner static int __clocksource_watchdog_kthread(void)
352c55c87c8SMartin Schwidefsky {
353c55c87c8SMartin Schwidefsky 	struct clocksource *cs, *tmp;
354c55c87c8SMartin Schwidefsky 	unsigned long flags;
3556ea41d25SThomas Gleixner 	LIST_HEAD(unstable);
356332962f2SThomas Gleixner 	int select = 0;
357c55c87c8SMartin Schwidefsky 
358c55c87c8SMartin Schwidefsky 	spin_lock_irqsave(&watchdog_lock, flags);
359332962f2SThomas Gleixner 	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
360c55c87c8SMartin Schwidefsky 		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
361c55c87c8SMartin Schwidefsky 			list_del_init(&cs->wd_list);
3626ea41d25SThomas Gleixner 			list_add(&cs->wd_list, &unstable);
363332962f2SThomas Gleixner 			select = 1;
364332962f2SThomas Gleixner 		}
365332962f2SThomas Gleixner 		if (cs->flags & CLOCK_SOURCE_RESELECT) {
366332962f2SThomas Gleixner 			cs->flags &= ~CLOCK_SOURCE_RESELECT;
367332962f2SThomas Gleixner 			select = 1;
368332962f2SThomas Gleixner 		}
369c55c87c8SMartin Schwidefsky 	}
370c55c87c8SMartin Schwidefsky 	/* Check if the watchdog timer needs to be stopped. */
371c55c87c8SMartin Schwidefsky 	clocksource_stop_watchdog();
3726ea41d25SThomas Gleixner 	spin_unlock_irqrestore(&watchdog_lock, flags);
3736ea41d25SThomas Gleixner 
3746ea41d25SThomas Gleixner 	/* Needs to be done outside of watchdog lock */
3756ea41d25SThomas Gleixner 	list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
3766ea41d25SThomas Gleixner 		list_del_init(&cs->wd_list);
377d0981a1bSThomas Gleixner 		__clocksource_change_rating(cs, 0);
3786ea41d25SThomas Gleixner 	}
379332962f2SThomas Gleixner 	return select;
380332962f2SThomas Gleixner }
381332962f2SThomas Gleixner 
382332962f2SThomas Gleixner static int clocksource_watchdog_kthread(void *data)
383332962f2SThomas Gleixner {
384332962f2SThomas Gleixner 	mutex_lock(&clocksource_mutex);
385332962f2SThomas Gleixner 	if (__clocksource_watchdog_kthread())
386332962f2SThomas Gleixner 		clocksource_select();
387d0981a1bSThomas Gleixner 	mutex_unlock(&clocksource_mutex);
38801548f4dSMartin Schwidefsky 	return 0;
389c55c87c8SMartin Schwidefsky }
390c55c87c8SMartin Schwidefsky 
3917eaeb343SThomas Gleixner static bool clocksource_is_watchdog(struct clocksource *cs)
3927eaeb343SThomas Gleixner {
3937eaeb343SThomas Gleixner 	return cs == watchdog;
3947eaeb343SThomas Gleixner }
3957eaeb343SThomas Gleixner 
396fb63a0ebSMartin Schwidefsky #else /* CONFIG_CLOCKSOURCE_WATCHDOG */
397fb63a0ebSMartin Schwidefsky 
398fb63a0ebSMartin Schwidefsky static void clocksource_enqueue_watchdog(struct clocksource *cs)
3995d8b34fdSThomas Gleixner {
4005d8b34fdSThomas Gleixner 	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
4015d8b34fdSThomas Gleixner 		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
4025d8b34fdSThomas Gleixner }
403b52f52a0SThomas Gleixner 
404fb63a0ebSMartin Schwidefsky static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
405b52f52a0SThomas Gleixner static inline void clocksource_resume_watchdog(void) { }
406332962f2SThomas Gleixner static inline int __clocksource_watchdog_kthread(void) { return 0; }
4077eaeb343SThomas Gleixner static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
408397bbf6dSPrarit Bhargava void clocksource_mark_unstable(struct clocksource *cs) { }
409fb63a0ebSMartin Schwidefsky 
410fb63a0ebSMartin Schwidefsky #endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
4115d8b34fdSThomas Gleixner 
412734efb46Sjohn stultz /**
413c54a42b1SMagnus Damm  * clocksource_suspend - suspend the clocksource(s)
414c54a42b1SMagnus Damm  */
415c54a42b1SMagnus Damm void clocksource_suspend(void)
416c54a42b1SMagnus Damm {
417c54a42b1SMagnus Damm 	struct clocksource *cs;
418c54a42b1SMagnus Damm 
419c54a42b1SMagnus Damm 	list_for_each_entry_reverse(cs, &clocksource_list, list)
420c54a42b1SMagnus Damm 		if (cs->suspend)
421c54a42b1SMagnus Damm 			cs->suspend(cs);
422c54a42b1SMagnus Damm }
423c54a42b1SMagnus Damm 
424c54a42b1SMagnus Damm /**
425b52f52a0SThomas Gleixner  * clocksource_resume - resume the clocksource(s)
426b52f52a0SThomas Gleixner  */
427b52f52a0SThomas Gleixner void clocksource_resume(void)
428b52f52a0SThomas Gleixner {
4292e197586SMatthias Kaehlcke 	struct clocksource *cs;
430b52f52a0SThomas Gleixner 
43175c5158fSMartin Schwidefsky 	list_for_each_entry(cs, &clocksource_list, list)
432b52f52a0SThomas Gleixner 		if (cs->resume)
43317622339SMagnus Damm 			cs->resume(cs);
434b52f52a0SThomas Gleixner 
435b52f52a0SThomas Gleixner 	clocksource_resume_watchdog();
436b52f52a0SThomas Gleixner }
437b52f52a0SThomas Gleixner 
438b52f52a0SThomas Gleixner /**
4397c3078b6SJason Wessel  * clocksource_touch_watchdog - Update watchdog
4407c3078b6SJason Wessel  *
4417c3078b6SJason Wessel  * Update the watchdog after exception contexts such as kgdb so as not
4427b7422a5SThomas Gleixner  * to incorrectly trip the watchdog. This might fail when the kernel
4437b7422a5SThomas Gleixner  * was stopped in code which holds watchdog_lock.
4447c3078b6SJason Wessel  */
4457c3078b6SJason Wessel void clocksource_touch_watchdog(void)
4467c3078b6SJason Wessel {
4477c3078b6SJason Wessel 	clocksource_resume_watchdog();
4487c3078b6SJason Wessel }
4497c3078b6SJason Wessel 
450734efb46Sjohn stultz /**
451d65670a7SJohn Stultz  * clocksource_max_adjustment - Returns max adjustment amount
452d65670a7SJohn Stultz  * @cs:         Pointer to clocksource
453d65670a7SJohn Stultz  *
454d65670a7SJohn Stultz  */
455d65670a7SJohn Stultz static u32 clocksource_max_adjustment(struct clocksource *cs)
456d65670a7SJohn Stultz {
457d65670a7SJohn Stultz 	u64 ret;
458d65670a7SJohn Stultz 	/*
45988b28adfSJim Cromie 	 * We won't try to correct for more than 11% adjustments (110,000 ppm).
460d65670a7SJohn Stultz 	 */
461d65670a7SJohn Stultz 	ret = (u64)cs->mult * 11;
462d65670a7SJohn Stultz 	do_div(ret,100);
463d65670a7SJohn Stultz 	return (u32)ret;
464d65670a7SJohn Stultz }
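
/*
 * Worked example (illustrative): for a clocksource with mult == 100000000
 * this returns 100000000 * 11 / 100 == 11000000, i.e. the timekeeping
 * correction code may scale mult between 89000000 and 111000000 without
 * exceeding the permitted adjustment range.
 */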
465d65670a7SJohn Stultz 
466d65670a7SJohn Stultz /**
46787d8b9ebSStephen Boyd  * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
46887d8b9ebSStephen Boyd  * @mult:	cycle to nanosecond multiplier
46987d8b9ebSStephen Boyd  * @shift:	cycle to nanosecond divisor (power of two)
47087d8b9ebSStephen Boyd  * @maxadj:	maximum adjustment value to mult (~11%)
47187d8b9ebSStephen Boyd  * @mask:	bitmask for two's complement subtraction of non 64 bit counters
472362fde04SJohn Stultz  *
473362fde04SJohn Stultz  * NOTE: This function includes a safety margin of 50%, so that bad clock values
474362fde04SJohn Stultz  * can be detected.
47598962465SJon Hunter  */
47687d8b9ebSStephen Boyd u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask)
47798962465SJon Hunter {
47898962465SJon Hunter 	u64 max_nsecs, max_cycles;
47998962465SJon Hunter 
48098962465SJon Hunter 	/*
48198962465SJon Hunter 	 * Calculate the maximum number of cycles that we can pass to the
4826086e346SJohn Stultz 	 * cyc2ns() function without overflowing a 64-bit result.
48398962465SJon Hunter 	 */
4846086e346SJohn Stultz 	max_cycles = ULLONG_MAX;
4856086e346SJohn Stultz 	do_div(max_cycles, mult+maxadj);
48698962465SJon Hunter 
48798962465SJon Hunter 	/*
48898962465SJon Hunter 	 * The actual maximum number of cycles for which we can defer the
48987d8b9ebSStephen Boyd 	 * clocksource is determined by the minimum of max_cycles and mask.
490d65670a7SJohn Stultz 	 * Note: Here we subtract the maxadj to make sure we don't sleep for
491d65670a7SJohn Stultz 	 * too long if there's a large negative adjustment.
49298962465SJon Hunter 	 */
49387d8b9ebSStephen Boyd 	max_cycles = min(max_cycles, mask);
49487d8b9ebSStephen Boyd 	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);
49598962465SJon Hunter 
496362fde04SJohn Stultz 	/* Return 50% of the actual maximum, so we can detect bad values */
497362fde04SJohn Stultz 	max_nsecs >>= 1;
498362fde04SJohn Stultz 
49987d8b9ebSStephen Boyd 	return max_nsecs;
50087d8b9ebSStephen Boyd }
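
/*
 * Illustrative sketch, not part of the original file: querying the safe
 * deferment for a hypothetical 32-bit counter running at 24 MHz. The
 * function name example_max_idle() is made up for this example.
 */
static __maybe_unused void example_max_idle(void)
{
	u32 mult, shift;
	u64 maxadj, max_ns;

	clocks_calc_mult_shift(&mult, &shift, 24000000, NSEC_PER_SEC, 600);

	/* Same ~11% rule as clocksource_max_adjustment() */
	maxadj = (u64)mult * 11;
	do_div(maxadj, 100);

	max_ns = clocks_calc_max_nsecs(mult, shift, (u32)maxadj,
				       CLOCKSOURCE_MASK(32));
	/* Well below the ~179s it takes a 32-bit counter at 24 MHz to wrap */
	pr_info("max deferment: %llu ns\n", (unsigned long long)max_ns);
}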
50187d8b9ebSStephen Boyd 
50287d8b9ebSStephen Boyd /**
503362fde04SJohn Stultz  * clocksource_max_deferment - Returns max time the clocksource should be deferred
50487d8b9ebSStephen Boyd  * @cs:         Pointer to clocksource
50587d8b9ebSStephen Boyd  *
50687d8b9ebSStephen Boyd  */
50787d8b9ebSStephen Boyd static u64 clocksource_max_deferment(struct clocksource *cs)
50887d8b9ebSStephen Boyd {
50987d8b9ebSStephen Boyd 	u64 max_nsecs;
51087d8b9ebSStephen Boyd 
51187d8b9ebSStephen Boyd 	max_nsecs = clocks_calc_max_nsecs(cs->mult, cs->shift, cs->maxadj,
51287d8b9ebSStephen Boyd 					  cs->mask);
513362fde04SJohn Stultz 	return max_nsecs;
51498962465SJon Hunter }
51598962465SJon Hunter 
516592913ecSJohn Stultz #ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
517734efb46Sjohn stultz 
518f5a2e343SThomas Gleixner static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
5195d33b883SThomas Gleixner {
5205d33b883SThomas Gleixner 	struct clocksource *cs;
5215d33b883SThomas Gleixner 
5225d33b883SThomas Gleixner 	if (!finished_booting || list_empty(&clocksource_list))
5235d33b883SThomas Gleixner 		return NULL;
5245d33b883SThomas Gleixner 
5255d33b883SThomas Gleixner 	/*
5265d33b883SThomas Gleixner 	 * We pick the clocksource with the highest rating. If oneshot
5275d33b883SThomas Gleixner 	 * mode is active, we pick the highres valid clocksource with
5285d33b883SThomas Gleixner 	 * the best rating.
5295d33b883SThomas Gleixner 	 */
5305d33b883SThomas Gleixner 	list_for_each_entry(cs, &clocksource_list, list) {
531f5a2e343SThomas Gleixner 		if (skipcur && cs == curr_clocksource)
532f5a2e343SThomas Gleixner 			continue;
5335d33b883SThomas Gleixner 		if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES))
5345d33b883SThomas Gleixner 			continue;
5355d33b883SThomas Gleixner 		return cs;
5365d33b883SThomas Gleixner 	}
5375d33b883SThomas Gleixner 	return NULL;
5385d33b883SThomas Gleixner }
5395d33b883SThomas Gleixner 
540f5a2e343SThomas Gleixner static void __clocksource_select(bool skipcur)
541734efb46Sjohn stultz {
5425d33b883SThomas Gleixner 	bool oneshot = tick_oneshot_mode_active();
543f1b82746SMartin Schwidefsky 	struct clocksource *best, *cs;
5445d8b34fdSThomas Gleixner 
5455d33b883SThomas Gleixner 	/* Find the best suitable clocksource */
546f5a2e343SThomas Gleixner 	best = clocksource_find_best(oneshot, skipcur);
5475d33b883SThomas Gleixner 	if (!best)
548f1b82746SMartin Schwidefsky 		return;
5495d33b883SThomas Gleixner 
550f1b82746SMartin Schwidefsky 	/* Check for the override clocksource. */
551f1b82746SMartin Schwidefsky 	list_for_each_entry(cs, &clocksource_list, list) {
552f5a2e343SThomas Gleixner 		if (skipcur && cs == curr_clocksource)
553f5a2e343SThomas Gleixner 			continue;
554f1b82746SMartin Schwidefsky 		if (strcmp(cs->name, override_name) != 0)
555f1b82746SMartin Schwidefsky 			continue;
556f1b82746SMartin Schwidefsky 		/*
557f1b82746SMartin Schwidefsky 		 * Check to make sure we don't switch to a non-highres
558f1b82746SMartin Schwidefsky 		 * capable clocksource if the tick code is in oneshot
559f1b82746SMartin Schwidefsky 		 * mode (highres or nohz)
560f1b82746SMartin Schwidefsky 		 */
5615d33b883SThomas Gleixner 		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
562f1b82746SMartin Schwidefsky 			/* Override clocksource cannot be used. */
563f1b82746SMartin Schwidefsky 			printk(KERN_WARNING "Override clocksource %s is not "
564f1b82746SMartin Schwidefsky 			       "HRT compatible. Cannot switch while in "
565f1b82746SMartin Schwidefsky 			       "HRT/NOHZ mode\n", cs->name);
566f1b82746SMartin Schwidefsky 			override_name[0] = 0;
567f1b82746SMartin Schwidefsky 		} else
568f1b82746SMartin Schwidefsky 			/* Override clocksource can be used. */
569f1b82746SMartin Schwidefsky 			best = cs;
570f1b82746SMartin Schwidefsky 		break;
571734efb46Sjohn stultz 	}
572ba919d1cSThomas Gleixner 
573ba919d1cSThomas Gleixner 	if (curr_clocksource != best && !timekeeping_notify(best)) {
574ba919d1cSThomas Gleixner 		pr_info("Switched to clocksource %s\n", best->name);
57575c5158fSMartin Schwidefsky 		curr_clocksource = best;
576f1b82746SMartin Schwidefsky 	}
57775c5158fSMartin Schwidefsky }
57875c5158fSMartin Schwidefsky 
579f5a2e343SThomas Gleixner /**
580f5a2e343SThomas Gleixner  * clocksource_select - Select the best clocksource available
581f5a2e343SThomas Gleixner  *
582f5a2e343SThomas Gleixner  * Private function. Must hold clocksource_mutex when called.
583f5a2e343SThomas Gleixner  *
584f5a2e343SThomas Gleixner  * Select the clocksource with the best rating, or the clocksource,
585f5a2e343SThomas Gleixner  * which is selected by userspace override.
586f5a2e343SThomas Gleixner  */
587f5a2e343SThomas Gleixner static void clocksource_select(void)
588f5a2e343SThomas Gleixner {
589f5a2e343SThomas Gleixner 	return __clocksource_select(false);
590f5a2e343SThomas Gleixner }
591f5a2e343SThomas Gleixner 
5927eaeb343SThomas Gleixner static void clocksource_select_fallback(void)
5937eaeb343SThomas Gleixner {
5947eaeb343SThomas Gleixner 	return __clocksource_select(true);
5957eaeb343SThomas Gleixner }
5967eaeb343SThomas Gleixner 
597592913ecSJohn Stultz #else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */
59854a6bc0bSThomas Gleixner 
59954a6bc0bSThomas Gleixner static inline void clocksource_select(void) { }
6001eaff672SThomas Gleixner static inline void clocksource_select_fallback(void) { }
60154a6bc0bSThomas Gleixner 
60254a6bc0bSThomas Gleixner #endif
60354a6bc0bSThomas Gleixner 
60475c5158fSMartin Schwidefsky /*
60575c5158fSMartin Schwidefsky  * clocksource_done_booting - Called near the end of core bootup
60675c5158fSMartin Schwidefsky  *
60775c5158fSMartin Schwidefsky  * Hack to avoid lots of clocksource churn at boot time.
60875c5158fSMartin Schwidefsky  * We use fs_initcall because we want this to start before
60975c5158fSMartin Schwidefsky  * device_initcall but after subsys_initcall.
61075c5158fSMartin Schwidefsky  */
61175c5158fSMartin Schwidefsky static int __init clocksource_done_booting(void)
61275c5158fSMartin Schwidefsky {
613ad6759fbSjohn stultz 	mutex_lock(&clocksource_mutex);
614ad6759fbSjohn stultz 	curr_clocksource = clocksource_default_clock();
61575c5158fSMartin Schwidefsky 	finished_booting = 1;
61654a6bc0bSThomas Gleixner 	/*
61754a6bc0bSThomas Gleixner 	 * Run the watchdog first to eliminate unstable clock sources
61854a6bc0bSThomas Gleixner 	 */
619332962f2SThomas Gleixner 	__clocksource_watchdog_kthread();
62075c5158fSMartin Schwidefsky 	clocksource_select();
621e6c73305SThomas Gleixner 	mutex_unlock(&clocksource_mutex);
62275c5158fSMartin Schwidefsky 	return 0;
62375c5158fSMartin Schwidefsky }
62475c5158fSMartin Schwidefsky fs_initcall(clocksource_done_booting);
625f1b82746SMartin Schwidefsky 
62692c7e002SThomas Gleixner /*
62792c7e002SThomas Gleixner  * Enqueue the clocksource sorted by rating
628734efb46Sjohn stultz  */
629f1b82746SMartin Schwidefsky static void clocksource_enqueue(struct clocksource *cs)
630734efb46Sjohn stultz {
631f1b82746SMartin Schwidefsky 	struct list_head *entry = &clocksource_list;
632f1b82746SMartin Schwidefsky 	struct clocksource *tmp;
633734efb46Sjohn stultz 
634f1b82746SMartin Schwidefsky 	list_for_each_entry(tmp, &clocksource_list, list)
63592c7e002SThomas Gleixner 		/* Keep track of the place where to insert */
636f1b82746SMartin Schwidefsky 		if (tmp->rating >= cs->rating)
637f1b82746SMartin Schwidefsky 			entry = &tmp->list;
638f1b82746SMartin Schwidefsky 	list_add(&cs->list, entry);
639734efb46Sjohn stultz }
640734efb46Sjohn stultz 
641d7e81c26SJohn Stultz /**
642852db46dSJohn Stultz  * __clocksource_updatefreq_scale - Used to update clocksource with new freq
643b1b73d09SKusanagi Kouichi  * @cs:		clocksource to be registered
644852db46dSJohn Stultz  * @scale:	Scale factor multiplied against freq to get clocksource hz
645852db46dSJohn Stultz  * @freq:	clocksource frequency (cycles per second) divided by scale
646852db46dSJohn Stultz  *
647852db46dSJohn Stultz  * This should only be called from the clocksource->enable() method.
648852db46dSJohn Stultz  *
649852db46dSJohn Stultz  * This *SHOULD NOT* be called directly! Please use the
650852db46dSJohn Stultz  * __clocksource_updatefreq_hz() or __clocksource_updatefreq_khz() helper functions.
651852db46dSJohn Stultz  */
652852db46dSJohn Stultz void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
653852db46dSJohn Stultz {
654c0e299b1SThomas Gleixner 	u64 sec;
655852db46dSJohn Stultz 	/*
656724ed53eSThomas Gleixner 	 * Calc the maximum number of seconds which we can run before
657724ed53eSThomas Gleixner 	 * wrapping around. For clocksources which have a mask > 32bit
658724ed53eSThomas Gleixner 	 * we need to limit the max sleep time to have a good
659724ed53eSThomas Gleixner 	 * conversion precision. 10 minutes is still a reasonable
660724ed53eSThomas Gleixner 	 * amount. That results in a shift value of 24 for a
661724ed53eSThomas Gleixner 	 * clocksource with mask >= 40bit and f >= 4GHz. That maps to
662362fde04SJohn Stultz 	 * ~ 0.06ppm granularity for NTP.
663852db46dSJohn Stultz 	 */
664362fde04SJohn Stultz 	sec = cs->mask;
665724ed53eSThomas Gleixner 	do_div(sec, freq);
666724ed53eSThomas Gleixner 	do_div(sec, scale);
667724ed53eSThomas Gleixner 	if (!sec)
668724ed53eSThomas Gleixner 		sec = 1;
669724ed53eSThomas Gleixner 	else if (sec > 600 && cs->mask > UINT_MAX)
670724ed53eSThomas Gleixner 		sec = 600;
671724ed53eSThomas Gleixner 
672852db46dSJohn Stultz 	clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
673724ed53eSThomas Gleixner 			       NSEC_PER_SEC / scale, sec * scale);
674d65670a7SJohn Stultz 
675d65670a7SJohn Stultz 	/*
676362fde04SJohn Stultz 	 * Ensure clocksources that have large 'mult' values don't overflow
677362fde04SJohn Stultz 	 * when adjusted.
678d65670a7SJohn Stultz 	 */
679d65670a7SJohn Stultz 	cs->maxadj = clocksource_max_adjustment(cs);
680d65670a7SJohn Stultz 	while ((cs->mult + cs->maxadj < cs->mult)
681d65670a7SJohn Stultz 		|| (cs->mult - cs->maxadj > cs->mult)) {
682d65670a7SJohn Stultz 		cs->mult >>= 1;
683d65670a7SJohn Stultz 		cs->shift--;
684d65670a7SJohn Stultz 		cs->maxadj = clocksource_max_adjustment(cs);
685d65670a7SJohn Stultz 	}
686d65670a7SJohn Stultz 
687852db46dSJohn Stultz 	cs->max_idle_ns = clocksource_max_deferment(cs);
688852db46dSJohn Stultz }
689852db46dSJohn Stultz EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
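
/*
 * Illustrative sketch, not part of the original file: a driver whose counter
 * frequency is only known once the clock is enabled can refresh mult/shift
 * from its ->enable() callback via the __clocksource_updatefreq_hz() wrapper
 * in <linux/clocksource.h>. example_cs_enable() and the 24 MHz rate are
 * made up for this example.
 */
static __maybe_unused int example_cs_enable(struct clocksource *cs)
{
	/* Stand-in for however the driver learns the real counter rate */
	u32 rate = 24000000;

	__clocksource_updatefreq_hz(cs, rate);
	return 0;
}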
690852db46dSJohn Stultz 
691852db46dSJohn Stultz /**
692d7e81c26SJohn Stultz  * __clocksource_register_scale - Used to install new clocksources
693b1b73d09SKusanagi Kouichi  * @cs:		clocksource to be registered
694d7e81c26SJohn Stultz  * @scale:	Scale factor multiplied against freq to get clocksource hz
695d7e81c26SJohn Stultz  * @freq:	clocksource frequency (cycles per second) divided by scale
696d7e81c26SJohn Stultz  *
697d7e81c26SJohn Stultz  * Returns -EBUSY if registration fails, zero otherwise.
698d7e81c26SJohn Stultz  *
699d7e81c26SJohn Stultz  * This *SHOULD NOT* be called directly! Please use the
700d7e81c26SJohn Stultz  * clocksource_register_hz() or clocksource_register_khz() helper functions.
701d7e81c26SJohn Stultz  */
702d7e81c26SJohn Stultz int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
703d7e81c26SJohn Stultz {
704d7e81c26SJohn Stultz 
705b595076aSUwe Kleine-König 	/* Initialize mult/shift and max_idle_ns */
706852db46dSJohn Stultz 	__clocksource_updatefreq_scale(cs, scale, freq);
707d7e81c26SJohn Stultz 
708be278e98SJames Hartley 	/* Add clocksource to the clocksource list */
709d7e81c26SJohn Stultz 	mutex_lock(&clocksource_mutex);
710d7e81c26SJohn Stultz 	clocksource_enqueue(cs);
711d7e81c26SJohn Stultz 	clocksource_enqueue_watchdog(cs);
712e05b2efbSjohn stultz 	clocksource_select();
713d7e81c26SJohn Stultz 	mutex_unlock(&clocksource_mutex);
714d7e81c26SJohn Stultz 	return 0;
715d7e81c26SJohn Stultz }
716d7e81c26SJohn Stultz EXPORT_SYMBOL_GPL(__clocksource_register_scale);
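
/*
 * Illustrative sketch, not part of the original file: the usual way a timer
 * driver built on this API registers its counter, using the
 * clocksource_register_hz() wrapper. The names example_cs, example_cs_read
 * and the 24 MHz rate are made up for this example.
 */
static cycle_t example_cs_read(struct clocksource *cs)
{
	/* Read the free running hardware counter here */
	return (cycle_t)0;
}

static struct clocksource example_cs = {
	.name	= "example-timer",
	.rating	= 300,			/* good, MMIO-class clocksource */
	.read	= example_cs_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init example_cs_init(void)
{
	/* mult/shift and max_idle_ns are derived from the 24000000 Hz rate */
	return clocksource_register_hz(&example_cs, 24000000);
}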
717d7e81c26SJohn Stultz 
718d7e81c26SJohn Stultz 
719734efb46Sjohn stultz /**
720a2752549Sjohn stultz  * clocksource_register - Used to install new clocksources
721b1b73d09SKusanagi Kouichi  * @cs:		clocksource to be registered
722734efb46Sjohn stultz  *
723734efb46Sjohn stultz  * Returns -EBUSY if registration fails, zero otherwise.
724734efb46Sjohn stultz  */
725f1b82746SMartin Schwidefsky int clocksource_register(struct clocksource *cs)
726734efb46Sjohn stultz {
727d65670a7SJohn Stultz 	/* calculate max adjustment for given mult/shift */
728d65670a7SJohn Stultz 	cs->maxadj = clocksource_max_adjustment(cs);
729d65670a7SJohn Stultz 	WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
730d65670a7SJohn Stultz 		"Clocksource %s might overflow on 11%% adjustment\n",
731d65670a7SJohn Stultz 		cs->name);
732d65670a7SJohn Stultz 
73398962465SJon Hunter 	/* calculate max idle time permitted for this clocksource */
73498962465SJon Hunter 	cs->max_idle_ns = clocksource_max_deferment(cs);
73598962465SJon Hunter 
73675c5158fSMartin Schwidefsky 	mutex_lock(&clocksource_mutex);
737f1b82746SMartin Schwidefsky 	clocksource_enqueue(cs);
738fb63a0ebSMartin Schwidefsky 	clocksource_enqueue_watchdog(cs);
739e05b2efbSjohn stultz 	clocksource_select();
74075c5158fSMartin Schwidefsky 	mutex_unlock(&clocksource_mutex);
741f1b82746SMartin Schwidefsky 	return 0;
742734efb46Sjohn stultz }
743a2752549Sjohn stultz EXPORT_SYMBOL(clocksource_register);
744734efb46Sjohn stultz 
745d0981a1bSThomas Gleixner static void __clocksource_change_rating(struct clocksource *cs, int rating)
746d0981a1bSThomas Gleixner {
747d0981a1bSThomas Gleixner 	list_del(&cs->list);
748d0981a1bSThomas Gleixner 	cs->rating = rating;
749d0981a1bSThomas Gleixner 	clocksource_enqueue(cs);
750d0981a1bSThomas Gleixner }
751d0981a1bSThomas Gleixner 
752734efb46Sjohn stultz /**
75392c7e002SThomas Gleixner  * clocksource_change_rating - Change the rating of a registered clocksource
754b1b73d09SKusanagi Kouichi  * @cs:		clocksource to be changed
755b1b73d09SKusanagi Kouichi  * @rating:	new rating
756734efb46Sjohn stultz  */
75792c7e002SThomas Gleixner void clocksource_change_rating(struct clocksource *cs, int rating)
758734efb46Sjohn stultz {
75975c5158fSMartin Schwidefsky 	mutex_lock(&clocksource_mutex);
760d0981a1bSThomas Gleixner 	__clocksource_change_rating(cs, rating);
761332962f2SThomas Gleixner 	clocksource_select();
76275c5158fSMartin Schwidefsky 	mutex_unlock(&clocksource_mutex);
763734efb46Sjohn stultz }
764fb63a0ebSMartin Schwidefsky EXPORT_SYMBOL(clocksource_change_rating);
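
/*
 * Example (illustrative): a driver that detects at runtime that its hardware
 * is less trustworthy than expected can demote it, e.g.
 *
 *	clocksource_change_rating(&example_cs, 50);
 *
 * after which clocksource_select() may switch to a better rated source.
 */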
765734efb46Sjohn stultz 
7667eaeb343SThomas Gleixner /*
7677eaeb343SThomas Gleixner  * Unbind clocksource @cs. Called with clocksource_mutex held
7687eaeb343SThomas Gleixner  */
7697eaeb343SThomas Gleixner static int clocksource_unbind(struct clocksource *cs)
7707eaeb343SThomas Gleixner {
7717eaeb343SThomas Gleixner 	/*
7727eaeb343SThomas Gleixner 	 * I really can't convince myself to support this on hardware
7737eaeb343SThomas Gleixner 	 * designed by lobotomized monkeys.
7747eaeb343SThomas Gleixner 	 */
7757eaeb343SThomas Gleixner 	if (clocksource_is_watchdog(cs))
7767eaeb343SThomas Gleixner 		return -EBUSY;
7777eaeb343SThomas Gleixner 
7787eaeb343SThomas Gleixner 	if (cs == curr_clocksource) {
7797eaeb343SThomas Gleixner 		/* Select and try to install a replacement clock source */
7807eaeb343SThomas Gleixner 		clocksource_select_fallback();
7817eaeb343SThomas Gleixner 		if (curr_clocksource == cs)
7827eaeb343SThomas Gleixner 			return -EBUSY;
7837eaeb343SThomas Gleixner 	}
7847eaeb343SThomas Gleixner 	clocksource_dequeue_watchdog(cs);
7857eaeb343SThomas Gleixner 	list_del_init(&cs->list);
7867eaeb343SThomas Gleixner 	return 0;
7877eaeb343SThomas Gleixner }
7887eaeb343SThomas Gleixner 
7894713e22cSThomas Gleixner /**
7904713e22cSThomas Gleixner  * clocksource_unregister - remove a registered clocksource
791b1b73d09SKusanagi Kouichi  * @cs:	clocksource to be unregistered
7924713e22cSThomas Gleixner  */
793a89c7edbSThomas Gleixner int clocksource_unregister(struct clocksource *cs)
7944713e22cSThomas Gleixner {
795a89c7edbSThomas Gleixner 	int ret = 0;
796a89c7edbSThomas Gleixner 
79775c5158fSMartin Schwidefsky 	mutex_lock(&clocksource_mutex);
798a89c7edbSThomas Gleixner 	if (!list_empty(&cs->list))
799a89c7edbSThomas Gleixner 		ret = clocksource_unbind(cs);
80075c5158fSMartin Schwidefsky 	mutex_unlock(&clocksource_mutex);
801a89c7edbSThomas Gleixner 	return ret;
8024713e22cSThomas Gleixner }
803fb63a0ebSMartin Schwidefsky EXPORT_SYMBOL(clocksource_unregister);
8044713e22cSThomas Gleixner 
8052b013700SDaniel Walker #ifdef CONFIG_SYSFS
806734efb46Sjohn stultz /**
807734efb46Sjohn stultz  * sysfs_show_current_clocksources - sysfs interface for current clocksource
808734efb46Sjohn stultz  * @dev:	unused
809b1b73d09SKusanagi Kouichi  * @attr:	unused
810734efb46Sjohn stultz  * @buf:	char buffer to be filled with clocksource list
811734efb46Sjohn stultz  *
812734efb46Sjohn stultz  * Provides sysfs interface for listing current clocksource.
813734efb46Sjohn stultz  */
814734efb46Sjohn stultz static ssize_t
815d369a5d8SKay Sievers sysfs_show_current_clocksources(struct device *dev,
816d369a5d8SKay Sievers 				struct device_attribute *attr, char *buf)
817734efb46Sjohn stultz {
8185e2cb101SMiao Xie 	ssize_t count = 0;
819734efb46Sjohn stultz 
82075c5158fSMartin Schwidefsky 	mutex_lock(&clocksource_mutex);
8215e2cb101SMiao Xie 	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
82275c5158fSMartin Schwidefsky 	mutex_unlock(&clocksource_mutex);
823734efb46Sjohn stultz 
8245e2cb101SMiao Xie 	return count;
825734efb46Sjohn stultz }
826734efb46Sjohn stultz 
827891292a7SPatrick Palka ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
82829b54078SThomas Gleixner {
82929b54078SThomas Gleixner 	size_t ret = cnt;
83029b54078SThomas Gleixner 
83129b54078SThomas Gleixner 	/* strings from sysfs write are not 0 terminated! */
83229b54078SThomas Gleixner 	if (!cnt || cnt >= CS_NAME_LEN)
83329b54078SThomas Gleixner 		return -EINVAL;
83429b54078SThomas Gleixner 
83529b54078SThomas Gleixner 	/* strip off \n: */
83629b54078SThomas Gleixner 	if (buf[cnt-1] == '\n')
83729b54078SThomas Gleixner 		cnt--;
83829b54078SThomas Gleixner 	if (cnt > 0)
83929b54078SThomas Gleixner 		memcpy(dst, buf, cnt);
84029b54078SThomas Gleixner 	dst[cnt] = 0;
84129b54078SThomas Gleixner 	return ret;
84229b54078SThomas Gleixner }
84329b54078SThomas Gleixner 
844734efb46Sjohn stultz /**
845734efb46Sjohn stultz  * sysfs_override_clocksource - interface for manually overriding clocksource
846734efb46Sjohn stultz  * @dev:	unused
847b1b73d09SKusanagi Kouichi  * @attr:	unused
848734efb46Sjohn stultz  * @buf:	name of override clocksource
849734efb46Sjohn stultz  * @count:	length of buffer
850734efb46Sjohn stultz  *
851734efb46Sjohn stultz  * Takes input from sysfs interface for manually overriding the default
852b71a8eb0SUwe Kleine-König  * clocksource selection.
853734efb46Sjohn stultz  */
854d369a5d8SKay Sievers static ssize_t sysfs_override_clocksource(struct device *dev,
855d369a5d8SKay Sievers 					  struct device_attribute *attr,
856734efb46Sjohn stultz 					  const char *buf, size_t count)
857734efb46Sjohn stultz {
858233bcb41SElad Wexler 	ssize_t ret;
859734efb46Sjohn stultz 
86075c5158fSMartin Schwidefsky 	mutex_lock(&clocksource_mutex);
861734efb46Sjohn stultz 
86203e13cf5SThomas Gleixner 	ret = sysfs_get_uname(buf, override_name, count);
86329b54078SThomas Gleixner 	if (ret >= 0)
864f1b82746SMartin Schwidefsky 		clocksource_select();
865734efb46Sjohn stultz 
86675c5158fSMartin Schwidefsky 	mutex_unlock(&clocksource_mutex);
867734efb46Sjohn stultz 
868734efb46Sjohn stultz 	return ret;
869734efb46Sjohn stultz }
870734efb46Sjohn stultz 
871734efb46Sjohn stultz /**
8727eaeb343SThomas Gleixner  * sysfs_unbind_clocksource - interface for manually unbinding clocksource
8737eaeb343SThomas Gleixner  * @dev:	unused
8747eaeb343SThomas Gleixner  * @attr:	unused
8757eaeb343SThomas Gleixner  * @buf:	unused
8767eaeb343SThomas Gleixner  * @count:	length of buffer
8777eaeb343SThomas Gleixner  *
8787eaeb343SThomas Gleixner  * Takes input from sysfs interface for manually unbinding a clocksource.
8797eaeb343SThomas Gleixner  */
8807eaeb343SThomas Gleixner static ssize_t sysfs_unbind_clocksource(struct device *dev,
8817eaeb343SThomas Gleixner 					struct device_attribute *attr,
8827eaeb343SThomas Gleixner 					const char *buf, size_t count)
8837eaeb343SThomas Gleixner {
8847eaeb343SThomas Gleixner 	struct clocksource *cs;
8857eaeb343SThomas Gleixner 	char name[CS_NAME_LEN];
886233bcb41SElad Wexler 	ssize_t ret;
8877eaeb343SThomas Gleixner 
88803e13cf5SThomas Gleixner 	ret = sysfs_get_uname(buf, name, count);
8897eaeb343SThomas Gleixner 	if (ret < 0)
8907eaeb343SThomas Gleixner 		return ret;
8917eaeb343SThomas Gleixner 
8927eaeb343SThomas Gleixner 	ret = -ENODEV;
8937eaeb343SThomas Gleixner 	mutex_lock(&clocksource_mutex);
8947eaeb343SThomas Gleixner 	list_for_each_entry(cs, &clocksource_list, list) {
8957eaeb343SThomas Gleixner 		if (strcmp(cs->name, name))
8967eaeb343SThomas Gleixner 			continue;
8977eaeb343SThomas Gleixner 		ret = clocksource_unbind(cs);
8987eaeb343SThomas Gleixner 		break;
8997eaeb343SThomas Gleixner 	}
9007eaeb343SThomas Gleixner 	mutex_unlock(&clocksource_mutex);
9017eaeb343SThomas Gleixner 
9027eaeb343SThomas Gleixner 	return ret ? ret : count;
9037eaeb343SThomas Gleixner }
9047eaeb343SThomas Gleixner 
9057eaeb343SThomas Gleixner /**
906734efb46Sjohn stultz  * sysfs_show_available_clocksources - sysfs interface for listing clocksource
907734efb46Sjohn stultz  * @dev:	unused
908b1b73d09SKusanagi Kouichi  * @attr:	unused
909734efb46Sjohn stultz  * @buf:	char buffer to be filled with clocksource list
910734efb46Sjohn stultz  *
911734efb46Sjohn stultz  * Provides sysfs interface for listing registered clocksources
912734efb46Sjohn stultz  */
913734efb46Sjohn stultz static ssize_t
914d369a5d8SKay Sievers sysfs_show_available_clocksources(struct device *dev,
915d369a5d8SKay Sievers 				  struct device_attribute *attr,
9164a0b2b4dSAndi Kleen 				  char *buf)
917734efb46Sjohn stultz {
9182e197586SMatthias Kaehlcke 	struct clocksource *src;
9195e2cb101SMiao Xie 	ssize_t count = 0;
920734efb46Sjohn stultz 
92175c5158fSMartin Schwidefsky 	mutex_lock(&clocksource_mutex);
9222e197586SMatthias Kaehlcke 	list_for_each_entry(src, &clocksource_list, list) {
923cd6d95d8SThomas Gleixner 		/*
924cd6d95d8SThomas Gleixner 		 * Don't show non-HRES clocksource if the tick code is
925cd6d95d8SThomas Gleixner 		 * in one shot mode (highres=on or nohz=on)
926cd6d95d8SThomas Gleixner 		 */
927cd6d95d8SThomas Gleixner 		if (!tick_oneshot_mode_active() ||
9283f68535aSjohn stultz 		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
9295e2cb101SMiao Xie 			count += snprintf(buf + count,
9305e2cb101SMiao Xie 				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
9315e2cb101SMiao Xie 				  "%s ", src->name);
932734efb46Sjohn stultz 	}
93375c5158fSMartin Schwidefsky 	mutex_unlock(&clocksource_mutex);
934734efb46Sjohn stultz 
9355e2cb101SMiao Xie 	count += snprintf(buf + count,
9365e2cb101SMiao Xie 			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");
937734efb46Sjohn stultz 
9385e2cb101SMiao Xie 	return count;
939734efb46Sjohn stultz }
940734efb46Sjohn stultz 
941734efb46Sjohn stultz /*
942734efb46Sjohn stultz  * Sysfs setup bits:
943734efb46Sjohn stultz  */
944d369a5d8SKay Sievers static DEVICE_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources,
945734efb46Sjohn stultz 		   sysfs_override_clocksource);
946734efb46Sjohn stultz 
9477eaeb343SThomas Gleixner static DEVICE_ATTR(unbind_clocksource, 0200, NULL, sysfs_unbind_clocksource);
9487eaeb343SThomas Gleixner 
949d369a5d8SKay Sievers static DEVICE_ATTR(available_clocksource, 0444,
950734efb46Sjohn stultz 		   sysfs_show_available_clocksources, NULL);
951734efb46Sjohn stultz 
952d369a5d8SKay Sievers static struct bus_type clocksource_subsys = {
953af5ca3f4SKay Sievers 	.name = "clocksource",
954d369a5d8SKay Sievers 	.dev_name = "clocksource",
955734efb46Sjohn stultz };
956734efb46Sjohn stultz 
957d369a5d8SKay Sievers static struct device device_clocksource = {
958734efb46Sjohn stultz 	.id	= 0,
959d369a5d8SKay Sievers 	.bus	= &clocksource_subsys,
960734efb46Sjohn stultz };
961734efb46Sjohn stultz 
962ad596171Sjohn stultz static int __init init_clocksource_sysfs(void)
963734efb46Sjohn stultz {
964d369a5d8SKay Sievers 	int error = subsys_system_register(&clocksource_subsys, NULL);
965734efb46Sjohn stultz 
966734efb46Sjohn stultz 	if (!error)
967d369a5d8SKay Sievers 		error = device_register(&device_clocksource);
968734efb46Sjohn stultz 	if (!error)
969d369a5d8SKay Sievers 		error = device_create_file(
970734efb46Sjohn stultz 				&device_clocksource,
971d369a5d8SKay Sievers 				&dev_attr_current_clocksource);
972734efb46Sjohn stultz 	if (!error)
9737eaeb343SThomas Gleixner 		error = device_create_file(&device_clocksource,
9747eaeb343SThomas Gleixner 					   &dev_attr_unbind_clocksource);
9757eaeb343SThomas Gleixner 	if (!error)
976d369a5d8SKay Sievers 		error = device_create_file(
977734efb46Sjohn stultz 				&device_clocksource,
978d369a5d8SKay Sievers 				&dev_attr_available_clocksource);
979734efb46Sjohn stultz 	return error;
980734efb46Sjohn stultz }
981734efb46Sjohn stultz 
982734efb46Sjohn stultz device_initcall(init_clocksource_sysfs);
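
/*
 * Example (illustrative): the attributes registered above appear under
 * /sys/devices/system/clocksource/clocksource0/, e.g.
 *
 *	# cat /sys/devices/system/clocksource/clocksource0/available_clocksource
 *	tsc hpet acpi_pm
 *	# echo hpet > /sys/devices/system/clocksource/clocksource0/current_clocksource
 *
 * The names listed depend on the clocksources the running system registered.
 */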
9832b013700SDaniel Walker #endif /* CONFIG_SYSFS */
984734efb46Sjohn stultz 
985734efb46Sjohn stultz /**
986734efb46Sjohn stultz  * boot_override_clocksource - boot clock override
987734efb46Sjohn stultz  * @str:	override name
988734efb46Sjohn stultz  *
989734efb46Sjohn stultz  * Takes a clocksource= boot argument and uses it
990734efb46Sjohn stultz  * as the clocksource override name.
991734efb46Sjohn stultz  */
992734efb46Sjohn stultz static int __init boot_override_clocksource(char* str)
993734efb46Sjohn stultz {
99475c5158fSMartin Schwidefsky 	mutex_lock(&clocksource_mutex);
995734efb46Sjohn stultz 	if (str)
996734efb46Sjohn stultz 		strlcpy(override_name, str, sizeof(override_name));
99775c5158fSMartin Schwidefsky 	mutex_unlock(&clocksource_mutex);
998734efb46Sjohn stultz 	return 1;
999734efb46Sjohn stultz }
1000734efb46Sjohn stultz 
1001734efb46Sjohn stultz __setup("clocksource=", boot_override_clocksource);
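
/*
 * Example (illustrative): booting with "clocksource=hpet" on the kernel
 * command line fills override_name early, so the hpet clocksource is
 * preferred over higher rated ones once it registers (assuming the machine
 * actually provides an HPET).
 */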
1002734efb46Sjohn stultz 
1003734efb46Sjohn stultz /**
1004734efb46Sjohn stultz  * boot_override_clock - Compatibility layer for deprecated boot option
1005734efb46Sjohn stultz  * @str:	override name
1006734efb46Sjohn stultz  *
1007734efb46Sjohn stultz  * DEPRECATED! Takes a clock= boot argument and uses it
1008734efb46Sjohn stultz  * as the clocksource override name
1009734efb46Sjohn stultz  */
1010734efb46Sjohn stultz static int __init boot_override_clock(char* str)
1011734efb46Sjohn stultz {
10125d0cf410Sjohn stultz 	if (!strcmp(str, "pmtmr")) {
10135d0cf410Sjohn stultz 		printk("Warning: clock=pmtmr is deprecated. "
10145d0cf410Sjohn stultz 			"Use clocksource=acpi_pm.\n");
10155d0cf410Sjohn stultz 		return boot_override_clocksource("acpi_pm");
10165d0cf410Sjohn stultz 	}
10175d0cf410Sjohn stultz 	printk("Warning! clock= boot option is deprecated. "
10185d0cf410Sjohn stultz 		"Use clocksource=xyz\n");
1019734efb46Sjohn stultz 	return boot_override_clocksource(str);
1020734efb46Sjohn stultz }
1021734efb46Sjohn stultz 
1022734efb46Sjohn stultz __setup("clock=", boot_override_clock);