xref: /openbmc/linux/kernel/time/clocksource.c (revision 5fdade95)
1734efb46Sjohn stultz /*
2734efb46Sjohn stultz  * linux/kernel/time/clocksource.c
3734efb46Sjohn stultz  *
4734efb46Sjohn stultz  * This file contains the functions which manage clocksource drivers.
5734efb46Sjohn stultz  *
6734efb46Sjohn stultz  * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
7734efb46Sjohn stultz  *
8734efb46Sjohn stultz  * This program is free software; you can redistribute it and/or modify
9734efb46Sjohn stultz  * it under the terms of the GNU General Public License as published by
10734efb46Sjohn stultz  * the Free Software Foundation; either version 2 of the License, or
11734efb46Sjohn stultz  * (at your option) any later version.
12734efb46Sjohn stultz  *
13734efb46Sjohn stultz  * This program is distributed in the hope that it will be useful,
14734efb46Sjohn stultz  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15734efb46Sjohn stultz  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16734efb46Sjohn stultz  * GNU General Public License for more details.
17734efb46Sjohn stultz  *
18734efb46Sjohn stultz  * You should have received a copy of the GNU General Public License
19734efb46Sjohn stultz  * along with this program; if not, write to the Free Software
20734efb46Sjohn stultz  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21734efb46Sjohn stultz  *
22734efb46Sjohn stultz  * TODO WishList:
23734efb46Sjohn stultz  *   o Allow clocksource drivers to be unregistered
24734efb46Sjohn stultz  */
25734efb46Sjohn stultz 
26734efb46Sjohn stultz #include <linux/clocksource.h>
27734efb46Sjohn stultz #include <linux/sysdev.h>
28734efb46Sjohn stultz #include <linux/init.h>
29734efb46Sjohn stultz #include <linux/module.h>
30dc29a365SMathieu Desnoyers #include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
3179bf2bb3SThomas Gleixner #include <linux/tick.h>
3201548f4dSMartin Schwidefsky #include <linux/kthread.h>
33734efb46Sjohn stultz 
/**
 * timecounter_init - initialize a time counter
 * @tc:			Pointer to time counter which is to be initialized/reset
 * @cc:			A cycle counter, ready to be used
 * @start_tstamp:	Arbitrary initial time stamp
 *
 * After this call tc->nsec holds @start_tstamp and every subsequent
 * timecounter_read() advances it by the nanoseconds elapsed on @cc.
 */
void timecounter_init(struct timecounter *tc,
		      const struct cyclecounter *cc,
		      u64 start_tstamp)
{
	tc->cc = cc;
	/* Latch the current cycle count so the first delta starts from now. */
	tc->cycle_last = cc->read(cc);
	tc->nsec = start_tstamp;
}
EXPORT_SYMBOL_GPL(timecounter_init);
43a038a353SPatrick Ohly 
44a038a353SPatrick Ohly /**
45a038a353SPatrick Ohly  * timecounter_read_delta - get nanoseconds since last call of this function
46a038a353SPatrick Ohly  * @tc:         Pointer to time counter
47a038a353SPatrick Ohly  *
48a038a353SPatrick Ohly  * When the underlying cycle counter runs over, this will be handled
49a038a353SPatrick Ohly  * correctly as long as it does not run over more than once between
50a038a353SPatrick Ohly  * calls.
51a038a353SPatrick Ohly  *
52a038a353SPatrick Ohly  * The first call to this function for a new time counter initializes
53a038a353SPatrick Ohly  * the time tracking and returns an undefined result.
54a038a353SPatrick Ohly  */
55a038a353SPatrick Ohly static u64 timecounter_read_delta(struct timecounter *tc)
56a038a353SPatrick Ohly {
57a038a353SPatrick Ohly 	cycle_t cycle_now, cycle_delta;
58a038a353SPatrick Ohly 	u64 ns_offset;
59a038a353SPatrick Ohly 
60a038a353SPatrick Ohly 	/* read cycle counter: */
61a038a353SPatrick Ohly 	cycle_now = tc->cc->read(tc->cc);
62a038a353SPatrick Ohly 
63a038a353SPatrick Ohly 	/* calculate the delta since the last timecounter_read_delta(): */
64a038a353SPatrick Ohly 	cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;
65a038a353SPatrick Ohly 
66a038a353SPatrick Ohly 	/* convert to nanoseconds: */
67a038a353SPatrick Ohly 	ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta);
68a038a353SPatrick Ohly 
69a038a353SPatrick Ohly 	/* update time stamp of timecounter_read_delta() call: */
70a038a353SPatrick Ohly 	tc->cycle_last = cycle_now;
71a038a353SPatrick Ohly 
72a038a353SPatrick Ohly 	return ns_offset;
73a038a353SPatrick Ohly }
74a038a353SPatrick Ohly 
75a038a353SPatrick Ohly u64 timecounter_read(struct timecounter *tc)
76a038a353SPatrick Ohly {
77a038a353SPatrick Ohly 	u64 nsec;
78a038a353SPatrick Ohly 
79a038a353SPatrick Ohly 	/* increment time by nanoseconds since last call */
80a038a353SPatrick Ohly 	nsec = timecounter_read_delta(tc);
81a038a353SPatrick Ohly 	nsec += tc->nsec;
82a038a353SPatrick Ohly 	tc->nsec = nsec;
83a038a353SPatrick Ohly 
84a038a353SPatrick Ohly 	return nsec;
85a038a353SPatrick Ohly }
863586e0a9SDavid S. Miller EXPORT_SYMBOL_GPL(timecounter_read);
87a038a353SPatrick Ohly 
88a038a353SPatrick Ohly u64 timecounter_cyc2time(struct timecounter *tc,
89a038a353SPatrick Ohly 			 cycle_t cycle_tstamp)
90a038a353SPatrick Ohly {
91a038a353SPatrick Ohly 	u64 cycle_delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
92a038a353SPatrick Ohly 	u64 nsec;
93a038a353SPatrick Ohly 
94a038a353SPatrick Ohly 	/*
95a038a353SPatrick Ohly 	 * Instead of always treating cycle_tstamp as more recent
96a038a353SPatrick Ohly 	 * than tc->cycle_last, detect when it is too far in the
97a038a353SPatrick Ohly 	 * future and treat it as old time stamp instead.
98a038a353SPatrick Ohly 	 */
99a038a353SPatrick Ohly 	if (cycle_delta > tc->cc->mask / 2) {
100a038a353SPatrick Ohly 		cycle_delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
101a038a353SPatrick Ohly 		nsec = tc->nsec - cyclecounter_cyc2ns(tc->cc, cycle_delta);
102a038a353SPatrick Ohly 	} else {
103a038a353SPatrick Ohly 		nsec = cyclecounter_cyc2ns(tc->cc, cycle_delta) + tc->nsec;
104a038a353SPatrick Ohly 	}
105a038a353SPatrick Ohly 
106a038a353SPatrick Ohly 	return nsec;
107a038a353SPatrick Ohly }
1083586e0a9SDavid S. Miller EXPORT_SYMBOL_GPL(timecounter_cyc2time);
109a038a353SPatrick Ohly 
1107d2f944aSThomas Gleixner /**
1117d2f944aSThomas Gleixner  * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
1127d2f944aSThomas Gleixner  * @mult:	pointer to mult variable
1137d2f944aSThomas Gleixner  * @shift:	pointer to shift variable
1147d2f944aSThomas Gleixner  * @from:	frequency to convert from
1157d2f944aSThomas Gleixner  * @to:		frequency to convert to
116*5fdade95SNicolas Pitre  * @maxsec:	guaranteed runtime conversion range in seconds
1177d2f944aSThomas Gleixner  *
1187d2f944aSThomas Gleixner  * The function evaluates the shift/mult pair for the scaled math
1197d2f944aSThomas Gleixner  * operations of clocksources and clockevents.
1207d2f944aSThomas Gleixner  *
1217d2f944aSThomas Gleixner  * @to and @from are frequency values in HZ. For clock sources @to is
1227d2f944aSThomas Gleixner  * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
1237d2f944aSThomas Gleixner  * event @to is the counter frequency and @from is NSEC_PER_SEC.
1247d2f944aSThomas Gleixner  *
125*5fdade95SNicolas Pitre  * The @maxsec conversion range argument controls the time frame in
1267d2f944aSThomas Gleixner  * seconds which must be covered by the runtime conversion with the
1277d2f944aSThomas Gleixner  * calculated mult and shift factors. This guarantees that no 64bit
1287d2f944aSThomas Gleixner  * overflow happens when the input value of the conversion is
1297d2f944aSThomas Gleixner  * multiplied with the calculated mult factor. Larger ranges may
1307d2f944aSThomas Gleixner  * reduce the conversion accuracy by chosing smaller mult and shift
1317d2f944aSThomas Gleixner  * factors.
1327d2f944aSThomas Gleixner  */
1337d2f944aSThomas Gleixner void
134*5fdade95SNicolas Pitre clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
1357d2f944aSThomas Gleixner {
1367d2f944aSThomas Gleixner 	u64 tmp;
1377d2f944aSThomas Gleixner 	u32 sft, sftacc= 32;
1387d2f944aSThomas Gleixner 
1397d2f944aSThomas Gleixner 	/*
1407d2f944aSThomas Gleixner 	 * Calculate the shift factor which is limiting the conversion
1417d2f944aSThomas Gleixner 	 * range:
1427d2f944aSThomas Gleixner 	 */
143*5fdade95SNicolas Pitre 	tmp = ((u64)maxsec * from) >> 32;
1447d2f944aSThomas Gleixner 	while (tmp) {
1457d2f944aSThomas Gleixner 		tmp >>=1;
1467d2f944aSThomas Gleixner 		sftacc--;
1477d2f944aSThomas Gleixner 	}
1487d2f944aSThomas Gleixner 
1497d2f944aSThomas Gleixner 	/*
1507d2f944aSThomas Gleixner 	 * Find the conversion shift/mult pair which has the best
1517d2f944aSThomas Gleixner 	 * accuracy and fits the maxsec conversion range:
1527d2f944aSThomas Gleixner 	 */
1537d2f944aSThomas Gleixner 	for (sft = 32; sft > 0; sft--) {
1547d2f944aSThomas Gleixner 		tmp = (u64) to << sft;
1557d2f944aSThomas Gleixner 		do_div(tmp, from);
1567d2f944aSThomas Gleixner 		if ((tmp >> sftacc) == 0)
1577d2f944aSThomas Gleixner 			break;
1587d2f944aSThomas Gleixner 	}
1597d2f944aSThomas Gleixner 	*mult = tmp;
1607d2f944aSThomas Gleixner 	*shift = sft;
1617d2f944aSThomas Gleixner }
1627d2f944aSThomas Gleixner 
/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[32];
/* Non-zero once early boot is done; gates clocksource selection and
 * the scheduling of deferred watchdog work. */
static int finished_booting;
178734efb46Sjohn stultz 
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);

/* Clocksources currently being verified against the watchdog. */
static LIST_HEAD(watchdog_list);
/* The reference clocksource: best rated source not needing verification. */
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
/* Protects the watchdog state above (timer, list, watchdog_last). */
static DEFINE_SPINLOCK(watchdog_lock);
/* Reference cycle value at the previous watchdog tick. */
static cycle_t watchdog_last;
static int watchdog_running;

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

/*
 * Interval: 0.5sec Threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
1985d8b34fdSThomas Gleixner 
/*
 * Deferred work: spawn the kthread which downgrades clocksources flagged
 * unstable. A kthread is used because the downgrade needs
 * clocksource_mutex (see clocksource_watchdog_kthread()).
 */
static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * If kthread_run fails the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}
20701548f4dSMartin Schwidefsky 
/*
 * Flag @cs as unstable and kick off the deferred rating downgrade.
 * Both callers hold watchdog_lock.
 */
static void __clocksource_unstable(struct clocksource *cs)
{
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;
	/*
	 * Before boot is finished the work is not scheduled here; the
	 * watchdog scan re-detects the unstable flag later (see
	 * clocksource_watchdog()).
	 */
	if (finished_booting)
		schedule_work(&watchdog_work);
}
2157285dd7fSThomas Gleixner 
/*
 * Report a clocksource whose reading diverged from the watchdog by
 * @delta nanoseconds and mark it unstable. Called from the watchdog
 * timer with watchdog_lock held.
 */
static void clocksource_unstable(struct clocksource *cs, int64_t delta)
{
	printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n",
	       cs->name, delta);
	__clocksource_unstable(cs);
}
2227285dd7fSThomas Gleixner 
/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs:		clocksource to be marked unstable
 *
 * This function is called instead of clocksource_change_rating from
 * cpu hotplug code to avoid a deadlock between the clocksource mutex
 * and the cpu hotplug mutex. It defers the update of the clocksource
 * to the watchdog thread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		/* Make sure the watchdog kthread finds @cs on watchdog_list. */
		if (list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
2445d8b34fdSThomas Gleixner 
/*
 * Periodic timer callback: compare how far each watched clocksource has
 * advanced since the previous tick against the watchdog reference and
 * mark it unstable when the two differ by more than WATCHDOG_THRESHOLD.
 * The timer is re-armed on the next online CPU each round so per-CPU
 * deviations are observed as well.
 */
static void clocksource_watchdog(unsigned long data)
{
	struct clocksource *cs;
	cycle_t csnow, wdnow;
	int64_t wd_nsec, cs_nsec;
	int next_cpu;

	spin_lock(&watchdog_lock);
	if (!watchdog_running)
		goto out;

	/* Nanoseconds the reference advanced since the previous tick. */
	wdnow = watchdog->read(watchdog);
	wd_nsec = clocksource_cyc2ns((wdnow - watchdog_last) & watchdog->mask,
				     watchdog->mult, watchdog->shift);
	watchdog_last = wdnow;

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			/* Re-kick the deferred downgrade (may have failed). */
			if (finished_booting)
				schedule_work(&watchdog_work);
			continue;
		}

		csnow = cs->read(cs);

		/* Clocksource initialized ? First tick only records a baseline. */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = csnow;
			continue;
		}

		/* Check the deviation from the watchdog clocksource. */
		cs_nsec = clocksource_cyc2ns((csnow - cs->wd_last) &
					     cs->mask, cs->mult, cs->shift);
		cs->wd_last = csnow;
		if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
			clocksource_unstable(cs, cs_nsec - wd_nsec);
			continue;
		}

		/* Source passed: promote to highres if both clocks are continuous. */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
			/*
			 * We just marked the clocksource as highres-capable,
			 * notify the rest of the system as well so that we
			 * transition into high-res mode:
			 */
			tick_clock_notify();
		}
	}

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);
	watchdog_timer.expires += WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, next_cpu);
out:
	spin_unlock(&watchdog_lock);
}
3130f8e8ef7SMartin Schwidefsky 
/*
 * Start the watchdog timer if a reference watchdog exists and there is
 * something to watch. Caller holds watchdog_lock.
 */
static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
	init_timer(&watchdog_timer);
	watchdog_timer.function = clocksource_watchdog;
	/* Baseline reading so the first tick computes a sane delta. */
	watchdog_last = watchdog->read(watchdog);
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}
325fb63a0ebSMartin Schwidefsky 
/*
 * Stop the watchdog timer when it is running but no longer needed
 * (no reference watchdog, or nothing left to watch). Caller holds
 * watchdog_lock.
 */
static inline void clocksource_stop_watchdog(void)
{
	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
		return;
	del_timer(&watchdog_timer);
	watchdog_running = 0;
}
333fb63a0ebSMartin Schwidefsky 
/*
 * Drop the CLOCK_SOURCE_WATCHDOG flag from every watched clocksource so
 * the next watchdog tick re-records its baseline instead of computing a
 * bogus delta. Caller holds watchdog_lock.
 */
static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}
3410f8e8ef7SMartin Schwidefsky 
/* Discard stale watchdog readings after resume or kgdb exceptions. */
static void clocksource_resume_watchdog(void)
{
	unsigned long flags;

	/*
	 * We use trylock here to avoid a potential dead lock when
	 * kgdb calls this code after the kernel has been stopped with
	 * watchdog_lock held. When watchdog_lock is held we just
	 * return and accept, that the watchdog might trigger and mark
	 * the monitored clock source (usually TSC) unstable.
	 *
	 * This does not affect the other caller clocksource_resume()
	 * because at this point the kernel is UP, interrupts are
	 * disabled and nothing can hold watchdog_lock.
	 */
	if (!spin_trylock_irqsave(&watchdog_lock, flags))
		return;
	clocksource_reset_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
362b52f52a0SThomas Gleixner 
/*
 * Hook a newly registered clocksource into the watchdog machinery:
 * sources flagged CLOCK_SOURCE_MUST_VERIFY are watched, all others
 * compete by rating to become the reference watchdog clock.
 */
static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating) {
			watchdog = cs;
			/* Reset watchdog cycles: reference changed, baselines invalid. */
			clocksource_reset_watchdog();
		}
	}
	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
387fb63a0ebSMartin Schwidefsky 
/*
 * Undo clocksource_enqueue_watchdog(): drop a watched clocksource from
 * the list, or, if @cs was the reference watchdog itself, pick the best
 * remaining non-verified clocksource as the new reference.
 */
static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	struct clocksource *tmp;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a watched clocksource. */
		list_del_init(&cs->wd_list);
	} else if (cs == watchdog) {
		/* Reset watchdog cycles */
		clocksource_reset_watchdog();
		/* Current watchdog is removed. Find an alternative. */
		watchdog = NULL;
		list_for_each_entry(tmp, &clocksource_list, list) {
			if (tmp == cs || tmp->flags & CLOCK_SOURCE_MUST_VERIFY)
				continue;
			if (!watchdog || tmp->rating > watchdog->rating)
				watchdog = tmp;
		}
	}
	cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
414fb63a0ebSMartin Schwidefsky 
/*
 * Process-context worker: move every clocksource flagged
 * CLOCK_SOURCE_UNSTABLE off the watchdog list and downgrade its rating
 * to 0. Lock order matters: clocksource_mutex is taken before
 * watchdog_lock, and the rating change runs after watchdog_lock has
 * been dropped (__clocksource_change_rating must not run under it).
 */
static int clocksource_watchdog_kthread(void *data)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	LIST_HEAD(unstable);

	mutex_lock(&clocksource_mutex);
	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list)
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			list_add(&cs->wd_list, &unstable);
		}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	/* Needs to be done outside of watchdog lock */
	list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
		list_del_init(&cs->wd_list);
		__clocksource_change_rating(cs, 0);
	}
	mutex_unlock(&clocksource_mutex);
	return 0;
}
440c55c87c8SMartin Schwidefsky 
#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

/* No watchdog: trust continuous clocksources to be highres capable. */
static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

/* Remaining watchdog hooks collapse to no-ops. */
static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int clocksource_watchdog_kthread(void *data) { return 0; }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
4545d8b34fdSThomas Gleixner 
/**
 * clocksource_suspend - suspend the clocksource(s)
 */
void clocksource_suspend(void)
{
	struct clocksource *cs;

	/* Walked in reverse of the clocksource_resume() order. */
	list_for_each_entry_reverse(cs, &clocksource_list, list)
		if (cs->suspend)
			cs->suspend(cs);
}
466c54a42b1SMagnus Damm 
/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume(cs);

	/* Readings spanning the suspend would look like huge deltas; reset. */
	clocksource_resume_watchdog();
}
480b52f52a0SThomas Gleixner 
/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog. This might fail when the kernel
 * was stopped in code which holds watchdog_lock.
 */
void clocksource_touch_watchdog(void)
{
	/* Same mechanism as resume: discard stale watchdog baselines. */
	clocksource_resume_watchdog();
}
4927c3078b6SJason Wessel 
/**
 * clocksource_max_deferment - Returns max time the clocksource can be deferred
 * @cs:         Pointer to clocksource
 *
 * Returns the maximum idle time in nanoseconds, minus a safety margin,
 * for which clocksource_cyc2ns() on @cs neither overflows 64 bits nor
 * wraps the counter mask.
 */
static u64 clocksource_max_deferment(struct clocksource *cs)
{
	u64 max_nsecs, max_cycles;

	/*
	 * Calculate the maximum number of cycles that we can pass to the
	 * cyc2ns function without overflowing a 64-bit signed result. The
	 * maximum number of cycles is equal to ULLONG_MAX/cs->mult which
	 * is equivalent to the below.
	 * max_cycles < (2^63)/cs->mult
	 * max_cycles < 2^(log2((2^63)/cs->mult))
	 * max_cycles < 2^(log2(2^63) - log2(cs->mult))
	 * max_cycles < 2^(63 - log2(cs->mult))
	 * max_cycles < 1 << (63 - log2(cs->mult))
	 * Please note that we add 1 to the result of the log2 to account for
	 * any rounding errors, ensure the above inequality is satisfied and
	 * no overflow will occur.
	 */
	max_cycles = 1ULL << (63 - (ilog2(cs->mult) + 1));

	/*
	 * The actual maximum number of cycles we can defer the clocksource is
	 * determined by the minimum of max_cycles and cs->mask.
	 */
	max_cycles = min_t(u64, max_cycles, (u64) cs->mask);
	max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult, cs->shift);

	/*
	 * To ensure that the clocksource does not wrap whilst we are idle,
	 * keep back a safety margin that is cheap to compute with a shift.
	 * NOTE(review): the shift below subtracts max_nsecs >> 5, i.e. a
	 * margin of 1/32 (~3.1%); the original comment claimed 12.5%,
	 * which would be ">> 3". Confirm the intended margin against
	 * upstream history before relying on either figure.
	 */
	return max_nsecs - (max_nsecs >> 5);
}
53398962465SJon Hunter 
534592913ecSJohn Stultz #ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
535734efb46Sjohn stultz 
/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource,
 * which is selected by userspace override.
 */
static void clocksource_select(void)
{
	struct clocksource *best, *cs;

	/* Nothing to do before boot has finished or without candidates. */
	if (!finished_booting || list_empty(&clocksource_list))
		return;
	/* First clocksource on the list has the best rating. */
	best = list_first_entry(&clocksource_list, struct clocksource, list);
	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz)
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    tick_oneshot_mode_active()) {
			/* Override clocksource cannot be used. */
			printk(KERN_WARNING "Override clocksource %s is not "
			       "HRT compatible. Cannot switch while in "
			       "HRT/NOHZ mode\n", cs->name);
			/* Drop the unusable override so it is not retried. */
			override_name[0] = 0;
		} else
			/* Override clocksource can be used. */
			best = cs;
		break;
	}
	if (curr_clocksource != best) {
		printk(KERN_INFO "Switching to clocksource %s\n", best->name);
		curr_clocksource = best;
		/* Tell the timekeeping code to start using the new source. */
		timekeeping_notify(curr_clocksource);
	}
}
57975c5158fSMartin Schwidefsky 
580592913ecSJohn Stultz #else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */
58154a6bc0bSThomas Gleixner 
/* With CONFIG_ARCH_USES_GETTIMEOFFSET there is no runtime selection. */
static inline void clocksource_select(void) { }
58354a6bc0bSThomas Gleixner 
58454a6bc0bSThomas Gleixner #endif
58554a6bc0bSThomas Gleixner 
/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	/* Install the architecture default clock as the starting point. */
	mutex_lock(&clocksource_mutex);
	curr_clocksource = clocksource_default_clock();
	mutex_unlock(&clocksource_mutex);

	/* From here on clocksource_select() is allowed to switch clocks. */
	finished_booting = 1;

	/*
	 * Run the watchdog first to eliminate unstable clock sources
	 */
	clocksource_watchdog_kthread(NULL);

	/* Now pick the best of the clocksources that survived. */
	mutex_lock(&clocksource_mutex);
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
fs_initcall(clocksource_done_booting);
612f1b82746SMartin Schwidefsky 
61392c7e002SThomas Gleixner /*
61492c7e002SThomas Gleixner  * Enqueue the clocksource sorted by rating
615734efb46Sjohn stultz  */
616f1b82746SMartin Schwidefsky static void clocksource_enqueue(struct clocksource *cs)
617734efb46Sjohn stultz {
618f1b82746SMartin Schwidefsky 	struct list_head *entry = &clocksource_list;
619f1b82746SMartin Schwidefsky 	struct clocksource *tmp;
620734efb46Sjohn stultz 
621f1b82746SMartin Schwidefsky 	list_for_each_entry(tmp, &clocksource_list, list)
62292c7e002SThomas Gleixner 		/* Keep track of the place, where to insert */
623f1b82746SMartin Schwidefsky 		if (tmp->rating >= cs->rating)
624f1b82746SMartin Schwidefsky 			entry = &tmp->list;
625f1b82746SMartin Schwidefsky 	list_add(&cs->list, entry);
626734efb46Sjohn stultz }
627734efb46Sjohn stultz 
628d7e81c26SJohn Stultz 
/*
 * Maximum time we expect to go between ticks. This includes idle
 * tickless time. It provides the trade off between selecting a
 * mult/shift pair that is very precise but can only handle a short
 * period of time, vs. a mult/shift pair that can handle long periods
 * of time but isn't as precise.
 *
 * This is a subsystem constant, and actual hardware limitations
 * may override it (ie: clocksources that wrap every 3 seconds).
 *
 * Used by __clocksource_updatefreq_scale() when computing mult/shift.
 */
#define MAX_UPDATE_LENGTH 5 /* Seconds */
640d7e81c26SJohn Stultz 
/**
 * __clocksource_updatefreq_scale - Used to update clocksource with new freq
 * @cs:		clocksource to be updated
 * @scale:	Scale factor multiplied against freq to get clocksource hz
 * @freq:	clocksource frequency (cycles per second) divided by scale
 *
 * This should only be called from the clocksource->enable() method.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * clocksource_updatefreq_hz() or clocksource_updatefreq_khz helper functions.
 */
void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	/*
	 * Ideally we want to use  some of the limits used in
	 * clocksource_max_deferment, to provide a more informed
	 * MAX_UPDATE_LENGTH. But for now this just gets the
	 * register interface working properly.
	 */
	clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
				      NSEC_PER_SEC/scale,
				      MAX_UPDATE_LENGTH*scale);
	/* Recompute the idle deferment limit for the new mult/shift. */
	cs->max_idle_ns = clocksource_max_deferment(cs);
}
EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
666852db46dSJohn Stultz 
/**
 * __clocksource_register_scale - Used to install new clocksources
 * @cs:		clocksource to be registered
 * @scale:	Scale factor multiplied against freq to get clocksource hz
 * @freq:	clocksource frequency (cycles per second) divided by scale
 *
 * Returns zero; registration currently cannot fail.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * clocksource_register_hz() or clocksource_register_khz helper functions.
 */
int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
{

	/* Initialize mult/shift and max_idle_ns */
	__clocksource_updatefreq_scale(cs, scale, freq);

	/* Add clocksource to the clocksource list */
	mutex_lock(&clocksource_mutex);
	clocksource_enqueue(cs);
	clocksource_select();
	clocksource_enqueue_watchdog(cs);
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(__clocksource_register_scale);
693d7e81c26SJohn Stultz 
694d7e81c26SJohn Stultz 
/**
 * clocksource_register - Used to install new clocksources
 * @cs:		clocksource to be registered
 *
 * Returns zero; registration currently cannot fail.
 */
int clocksource_register(struct clocksource *cs)
{
	/* calculate max idle time permitted for this clocksource */
	cs->max_idle_ns = clocksource_max_deferment(cs);

	mutex_lock(&clocksource_mutex);
	clocksource_enqueue(cs);
	clocksource_select();
	clocksource_enqueue_watchdog(cs);
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL(clocksource_register);
714734efb46Sjohn stultz 
/*
 * Re-rate a clocksource and re-sort it into the list, then re-run
 * selection. Caller must hold clocksource_mutex (clocksource_select()
 * requires it).
 */
static void __clocksource_change_rating(struct clocksource *cs, int rating)
{
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
	clocksource_select();
}
722d0981a1bSThomas Gleixner 
/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 * @cs:		clocksource whose rating is to be changed
 * @rating:	new rating value
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	mutex_lock(&clocksource_mutex);
	__clocksource_change_rating(cs, rating);
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);
733734efb46Sjohn stultz 
/**
 * clocksource_unregister - remove a registered clocksource
 * @cs:		clocksource to be removed
 */
void clocksource_unregister(struct clocksource *cs)
{
	mutex_lock(&clocksource_mutex);
	/* Stop watchdog monitoring before the entry disappears. */
	clocksource_dequeue_watchdog(cs);
	list_del(&cs->list);
	/* Re-run selection in case @cs was the current clocksource. */
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_unregister);
7464713e22cSThomas Gleixner 
7472b013700SDaniel Walker #ifdef CONFIG_SYSFS
748734efb46Sjohn stultz /**
749734efb46Sjohn stultz  * sysfs_show_current_clocksources - sysfs interface for current clocksource
750734efb46Sjohn stultz  * @dev:	unused
751734efb46Sjohn stultz  * @buf:	char buffer to be filled with clocksource list
752734efb46Sjohn stultz  *
753734efb46Sjohn stultz  * Provides sysfs interface for listing current clocksource.
754734efb46Sjohn stultz  */
755734efb46Sjohn stultz static ssize_t
7564a0b2b4dSAndi Kleen sysfs_show_current_clocksources(struct sys_device *dev,
7574a0b2b4dSAndi Kleen 				struct sysdev_attribute *attr, char *buf)
758734efb46Sjohn stultz {
7595e2cb101SMiao Xie 	ssize_t count = 0;
760734efb46Sjohn stultz 
76175c5158fSMartin Schwidefsky 	mutex_lock(&clocksource_mutex);
7625e2cb101SMiao Xie 	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
76375c5158fSMartin Schwidefsky 	mutex_unlock(&clocksource_mutex);
764734efb46Sjohn stultz 
7655e2cb101SMiao Xie 	return count;
766734efb46Sjohn stultz }
767734efb46Sjohn stultz 
768734efb46Sjohn stultz /**
769734efb46Sjohn stultz  * sysfs_override_clocksource - interface for manually overriding clocksource
770734efb46Sjohn stultz  * @dev:	unused
771734efb46Sjohn stultz  * @buf:	name of override clocksource
772734efb46Sjohn stultz  * @count:	length of buffer
773734efb46Sjohn stultz  *
774734efb46Sjohn stultz  * Takes input from sysfs interface for manually overriding the default
775b71a8eb0SUwe Kleine-König  * clocksource selection.
776734efb46Sjohn stultz  */
777734efb46Sjohn stultz static ssize_t sysfs_override_clocksource(struct sys_device *dev,
7784a0b2b4dSAndi Kleen 					  struct sysdev_attribute *attr,
779734efb46Sjohn stultz 					  const char *buf, size_t count)
780734efb46Sjohn stultz {
781734efb46Sjohn stultz 	size_t ret = count;
78292c7e002SThomas Gleixner 
783734efb46Sjohn stultz 	/* strings from sysfs write are not 0 terminated! */
784734efb46Sjohn stultz 	if (count >= sizeof(override_name))
785734efb46Sjohn stultz 		return -EINVAL;
786734efb46Sjohn stultz 
787734efb46Sjohn stultz 	/* strip of \n: */
788734efb46Sjohn stultz 	if (buf[count-1] == '\n')
789734efb46Sjohn stultz 		count--;
790734efb46Sjohn stultz 
79175c5158fSMartin Schwidefsky 	mutex_lock(&clocksource_mutex);
792734efb46Sjohn stultz 
79392c7e002SThomas Gleixner 	if (count > 0)
794734efb46Sjohn stultz 		memcpy(override_name, buf, count);
795734efb46Sjohn stultz 	override_name[count] = 0;
796f1b82746SMartin Schwidefsky 	clocksource_select();
797734efb46Sjohn stultz 
79875c5158fSMartin Schwidefsky 	mutex_unlock(&clocksource_mutex);
799734efb46Sjohn stultz 
800734efb46Sjohn stultz 	return ret;
801734efb46Sjohn stultz }
802734efb46Sjohn stultz 
/**
 * sysfs_show_available_clocksources - sysfs interface for listing clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t
sysfs_show_available_clocksources(struct sys_device *dev,
				  struct sysdev_attribute *attr,
				  char *buf)
{
	struct clocksource *src;
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	list_for_each_entry(src, &clocksource_list, list) {
		/*
		 * Don't show non-HRES clocksource if the tick code is
		 * in one shot mode (highres=on or nohz=on)
		 */
		if (!tick_oneshot_mode_active() ||
		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			/* Remaining space is clamped to 0 so snprintf
			 * never sees a negative size. */
			count += snprintf(buf + count,
				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
				  "%s ", src->name);
	}
	mutex_unlock(&clocksource_mutex);

	/* Terminate the (possibly empty) list with a newline. */
	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

	return count;
}
837734efb46Sjohn stultz 
/*
 * Sysfs setup bits:
 */
/* Read/write (0644): show the current clocksource / write an override. */
static SYSDEV_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources,
		   sysfs_override_clocksource);

/* Read-only (0444): list the registered clocksources. */
static SYSDEV_ATTR(available_clocksource, 0444,
		   sysfs_show_available_clocksources, NULL);

static struct sysdev_class clocksource_sysclass = {
	.name = "clocksource",
};

static struct sys_device device_clocksource = {
	.id	= 0,
	.cls	= &clocksource_sysclass,
};
855734efb46Sjohn stultz 
856ad596171Sjohn stultz static int __init init_clocksource_sysfs(void)
857734efb46Sjohn stultz {
858734efb46Sjohn stultz 	int error = sysdev_class_register(&clocksource_sysclass);
859734efb46Sjohn stultz 
860734efb46Sjohn stultz 	if (!error)
861734efb46Sjohn stultz 		error = sysdev_register(&device_clocksource);
862734efb46Sjohn stultz 	if (!error)
863734efb46Sjohn stultz 		error = sysdev_create_file(
864734efb46Sjohn stultz 				&device_clocksource,
865734efb46Sjohn stultz 				&attr_current_clocksource);
866734efb46Sjohn stultz 	if (!error)
867734efb46Sjohn stultz 		error = sysdev_create_file(
868734efb46Sjohn stultz 				&device_clocksource,
869734efb46Sjohn stultz 				&attr_available_clocksource);
870734efb46Sjohn stultz 	return error;
871734efb46Sjohn stultz }
872734efb46Sjohn stultz 
873734efb46Sjohn stultz device_initcall(init_clocksource_sysfs);
8742b013700SDaniel Walker #endif /* CONFIG_SYSFS */
875734efb46Sjohn stultz 
/**
 * boot_override_clocksource - boot clock override
 * @str:	override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char* str)
{
	mutex_lock(&clocksource_mutex);
	if (str)
		strlcpy(override_name, str, sizeof(override_name));
	mutex_unlock(&clocksource_mutex);
	/* Returning 1 tells the __setup machinery the option was consumed. */
	return 1;
}

__setup("clocksource=", boot_override_clocksource);
893734efb46Sjohn stultz 
894734efb46Sjohn stultz /**
895734efb46Sjohn stultz  * boot_override_clock - Compatibility layer for deprecated boot option
896734efb46Sjohn stultz  * @str:	override name
897734efb46Sjohn stultz  *
898734efb46Sjohn stultz  * DEPRECATED! Takes a clock= boot argument and uses it
899734efb46Sjohn stultz  * as the clocksource override name
900734efb46Sjohn stultz  */
901734efb46Sjohn stultz static int __init boot_override_clock(char* str)
902734efb46Sjohn stultz {
9035d0cf410Sjohn stultz 	if (!strcmp(str, "pmtmr")) {
9045d0cf410Sjohn stultz 		printk("Warning: clock=pmtmr is deprecated. "
9055d0cf410Sjohn stultz 			"Use clocksource=acpi_pm.\n");
9065d0cf410Sjohn stultz 		return boot_override_clocksource("acpi_pm");
9075d0cf410Sjohn stultz 	}
9085d0cf410Sjohn stultz 	printk("Warning! clock= boot option is deprecated. "
9095d0cf410Sjohn stultz 		"Use clocksource=xyz\n");
910734efb46Sjohn stultz 	return boot_override_clocksource(str);
911734efb46Sjohn stultz }
912734efb46Sjohn stultz 
913734efb46Sjohn stultz __setup("clock=", boot_override_clock);
914