xref: /openbmc/linux/arch/powerpc/kernel/time.c (revision 4a4cfe3836916e12282ceb5c4bdd799dc71af567)
1f2783c15SPaul Mackerras /*
2f2783c15SPaul Mackerras  * Common time routines among all ppc machines.
3f2783c15SPaul Mackerras  *
4f2783c15SPaul Mackerras  * Written by Cort Dougan (cort@cs.nmt.edu) to merge
5f2783c15SPaul Mackerras  * Paul Mackerras' version and mine for PReP and Pmac.
6f2783c15SPaul Mackerras  * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
7f2783c15SPaul Mackerras  * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
8f2783c15SPaul Mackerras  *
9f2783c15SPaul Mackerras  * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
10f2783c15SPaul Mackerras  * to make clock more stable (2.4.0-test5). The only thing
11f2783c15SPaul Mackerras  * that this code assumes is that the timebases have been synchronized
12f2783c15SPaul Mackerras  * by firmware on SMP and are never stopped (so never use the sleep
13f2783c15SPaul Mackerras  * power-saving mode on SMP; nap and doze are OK).
14f2783c15SPaul Mackerras  *
15f2783c15SPaul Mackerras  * Speeded up do_gettimeofday by getting rid of references to
16f2783c15SPaul Mackerras  * xtime (which required locks for consistency). (mikejc@us.ibm.com)
17f2783c15SPaul Mackerras  *
18f2783c15SPaul Mackerras  * TODO (not necessarily in this file):
19f2783c15SPaul Mackerras  * - improve precision and reproducibility of timebase frequency
20f2783c15SPaul Mackerras  * measurement at boot time. (for iSeries, we calibrate the timebase
21f2783c15SPaul Mackerras  * against the Titan chip's clock.)
22f2783c15SPaul Mackerras  * - for astronomical applications: add a new function to get
23f2783c15SPaul Mackerras  * unambiguous timestamps even around leap seconds. This needs
24f2783c15SPaul Mackerras  * a new timestamp format and a good name.
25f2783c15SPaul Mackerras  *
26f2783c15SPaul Mackerras  * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
27f2783c15SPaul Mackerras  *             "A Kernel Model for Precision Timekeeping" by Dave Mills
28f2783c15SPaul Mackerras  *
29f2783c15SPaul Mackerras  *      This program is free software; you can redistribute it and/or
30f2783c15SPaul Mackerras  *      modify it under the terms of the GNU General Public License
31f2783c15SPaul Mackerras  *      as published by the Free Software Foundation; either version
32f2783c15SPaul Mackerras  *      2 of the License, or (at your option) any later version.
33f2783c15SPaul Mackerras  */
34f2783c15SPaul Mackerras 
35f2783c15SPaul Mackerras #include <linux/errno.h>
36f2783c15SPaul Mackerras #include <linux/module.h>
37f2783c15SPaul Mackerras #include <linux/sched.h>
38f2783c15SPaul Mackerras #include <linux/kernel.h>
39f2783c15SPaul Mackerras #include <linux/param.h>
40f2783c15SPaul Mackerras #include <linux/string.h>
41f2783c15SPaul Mackerras #include <linux/mm.h>
42f2783c15SPaul Mackerras #include <linux/interrupt.h>
43f2783c15SPaul Mackerras #include <linux/timex.h>
44f2783c15SPaul Mackerras #include <linux/kernel_stat.h>
45f2783c15SPaul Mackerras #include <linux/time.h>
46f2783c15SPaul Mackerras #include <linux/init.h>
47f2783c15SPaul Mackerras #include <linux/profile.h>
48f2783c15SPaul Mackerras #include <linux/cpu.h>
49f2783c15SPaul Mackerras #include <linux/security.h>
50f2783c15SPaul Mackerras #include <linux/percpu.h>
51f2783c15SPaul Mackerras #include <linux/rtc.h>
52092b8f34SPaul Mackerras #include <linux/jiffies.h>
53c6622f63SPaul Mackerras #include <linux/posix-timers.h>
547d12e780SDavid Howells #include <linux/irq.h>
55f2783c15SPaul Mackerras 
56f2783c15SPaul Mackerras #include <asm/io.h>
57f2783c15SPaul Mackerras #include <asm/processor.h>
58f2783c15SPaul Mackerras #include <asm/nvram.h>
59f2783c15SPaul Mackerras #include <asm/cache.h>
60f2783c15SPaul Mackerras #include <asm/machdep.h>
61f2783c15SPaul Mackerras #include <asm/uaccess.h>
62f2783c15SPaul Mackerras #include <asm/time.h>
63f2783c15SPaul Mackerras #include <asm/prom.h>
64f2783c15SPaul Mackerras #include <asm/irq.h>
65f2783c15SPaul Mackerras #include <asm/div64.h>
662249ca9dSPaul Mackerras #include <asm/smp.h>
67a7f290daSBenjamin Herrenschmidt #include <asm/vdso_datapage.h>
68f2783c15SPaul Mackerras #include <asm/firmware.h>
69f2783c15SPaul Mackerras #ifdef CONFIG_PPC_ISERIES
708875ccfbSKelly Daly #include <asm/iseries/it_lp_queue.h>
718021b8a7SKelly Daly #include <asm/iseries/hv_call_xm.h>
72f2783c15SPaul Mackerras #endif
73f2783c15SPaul Mackerras 
74*4a4cfe38STony Breeds /* powerpc clocksource/clockevent code */
75*4a4cfe38STony Breeds 
76*4a4cfe38STony Breeds #include <linux/clocksource.h>
77*4a4cfe38STony Breeds 
78*4a4cfe38STony Breeds static cycle_t rtc_read(void);
79*4a4cfe38STony Breeds static struct clocksource clocksource_rtc = {
80*4a4cfe38STony Breeds 	.name         = "rtc",
81*4a4cfe38STony Breeds 	.rating       = 400,
82*4a4cfe38STony Breeds 	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
83*4a4cfe38STony Breeds 	.mask         = CLOCKSOURCE_MASK(64),
84*4a4cfe38STony Breeds 	.shift        = 22,
85*4a4cfe38STony Breeds 	.mult         = 0,	/* To be filled in */
86*4a4cfe38STony Breeds 	.read         = rtc_read,
87*4a4cfe38STony Breeds };
88*4a4cfe38STony Breeds 
89*4a4cfe38STony Breeds static cycle_t timebase_read(void);
90*4a4cfe38STony Breeds static struct clocksource clocksource_timebase = {
91*4a4cfe38STony Breeds 	.name         = "timebase",
92*4a4cfe38STony Breeds 	.rating       = 400,
93*4a4cfe38STony Breeds 	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
94*4a4cfe38STony Breeds 	.mask         = CLOCKSOURCE_MASK(64),
95*4a4cfe38STony Breeds 	.shift        = 22,
96*4a4cfe38STony Breeds 	.mult         = 0,	/* To be filled in */
97*4a4cfe38STony Breeds 	.read         = timebase_read,
98*4a4cfe38STony Breeds };
99*4a4cfe38STony Breeds 
100f2783c15SPaul Mackerras #ifdef CONFIG_PPC_ISERIES
10171712b45STony Breeds static unsigned long __initdata iSeries_recal_titan;
10271712b45STony Breeds static signed long __initdata iSeries_recal_tb;
103*4a4cfe38STony Breeds 
104*4a4cfe38STony Breeds /* Forward declaration is only needed for iSeries builds */
105*4a4cfe38STony Breeds void __init clocksource_init(void);
106f2783c15SPaul Mackerras #endif
107f2783c15SPaul Mackerras 
108f2783c15SPaul Mackerras #define XSEC_PER_SEC (1024*1024)
109f2783c15SPaul Mackerras 
110f2783c15SPaul Mackerras #ifdef CONFIG_PPC64
111f2783c15SPaul Mackerras #define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
112f2783c15SPaul Mackerras #else
113f2783c15SPaul Mackerras /* compute ((xsec << 12) * max) >> 32 */
114f2783c15SPaul Mackerras #define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
115f2783c15SPaul Mackerras #endif
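
/*
 * An "xsec" is 1/2^20 of a second (see XSEC_PER_SEC above), i.e. one
 * xsec is roughly 953.67 ns.  The tb_to_xs value used below is "xsec
 * per timebase tick" expressed as a 0.64 binary fraction.  A rough
 * worked example (assuming a 512 MHz timebase, purely an illustrative
 * value, not taken from this file):
 *
 *	xsec per tick = 2^20 / 512000000 = 0.002048
 *	tb_to_xs     ~= 0.002048 * 2^64
 */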
116f2783c15SPaul Mackerras 
117f2783c15SPaul Mackerras unsigned long tb_ticks_per_jiffy;
118f2783c15SPaul Mackerras unsigned long tb_ticks_per_usec = 100; /* sane default */
119f2783c15SPaul Mackerras EXPORT_SYMBOL(tb_ticks_per_usec);
120f2783c15SPaul Mackerras unsigned long tb_ticks_per_sec;
1212cf82c02SPaul Mackerras EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */
122f2783c15SPaul Mackerras u64 tb_to_xs;
123f2783c15SPaul Mackerras unsigned tb_to_us;
124092b8f34SPaul Mackerras 
12519923c19SRoman Zippel #define TICKLEN_SCALE	TICK_LENGTH_SHIFT
126092b8f34SPaul Mackerras u64 last_tick_len;	/* units are ns / 2^TICKLEN_SCALE */
127092b8f34SPaul Mackerras u64 ticklen_to_xs;	/* 0.64 fraction */
128092b8f34SPaul Mackerras 
129092b8f34SPaul Mackerras /* If last_tick_len corresponds to about 1/HZ seconds, then
130092b8f34SPaul Mackerras    last_tick_len << TICKLEN_SHIFT will be about 2^63. */
131092b8f34SPaul Mackerras #define TICKLEN_SHIFT	(63 - 30 - TICKLEN_SCALE + SHIFT_HZ)
132092b8f34SPaul Mackerras 
133f2783c15SPaul Mackerras DEFINE_SPINLOCK(rtc_lock);
134f2783c15SPaul Mackerras EXPORT_SYMBOL_GPL(rtc_lock);
135f2783c15SPaul Mackerras 
136fc9069feSTony Breeds static u64 tb_to_ns_scale __read_mostly;
137fc9069feSTony Breeds static unsigned tb_to_ns_shift __read_mostly;
138fc9069feSTony Breeds static unsigned long boot_tb __read_mostly;
139f2783c15SPaul Mackerras 
140f2783c15SPaul Mackerras struct gettimeofday_struct do_gtod;
141f2783c15SPaul Mackerras 
142f2783c15SPaul Mackerras extern struct timezone sys_tz;
143f2783c15SPaul Mackerras static long timezone_offset;
144f2783c15SPaul Mackerras 
145f2783c15SPaul Mackerras unsigned long ppc_proc_freq;
1461474855dSBob Nelson EXPORT_SYMBOL(ppc_proc_freq);
147f2783c15SPaul Mackerras unsigned long ppc_tb_freq;
148f2783c15SPaul Mackerras 
149eb36c288SPaul Mackerras static u64 tb_last_jiffy __cacheline_aligned_in_smp;
150eb36c288SPaul Mackerras static DEFINE_PER_CPU(u64, last_jiffy);
15196c44507SPaul Mackerras 
152c6622f63SPaul Mackerras #ifdef CONFIG_VIRT_CPU_ACCOUNTING
153c6622f63SPaul Mackerras /*
154c6622f63SPaul Mackerras  * Factors for converting from cputime_t (timebase ticks) to
155c6622f63SPaul Mackerras  * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
156c6622f63SPaul Mackerras  * These are all stored as 0.64 fixed-point binary fractions.
157c6622f63SPaul Mackerras  */
158c6622f63SPaul Mackerras u64 __cputime_jiffies_factor;
1592cf82c02SPaul Mackerras EXPORT_SYMBOL(__cputime_jiffies_factor);
160c6622f63SPaul Mackerras u64 __cputime_msec_factor;
1612cf82c02SPaul Mackerras EXPORT_SYMBOL(__cputime_msec_factor);
162c6622f63SPaul Mackerras u64 __cputime_sec_factor;
1632cf82c02SPaul Mackerras EXPORT_SYMBOL(__cputime_sec_factor);
164c6622f63SPaul Mackerras u64 __cputime_clockt_factor;
1652cf82c02SPaul Mackerras EXPORT_SYMBOL(__cputime_clockt_factor);
166c6622f63SPaul Mackerras 
167c6622f63SPaul Mackerras static void calc_cputime_factors(void)
168c6622f63SPaul Mackerras {
169c6622f63SPaul Mackerras 	struct div_result res;
170c6622f63SPaul Mackerras 
171c6622f63SPaul Mackerras 	div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
172c6622f63SPaul Mackerras 	__cputime_jiffies_factor = res.result_low;
173c6622f63SPaul Mackerras 	div128_by_32(1000, 0, tb_ticks_per_sec, &res);
174c6622f63SPaul Mackerras 	__cputime_msec_factor = res.result_low;
175c6622f63SPaul Mackerras 	div128_by_32(1, 0, tb_ticks_per_sec, &res);
176c6622f63SPaul Mackerras 	__cputime_sec_factor = res.result_low;
177c6622f63SPaul Mackerras 	div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
178c6622f63SPaul Mackerras 	__cputime_clockt_factor = res.result_low;
179c6622f63SPaul Mackerras }
180c6622f63SPaul Mackerras 
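/*
 * Each factor above is (output rate / tb_ticks_per_sec) as a 0.64
 * binary fraction, so converting a tick count is a single high-word
 * multiply.  A sketch of how such a factor would be consumed (the
 * real conversion helpers live in asm/cputime.h; this is only an
 * illustration):
 *
 *	static inline unsigned long ticks_to_jiffies(u64 ct)
 *	{
 *		return mulhdu(ct, __cputime_jiffies_factor);
 *	}
 */
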
181c6622f63SPaul Mackerras /*
182c6622f63SPaul Mackerras  * Read the PURR on systems that have it, otherwise the timebase.
183c6622f63SPaul Mackerras  */
184c6622f63SPaul Mackerras static u64 read_purr(void)
185c6622f63SPaul Mackerras {
186c6622f63SPaul Mackerras 	if (cpu_has_feature(CPU_FTR_PURR))
187c6622f63SPaul Mackerras 		return mfspr(SPRN_PURR);
188c6622f63SPaul Mackerras 	return mftb();
189c6622f63SPaul Mackerras }
190c6622f63SPaul Mackerras 
191c6622f63SPaul Mackerras /*
192c6622f63SPaul Mackerras  * Account time for a transition between system, hard irq
193c6622f63SPaul Mackerras  * or soft irq state.
194c6622f63SPaul Mackerras  */
195c6622f63SPaul Mackerras void account_system_vtime(struct task_struct *tsk)
196c6622f63SPaul Mackerras {
197c6622f63SPaul Mackerras 	u64 now, delta;
198c6622f63SPaul Mackerras 	unsigned long flags;
199c6622f63SPaul Mackerras 
200c6622f63SPaul Mackerras 	local_irq_save(flags);
201c6622f63SPaul Mackerras 	now = read_purr();
202c6622f63SPaul Mackerras 	delta = now - get_paca()->startpurr;
203c6622f63SPaul Mackerras 	get_paca()->startpurr = now;
204c6622f63SPaul Mackerras 	if (!in_interrupt()) {
205c6622f63SPaul Mackerras 		delta += get_paca()->system_time;
206c6622f63SPaul Mackerras 		get_paca()->system_time = 0;
207c6622f63SPaul Mackerras 	}
208c6622f63SPaul Mackerras 	account_system_time(tsk, 0, delta);
209c6622f63SPaul Mackerras 	local_irq_restore(flags);
210c6622f63SPaul Mackerras }
211c6622f63SPaul Mackerras 
212c6622f63SPaul Mackerras /*
213c6622f63SPaul Mackerras  * Transfer the user and system times accumulated in the paca
214c6622f63SPaul Mackerras  * by the exception entry and exit code to the generic process
215c6622f63SPaul Mackerras  * user and system time records.
216c6622f63SPaul Mackerras  * Must be called with interrupts disabled.
217c6622f63SPaul Mackerras  */
218c6622f63SPaul Mackerras void account_process_vtime(struct task_struct *tsk)
219c6622f63SPaul Mackerras {
220c6622f63SPaul Mackerras 	cputime_t utime;
221c6622f63SPaul Mackerras 
222c6622f63SPaul Mackerras 	utime = get_paca()->user_time;
223c6622f63SPaul Mackerras 	get_paca()->user_time = 0;
224c6622f63SPaul Mackerras 	account_user_time(tsk, utime);
225c6622f63SPaul Mackerras }
226c6622f63SPaul Mackerras 
227c6622f63SPaul Mackerras static void account_process_time(struct pt_regs *regs)
228c6622f63SPaul Mackerras {
229c6622f63SPaul Mackerras 	int cpu = smp_processor_id();
230c6622f63SPaul Mackerras 
231c6622f63SPaul Mackerras 	account_process_vtime(current);
232c6622f63SPaul Mackerras 	run_local_timers();
233c6622f63SPaul Mackerras 	if (rcu_pending(cpu))
234c6622f63SPaul Mackerras 		rcu_check_callbacks(cpu, user_mode(regs));
235c6622f63SPaul Mackerras 	scheduler_tick();
236c6622f63SPaul Mackerras  	run_posix_cpu_timers(current);
237c6622f63SPaul Mackerras }
238c6622f63SPaul Mackerras 
239c6622f63SPaul Mackerras /*
240c6622f63SPaul Mackerras  * Stuff for accounting stolen time.
241c6622f63SPaul Mackerras  */
242c6622f63SPaul Mackerras struct cpu_purr_data {
243c6622f63SPaul Mackerras 	int	initialized;			/* thread is running */
244c6622f63SPaul Mackerras 	u64	tb;			/* last TB value read */
245c6622f63SPaul Mackerras 	u64	purr;			/* last PURR value read */
246c6622f63SPaul Mackerras };
247c6622f63SPaul Mackerras 
248df211c8aSNathan Lynch /*
249df211c8aSNathan Lynch  * Each entry in the cpu_purr_data array is manipulated only by its
250df211c8aSNathan Lynch  * "owner" cpu -- usually in the timer interrupt but also occasionally
251df211c8aSNathan Lynch  * in process context for cpu online.  As long as cpus do not touch
252df211c8aSNathan Lynch  * each others' cpu_purr_data, disabling local interrupts is
253df211c8aSNathan Lynch  * sufficient to serialize accesses.
254df211c8aSNathan Lynch  */
255c6622f63SPaul Mackerras static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);
256c6622f63SPaul Mackerras 
257c6622f63SPaul Mackerras static void snapshot_tb_and_purr(void *data)
258c6622f63SPaul Mackerras {
259df211c8aSNathan Lynch 	unsigned long flags;
260c6622f63SPaul Mackerras 	struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);
261c6622f63SPaul Mackerras 
262df211c8aSNathan Lynch 	local_irq_save(flags);
263c27da339SBenjamin Herrenschmidt 	p->tb = get_tb_or_rtc();
264cbcdb93dSStephen Rothwell 	p->purr = mfspr(SPRN_PURR);
265c6622f63SPaul Mackerras 	wmb();
266c6622f63SPaul Mackerras 	p->initialized = 1;
267df211c8aSNathan Lynch 	local_irq_restore(flags);
268c6622f63SPaul Mackerras }
269c6622f63SPaul Mackerras 
270c6622f63SPaul Mackerras /*
271c6622f63SPaul Mackerras  * Called during boot when all cpus have come up.
272c6622f63SPaul Mackerras  */
273c6622f63SPaul Mackerras void snapshot_timebases(void)
274c6622f63SPaul Mackerras {
275c6622f63SPaul Mackerras 	if (!cpu_has_feature(CPU_FTR_PURR))
276c6622f63SPaul Mackerras 		return;
277c6622f63SPaul Mackerras 	on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
278c6622f63SPaul Mackerras }
279c6622f63SPaul Mackerras 
280df211c8aSNathan Lynch /*
281df211c8aSNathan Lynch  * Must be called with interrupts disabled.
282df211c8aSNathan Lynch  */
283c6622f63SPaul Mackerras void calculate_steal_time(void)
284c6622f63SPaul Mackerras {
285cbcdb93dSStephen Rothwell 	u64 tb, purr;
286c6622f63SPaul Mackerras 	s64 stolen;
287cbcdb93dSStephen Rothwell 	struct cpu_purr_data *pme;
288c6622f63SPaul Mackerras 
289c6622f63SPaul Mackerras 	if (!cpu_has_feature(CPU_FTR_PURR))
290c6622f63SPaul Mackerras 		return;
291cbcdb93dSStephen Rothwell 	pme = &per_cpu(cpu_purr_data, smp_processor_id());
292c6622f63SPaul Mackerras 	if (!pme->initialized)
293c6622f63SPaul Mackerras 		return;		/* this can happen in early boot */
294c6622f63SPaul Mackerras 	tb = mftb();
295cbcdb93dSStephen Rothwell 	purr = mfspr(SPRN_PURR);
296c6622f63SPaul Mackerras 	stolen = (tb - pme->tb) - (purr - pme->purr);
297cbcdb93dSStephen Rothwell 	if (stolen > 0)
298c6622f63SPaul Mackerras 		account_steal_time(current, stolen);
299c6622f63SPaul Mackerras 	pme->tb = tb;
300c6622f63SPaul Mackerras 	pme->purr = purr;
301c6622f63SPaul Mackerras }
302c6622f63SPaul Mackerras 
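/*
 * Illustration of the computation above: the PURR only advances while
 * this thread is actually dispatched, whereas the timebase always
 * runs.  So if, between two samples, the timebase advanced by 1000
 * ticks but the PURR by only 700, roughly 300 timebase ticks were
 * "stolen" by the hypervisor or the sibling thread (numbers are
 * purely illustrative).
 */
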
3034cefebb1SMichael Neuling #ifdef CONFIG_PPC_SPLPAR
304c6622f63SPaul Mackerras /*
305c6622f63SPaul Mackerras  * Must be called before the cpu is added to the online map when
306c6622f63SPaul Mackerras  * a cpu is being brought up at runtime.
307c6622f63SPaul Mackerras  */
308c6622f63SPaul Mackerras static void snapshot_purr(void)
309c6622f63SPaul Mackerras {
310cbcdb93dSStephen Rothwell 	struct cpu_purr_data *pme;
311c6622f63SPaul Mackerras 	unsigned long flags;
312c6622f63SPaul Mackerras 
313c6622f63SPaul Mackerras 	if (!cpu_has_feature(CPU_FTR_PURR))
314c6622f63SPaul Mackerras 		return;
315df211c8aSNathan Lynch 	local_irq_save(flags);
316cbcdb93dSStephen Rothwell 	pme = &per_cpu(cpu_purr_data, smp_processor_id());
317cbcdb93dSStephen Rothwell 	pme->tb = mftb();
318cbcdb93dSStephen Rothwell 	pme->purr = mfspr(SPRN_PURR);
319c6622f63SPaul Mackerras 	pme->initialized = 1;
320df211c8aSNathan Lynch 	local_irq_restore(flags);
321c6622f63SPaul Mackerras }
322c6622f63SPaul Mackerras 
323c6622f63SPaul Mackerras #endif /* CONFIG_PPC_SPLPAR */
324c6622f63SPaul Mackerras 
325c6622f63SPaul Mackerras #else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
326c6622f63SPaul Mackerras #define calc_cputime_factors()
327c6622f63SPaul Mackerras #define account_process_time(regs)	update_process_times(user_mode(regs))
328c6622f63SPaul Mackerras #define calculate_steal_time()		do { } while (0)
329c6622f63SPaul Mackerras #endif
330c6622f63SPaul Mackerras 
331c6622f63SPaul Mackerras #if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR))
332c6622f63SPaul Mackerras #define snapshot_purr()			do { } while (0)
333c6622f63SPaul Mackerras #endif
334c6622f63SPaul Mackerras 
335c6622f63SPaul Mackerras /*
336c6622f63SPaul Mackerras  * Called when a cpu comes up after the system has finished booting,
337c6622f63SPaul Mackerras  * i.e. as a result of a hotplug cpu action.
338c6622f63SPaul Mackerras  */
339c6622f63SPaul Mackerras void snapshot_timebase(void)
340c6622f63SPaul Mackerras {
341c27da339SBenjamin Herrenschmidt 	__get_cpu_var(last_jiffy) = get_tb_or_rtc();
342c6622f63SPaul Mackerras 	snapshot_purr();
343c6622f63SPaul Mackerras }
344c6622f63SPaul Mackerras 
3456defa38bSPaul Mackerras void __delay(unsigned long loops)
3466defa38bSPaul Mackerras {
3476defa38bSPaul Mackerras 	unsigned long start;
3486defa38bSPaul Mackerras 	int diff;
3496defa38bSPaul Mackerras 
3506defa38bSPaul Mackerras 	if (__USE_RTC()) {
3516defa38bSPaul Mackerras 		start = get_rtcl();
3526defa38bSPaul Mackerras 		do {
3536defa38bSPaul Mackerras 			/* the RTCL register wraps at 1000000000 */
3546defa38bSPaul Mackerras 			diff = get_rtcl() - start;
3556defa38bSPaul Mackerras 			if (diff < 0)
3566defa38bSPaul Mackerras 				diff += 1000000000;
3576defa38bSPaul Mackerras 		} while (diff < loops);
3586defa38bSPaul Mackerras 	} else {
3596defa38bSPaul Mackerras 		start = get_tbl();
3606defa38bSPaul Mackerras 		while (get_tbl() - start < loops)
3616defa38bSPaul Mackerras 			HMT_low();
3626defa38bSPaul Mackerras 		HMT_medium();
3636defa38bSPaul Mackerras 	}
3646defa38bSPaul Mackerras }
3656defa38bSPaul Mackerras EXPORT_SYMBOL(__delay);
3666defa38bSPaul Mackerras 
3676defa38bSPaul Mackerras void udelay(unsigned long usecs)
3686defa38bSPaul Mackerras {
3696defa38bSPaul Mackerras 	__delay(tb_ticks_per_usec * usecs);
3706defa38bSPaul Mackerras }
3716defa38bSPaul Mackerras EXPORT_SYMBOL(udelay);
3726defa38bSPaul Mackerras 
373f2783c15SPaul Mackerras 
374f2783c15SPaul Mackerras /*
375f2783c15SPaul Mackerras  * There are two copies of tb_to_xs and stamp_xsec so that no
376f2783c15SPaul Mackerras  * lock is needed to access and use these values in
377f2783c15SPaul Mackerras  * do_gettimeofday.  We alternate the copies and as long as a
378f2783c15SPaul Mackerras  * reasonable time elapses between changes, there will never
379f2783c15SPaul Mackerras  * be inconsistent values.  ntpd has a minimum of one minute
380f2783c15SPaul Mackerras  * between updates.
381f2783c15SPaul Mackerras  */
382f2783c15SPaul Mackerras static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
3835d14a18dSPaul Mackerras 			       u64 new_tb_to_xs)
384f2783c15SPaul Mackerras {
385f2783c15SPaul Mackerras 	unsigned temp_idx;
386f2783c15SPaul Mackerras 	struct gettimeofday_vars *temp_varp;
387f2783c15SPaul Mackerras 
388f2783c15SPaul Mackerras 	temp_idx = (do_gtod.var_idx == 0);
389f2783c15SPaul Mackerras 	temp_varp = &do_gtod.vars[temp_idx];
390f2783c15SPaul Mackerras 
391f2783c15SPaul Mackerras 	temp_varp->tb_to_xs = new_tb_to_xs;
392f2783c15SPaul Mackerras 	temp_varp->tb_orig_stamp = new_tb_stamp;
393f2783c15SPaul Mackerras 	temp_varp->stamp_xsec = new_stamp_xsec;
394f2783c15SPaul Mackerras 	smp_mb();
395f2783c15SPaul Mackerras 	do_gtod.varp = temp_varp;
396f2783c15SPaul Mackerras 	do_gtod.var_idx = temp_idx;
397f2783c15SPaul Mackerras 
398f2783c15SPaul Mackerras 	/*
399f2783c15SPaul Mackerras 	 * tb_update_count is used to allow the userspace gettimeofday code
400f2783c15SPaul Mackerras 	 * to assure itself that it sees a consistent view of the tb_to_xs and
401f2783c15SPaul Mackerras 	 * stamp_xsec variables.  It reads the tb_update_count, then reads
402f2783c15SPaul Mackerras 	 * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
403f2783c15SPaul Mackerras 	 * the two values of tb_update_count match and are even then the
404f2783c15SPaul Mackerras 	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
405f2783c15SPaul Mackerras 	 * loops back and reads them again until this criterion is met.
4060a45d449SPaul Mackerras 	 * We expect the caller to have done the first increment of
4070a45d449SPaul Mackerras 	 * vdso_data->tb_update_count already.
408f2783c15SPaul Mackerras 	 */
409a7f290daSBenjamin Herrenschmidt 	vdso_data->tb_orig_stamp = new_tb_stamp;
410a7f290daSBenjamin Herrenschmidt 	vdso_data->stamp_xsec = new_stamp_xsec;
411a7f290daSBenjamin Herrenschmidt 	vdso_data->tb_to_xs = new_tb_to_xs;
412a7f290daSBenjamin Herrenschmidt 	vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
413a7f290daSBenjamin Herrenschmidt 	vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
414f2783c15SPaul Mackerras 	smp_wmb();
415a7f290daSBenjamin Herrenschmidt 	++(vdso_data->tb_update_count);
416f2783c15SPaul Mackerras }
417f2783c15SPaul Mackerras 
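/*
 * For reference, a userspace consumer of vdso_data is expected to use
 * a seqcount-style read loop matching the protocol described above;
 * roughly (a sketch, not the actual VDSO source):
 *
 *	do {
 *		seq   = vdso_data->tb_update_count;
 *		stamp = vdso_data->tb_orig_stamp;
 *		xsec  = vdso_data->stamp_xsec;
 *		t2x   = vdso_data->tb_to_xs;
 *	} while ((seq & 1) || seq != vdso_data->tb_update_count);
 *
 * with read barriers between the count loads and the data loads.
 */
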
418f2783c15SPaul Mackerras #ifdef CONFIG_SMP
419f2783c15SPaul Mackerras unsigned long profile_pc(struct pt_regs *regs)
420f2783c15SPaul Mackerras {
421f2783c15SPaul Mackerras 	unsigned long pc = instruction_pointer(regs);
422f2783c15SPaul Mackerras 
423f2783c15SPaul Mackerras 	if (in_lock_functions(pc))
424f2783c15SPaul Mackerras 		return regs->link;
425f2783c15SPaul Mackerras 
426f2783c15SPaul Mackerras 	return pc;
427f2783c15SPaul Mackerras }
428f2783c15SPaul Mackerras EXPORT_SYMBOL(profile_pc);
429f2783c15SPaul Mackerras #endif
430f2783c15SPaul Mackerras 
431f2783c15SPaul Mackerras #ifdef CONFIG_PPC_ISERIES
432f2783c15SPaul Mackerras 
433f2783c15SPaul Mackerras /*
434f2783c15SPaul Mackerras  * This function recalibrates the timebase based on the 49-bit time-of-day
435f2783c15SPaul Mackerras  * value in the Titan chip.  The Titan is much more accurate than the value
436f2783c15SPaul Mackerras  * returned by the service processor for the timebase frequency.
437f2783c15SPaul Mackerras  */
438f2783c15SPaul Mackerras 
43971712b45STony Breeds static int __init iSeries_tb_recal(void)
440f2783c15SPaul Mackerras {
441f2783c15SPaul Mackerras 	struct div_result divres;
442f2783c15SPaul Mackerras 	unsigned long titan, tb;
44371712b45STony Breeds 
44471712b45STony Breeds 	/* Make sure we only run on iSeries */
44571712b45STony Breeds 	if (!firmware_has_feature(FW_FEATURE_ISERIES))
44671712b45STony Breeds 		return -ENODEV;
44771712b45STony Breeds 
448f2783c15SPaul Mackerras 	tb = get_tb();
449f2783c15SPaul Mackerras 	titan = HvCallXm_loadTod();
450f2783c15SPaul Mackerras 	if ( iSeries_recal_titan ) {
451f2783c15SPaul Mackerras 		unsigned long tb_ticks = tb - iSeries_recal_tb;
452f2783c15SPaul Mackerras 		unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
453f2783c15SPaul Mackerras 		unsigned long new_tb_ticks_per_sec   = (tb_ticks * USEC_PER_SEC)/titan_usec;
454f2783c15SPaul Mackerras 		unsigned long new_tb_ticks_per_jiffy = (new_tb_ticks_per_sec+(HZ/2))/HZ;
455f2783c15SPaul Mackerras 		long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
456f2783c15SPaul Mackerras 		char sign = '+';
457f2783c15SPaul Mackerras 		/* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
458f2783c15SPaul Mackerras 		new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;
459f2783c15SPaul Mackerras 
460f2783c15SPaul Mackerras 		if ( tick_diff < 0 ) {
461f2783c15SPaul Mackerras 			tick_diff = -tick_diff;
462f2783c15SPaul Mackerras 			sign = '-';
463f2783c15SPaul Mackerras 		}
464f2783c15SPaul Mackerras 		if ( tick_diff ) {
465f2783c15SPaul Mackerras 			if ( tick_diff < tb_ticks_per_jiffy/25 ) {
466f2783c15SPaul Mackerras 				printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
467f2783c15SPaul Mackerras 						new_tb_ticks_per_jiffy, sign, tick_diff );
468f2783c15SPaul Mackerras 				tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
469f2783c15SPaul Mackerras 				tb_ticks_per_sec   = new_tb_ticks_per_sec;
470c6622f63SPaul Mackerras 				calc_cputime_factors();
471f2783c15SPaul Mackerras 				div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
472f2783c15SPaul Mackerras 				do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
473f2783c15SPaul Mackerras 				tb_to_xs = divres.result_low;
474f2783c15SPaul Mackerras 				do_gtod.varp->tb_to_xs = tb_to_xs;
475a7f290daSBenjamin Herrenschmidt 				vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
476a7f290daSBenjamin Herrenschmidt 				vdso_data->tb_to_xs = tb_to_xs;
477f2783c15SPaul Mackerras 			}
478f2783c15SPaul Mackerras 			else {
479f2783c15SPaul Mackerras 				printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
480f2783c15SPaul Mackerras 					"                   new tb_ticks_per_jiffy = %lu\n"
481f2783c15SPaul Mackerras 					"                   old tb_ticks_per_jiffy = %lu\n",
482f2783c15SPaul Mackerras 					new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );
483f2783c15SPaul Mackerras 			}
484f2783c15SPaul Mackerras 		}
485f2783c15SPaul Mackerras 	}
486f2783c15SPaul Mackerras 	iSeries_recal_titan = titan;
487f2783c15SPaul Mackerras 	iSeries_recal_tb = tb;
48871712b45STony Breeds 
489*4a4cfe38STony Breeds 	/* Called here now that we know accurate values for the timebase */
490*4a4cfe38STony Breeds 	clocksource_init();
49171712b45STony Breeds 	return 0;
492f2783c15SPaul Mackerras }
49371712b45STony Breeds late_initcall(iSeries_tb_recal);
49471712b45STony Breeds 
49571712b45STony Breeds /* Called from platform early init */
49671712b45STony Breeds void __init iSeries_time_init_early(void)
49771712b45STony Breeds {
49871712b45STony Breeds 	iSeries_recal_tb = get_tb();
49971712b45STony Breeds 	iSeries_recal_titan = HvCallXm_loadTod();
50071712b45STony Breeds }
50171712b45STony Breeds #endif /* CONFIG_PPC_ISERIES */
502f2783c15SPaul Mackerras 
503f2783c15SPaul Mackerras /*
504f2783c15SPaul Mackerras  * For iSeries shared processors, we have to let the hypervisor
505f2783c15SPaul Mackerras  * set the hardware decrementer.  We set a virtual decrementer
506f2783c15SPaul Mackerras  * in the lppaca and call the hypervisor if the virtual
507f2783c15SPaul Mackerras  * decrementer is less than the current value in the hardware
508f2783c15SPaul Mackerras  * decrementer. (almost always the new decrementer value will
509f2783c15SPaul Mackerras  * be greater than the current hardware decrementer so the hypervisor
510f2783c15SPaul Mackerras  * call will not be needed)
511f2783c15SPaul Mackerras  */
512f2783c15SPaul Mackerras 
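/*
 * Background for the handler below: the decrementer counts down at the
 * timebase frequency and takes an exception when its value goes
 * negative, so set_dec(tb_ticks_per_jiffy) arms an interrupt roughly
 * one jiffy in the future.
 */
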
513f2783c15SPaul Mackerras /*
514f2783c15SPaul Mackerras  * timer_interrupt - gets called when the decrementer overflows,
515f2783c15SPaul Mackerras  * with interrupts disabled.
516f2783c15SPaul Mackerras  */
517f2783c15SPaul Mackerras void timer_interrupt(struct pt_regs * regs)
518f2783c15SPaul Mackerras {
5197d12e780SDavid Howells 	struct pt_regs *old_regs;
520f2783c15SPaul Mackerras 	int next_dec;
521f2783c15SPaul Mackerras 	int cpu = smp_processor_id();
522f2783c15SPaul Mackerras 	unsigned long ticks;
5235db9fa95SNathan Lynch 	u64 tb_next_jiffy;
524f2783c15SPaul Mackerras 
525f2783c15SPaul Mackerras #ifdef CONFIG_PPC32
526f2783c15SPaul Mackerras 	if (atomic_read(&ppc_n_lost_interrupts) != 0)
527f2783c15SPaul Mackerras 		do_IRQ(regs);
528f2783c15SPaul Mackerras #endif
529f2783c15SPaul Mackerras 
5307d12e780SDavid Howells 	old_regs = set_irq_regs(regs);
531f2783c15SPaul Mackerras 	irq_enter();
532f2783c15SPaul Mackerras 
5337d12e780SDavid Howells 	profile_tick(CPU_PROFILING);
534c6622f63SPaul Mackerras 	calculate_steal_time();
535f2783c15SPaul Mackerras 
536f2783c15SPaul Mackerras #ifdef CONFIG_PPC_ISERIES
537501b6d29SStephen Rothwell 	if (firmware_has_feature(FW_FEATURE_ISERIES))
5383356bb9fSDavid Gibson 		get_lppaca()->int_dword.fields.decr_int = 0;
539f2783c15SPaul Mackerras #endif
540f2783c15SPaul Mackerras 
541f2783c15SPaul Mackerras 	while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
542f2783c15SPaul Mackerras 	       >= tb_ticks_per_jiffy) {
543f2783c15SPaul Mackerras 		/* Update last_jiffy */
544f2783c15SPaul Mackerras 		per_cpu(last_jiffy, cpu) += tb_ticks_per_jiffy;
545f2783c15SPaul Mackerras 		/* Handle RTCL overflow on 601 */
546f2783c15SPaul Mackerras 		if (__USE_RTC() && per_cpu(last_jiffy, cpu) >= 1000000000)
547f2783c15SPaul Mackerras 			per_cpu(last_jiffy, cpu) -= 1000000000;
548f2783c15SPaul Mackerras 
549f2783c15SPaul Mackerras 		/*
550f2783c15SPaul Mackerras 		 * We cannot disable the decrementer, so in the period
551f2783c15SPaul Mackerras 		 * between this cpu's being marked offline in cpu_online_map
552f2783c15SPaul Mackerras 		 * and calling stop-self, it is taking timer interrupts.
553f2783c15SPaul Mackerras 		 * Avoid calling into the scheduler rebalancing code if this
554f2783c15SPaul Mackerras 		 * is the case.
555f2783c15SPaul Mackerras 		 */
556f2783c15SPaul Mackerras 		if (!cpu_is_offline(cpu))
557c6622f63SPaul Mackerras 			account_process_time(regs);
558f2783c15SPaul Mackerras 
559f2783c15SPaul Mackerras 		/*
560f2783c15SPaul Mackerras 		 * No need to check whether cpu is offline here; boot_cpuid
561f2783c15SPaul Mackerras 		 * should have been fixed up by now.
562f2783c15SPaul Mackerras 		 */
563f2783c15SPaul Mackerras 		if (cpu != boot_cpuid)
564f2783c15SPaul Mackerras 			continue;
565f2783c15SPaul Mackerras 
566f2783c15SPaul Mackerras 		write_seqlock(&xtime_lock);
5675db9fa95SNathan Lynch 		tb_next_jiffy = tb_last_jiffy + tb_ticks_per_jiffy;
568c27da339SBenjamin Herrenschmidt 		if (__USE_RTC() && tb_next_jiffy >= 1000000000)
569c27da339SBenjamin Herrenschmidt 			tb_next_jiffy -= 1000000000;
5705db9fa95SNathan Lynch 		if (per_cpu(last_jiffy, cpu) >= tb_next_jiffy) {
5715db9fa95SNathan Lynch 			tb_last_jiffy = tb_next_jiffy;
5723171a030SAtsushi Nemoto 			do_timer(1);
5735db9fa95SNathan Lynch 		}
574f2783c15SPaul Mackerras 		write_sequnlock(&xtime_lock);
575f2783c15SPaul Mackerras 	}
576f2783c15SPaul Mackerras 
577f2783c15SPaul Mackerras 	next_dec = tb_ticks_per_jiffy - ticks;
578f2783c15SPaul Mackerras 	set_dec(next_dec);
579f2783c15SPaul Mackerras 
580f2783c15SPaul Mackerras #ifdef CONFIG_PPC_ISERIES
581501b6d29SStephen Rothwell 	if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
58235a84c2fSOlaf Hering 		process_hvlpevents();
583f2783c15SPaul Mackerras #endif
584f2783c15SPaul Mackerras 
585f2783c15SPaul Mackerras #ifdef CONFIG_PPC64
586f2783c15SPaul Mackerras 	/* collect purr register values often, for accurate calculations */
587f2783c15SPaul Mackerras 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
588f2783c15SPaul Mackerras 		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
589f2783c15SPaul Mackerras 		cu->current_tb = mfspr(SPRN_PURR);
590f2783c15SPaul Mackerras 	}
591f2783c15SPaul Mackerras #endif
592f2783c15SPaul Mackerras 
593f2783c15SPaul Mackerras 	irq_exit();
5947d12e780SDavid Howells 	set_irq_regs(old_regs);
595f2783c15SPaul Mackerras }
596f2783c15SPaul Mackerras 
597f2783c15SPaul Mackerras void wakeup_decrementer(void)
598f2783c15SPaul Mackerras {
599092b8f34SPaul Mackerras 	unsigned long ticks;
600f2783c15SPaul Mackerras 
601f2783c15SPaul Mackerras 	/*
602092b8f34SPaul Mackerras 	 * The timebase gets saved on sleep and restored on wakeup,
603092b8f34SPaul Mackerras 	 * so all we need to do is to reset the decrementer.
604f2783c15SPaul Mackerras 	 */
605092b8f34SPaul Mackerras 	ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
606092b8f34SPaul Mackerras 	if (ticks < tb_ticks_per_jiffy)
607092b8f34SPaul Mackerras 		ticks = tb_ticks_per_jiffy - ticks;
608092b8f34SPaul Mackerras 	else
609092b8f34SPaul Mackerras 		ticks = 1;
610092b8f34SPaul Mackerras 	set_dec(ticks);
611f2783c15SPaul Mackerras }
612f2783c15SPaul Mackerras 
613a5b518edSPaul Mackerras #ifdef CONFIG_SMP
614f2783c15SPaul Mackerras void __init smp_space_timers(unsigned int max_cpus)
615f2783c15SPaul Mackerras {
616f2783c15SPaul Mackerras 	int i;
617eb36c288SPaul Mackerras 	u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);
618f2783c15SPaul Mackerras 
619cbe62e2bSPaul Mackerras 	/* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
620cbe62e2bSPaul Mackerras 	previous_tb -= tb_ticks_per_jiffy;
621e147ec8fSwill schmidt 
6220e551954SKAMEZAWA Hiroyuki 	for_each_possible_cpu(i) {
623c6622f63SPaul Mackerras 		if (i == boot_cpuid)
624c6622f63SPaul Mackerras 			continue;
625f2783c15SPaul Mackerras 		per_cpu(last_jiffy, i) = previous_tb;
626f2783c15SPaul Mackerras 	}
627f2783c15SPaul Mackerras }
628f2783c15SPaul Mackerras #endif
629f2783c15SPaul Mackerras 
630f2783c15SPaul Mackerras /*
631f2783c15SPaul Mackerras  * Scheduler clock - returns current time in nanosec units.
632f2783c15SPaul Mackerras  *
633f2783c15SPaul Mackerras  * Note: mulhdu(a, b) (multiply high double unsigned) returns
634f2783c15SPaul Mackerras  * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
635f2783c15SPaul Mackerras  * are 64-bit unsigned numbers.
636f2783c15SPaul Mackerras  */
637f2783c15SPaul Mackerras unsigned long long sched_clock(void)
638f2783c15SPaul Mackerras {
63996c44507SPaul Mackerras 	if (__USE_RTC())
64096c44507SPaul Mackerras 		return get_rtc();
641fc9069feSTony Breeds 	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
642f2783c15SPaul Mackerras }
643f2783c15SPaul Mackerras 
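/*
 * Worked example for the scale/shift pair set up in time_init()
 * (assuming a 512 MHz timebase, an illustrative value only): each tick
 * is 1e9 / 512e6 = 1.953125 ns.  Since that is >= 1.0, the 64.64
 * result is shifted right once, giving tb_to_ns_shift = 1 and
 * tb_to_ns_scale = 0.9765625 * 2^64, so that
 *	mulhdu(delta, tb_to_ns_scale) << 1  ~=  delta * 1.953125
 * nanoseconds, as expected.
 */
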
6440bb474a4SAnton Blanchard static int __init get_freq(char *name, int cells, unsigned long *val)
645f2783c15SPaul Mackerras {
646f2783c15SPaul Mackerras 	struct device_node *cpu;
647a7f67bdfSJeremy Kerr 	const unsigned int *fp;
6480bb474a4SAnton Blanchard 	int found = 0;
649f2783c15SPaul Mackerras 
6500bb474a4SAnton Blanchard 	/* The cpu node should have timebase and clock frequency properties */
651f2783c15SPaul Mackerras 	cpu = of_find_node_by_type(NULL, "cpu");
652f2783c15SPaul Mackerras 
653d8a8188dSOlaf Hering 	if (cpu) {
654e2eb6392SStephen Rothwell 		fp = of_get_property(cpu, name, NULL);
655d8a8188dSOlaf Hering 		if (fp) {
6560bb474a4SAnton Blanchard 			found = 1;
657a4dc7ff0SPaul Mackerras 			*val = of_read_ulong(fp, cells);
658f2783c15SPaul Mackerras 		}
6590bb474a4SAnton Blanchard 
6600bb474a4SAnton Blanchard 		of_node_put(cpu);
661f2783c15SPaul Mackerras 	}
6620bb474a4SAnton Blanchard 
6630bb474a4SAnton Blanchard 	return found;
6640bb474a4SAnton Blanchard }
6650bb474a4SAnton Blanchard 
6660bb474a4SAnton Blanchard void __init generic_calibrate_decr(void)
6670bb474a4SAnton Blanchard {
6680bb474a4SAnton Blanchard 	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */
6690bb474a4SAnton Blanchard 
6700bb474a4SAnton Blanchard 	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
6710bb474a4SAnton Blanchard 	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {
6720bb474a4SAnton Blanchard 
673f2783c15SPaul Mackerras 		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
674f2783c15SPaul Mackerras 				"(not found)\n");
6750bb474a4SAnton Blanchard 	}
676f2783c15SPaul Mackerras 
6770bb474a4SAnton Blanchard 	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */
6780bb474a4SAnton Blanchard 
6790bb474a4SAnton Blanchard 	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
6800bb474a4SAnton Blanchard 	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {
6810bb474a4SAnton Blanchard 
6820bb474a4SAnton Blanchard 		printk(KERN_ERR "WARNING: Estimating processor frequency "
6830bb474a4SAnton Blanchard 				"(not found)\n");
684f2783c15SPaul Mackerras 	}
6850bb474a4SAnton Blanchard 
686aab69292SJosh Boyer #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
6870fd6f717SKumar Gala 	/* Set the time base to zero */
6880fd6f717SKumar Gala 	mtspr(SPRN_TBWL, 0);
6890fd6f717SKumar Gala 	mtspr(SPRN_TBWU, 0);
6900fd6f717SKumar Gala 
6910fd6f717SKumar Gala 	/* Clear any pending timer interrupts */
6920fd6f717SKumar Gala 	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
6930fd6f717SKumar Gala 
6940fd6f717SKumar Gala 	/* Enable decrementer interrupt */
6950fd6f717SKumar Gala 	mtspr(SPRN_TCR, TCR_DIE);
6960fd6f717SKumar Gala #endif
697f2783c15SPaul Mackerras }
698f2783c15SPaul Mackerras 
699aa3be5f3STony Breeds int update_persistent_clock(struct timespec now)
700f2783c15SPaul Mackerras {
701f2783c15SPaul Mackerras 	struct rtc_time tm;
702f2783c15SPaul Mackerras 
703aa3be5f3STony Breeds 	if (!ppc_md.set_rtc_time)
704aa3be5f3STony Breeds 		return 0;
705aa3be5f3STony Breeds 
706aa3be5f3STony Breeds 	to_tm(now.tv_sec + 1 + timezone_offset, &tm);
707aa3be5f3STony Breeds 	tm.tm_year -= 1900;
708aa3be5f3STony Breeds 	tm.tm_mon -= 1;
709aa3be5f3STony Breeds 
710aa3be5f3STony Breeds 	return ppc_md.set_rtc_time(&tm);
711aa3be5f3STony Breeds }
712aa3be5f3STony Breeds 
713aa3be5f3STony Breeds unsigned long read_persistent_clock(void)
714aa3be5f3STony Breeds {
715aa3be5f3STony Breeds 	struct rtc_time tm;
716aa3be5f3STony Breeds 	static int first = 1;
717aa3be5f3STony Breeds 
718aa3be5f3STony Breeds 	/* XXX this is a little fragile but will work okay in the short term */
719aa3be5f3STony Breeds 	if (first) {
720aa3be5f3STony Breeds 		first = 0;
721aa3be5f3STony Breeds 		if (ppc_md.time_init)
722aa3be5f3STony Breeds 			timezone_offset = ppc_md.time_init();
723aa3be5f3STony Breeds 
724aa3be5f3STony Breeds 		/* get_boot_time() isn't guaranteed to be safe to call late */
725f2783c15SPaul Mackerras 		if (ppc_md.get_boot_time)
726aa3be5f3STony Breeds 			return ppc_md.get_boot_time() - timezone_offset;
727aa3be5f3STony Breeds 	}
728f2783c15SPaul Mackerras 	if (!ppc_md.get_rtc_time)
729f2783c15SPaul Mackerras 		return 0;
730f2783c15SPaul Mackerras 	ppc_md.get_rtc_time(&tm);
731f2783c15SPaul Mackerras 	return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
732f2783c15SPaul Mackerras 		      tm.tm_hour, tm.tm_min, tm.tm_sec);
733f2783c15SPaul Mackerras }
734f2783c15SPaul Mackerras 
735*4a4cfe38STony Breeds /* clocksource code */
736*4a4cfe38STony Breeds static cycle_t rtc_read(void)
737*4a4cfe38STony Breeds {
738*4a4cfe38STony Breeds 	return (cycle_t)get_rtc();
739*4a4cfe38STony Breeds }
740*4a4cfe38STony Breeds 
741*4a4cfe38STony Breeds static cycle_t timebase_read(void)
742*4a4cfe38STony Breeds {
743*4a4cfe38STony Breeds 	return (cycle_t)get_tb();
744*4a4cfe38STony Breeds }
745*4a4cfe38STony Breeds 
746*4a4cfe38STony Breeds void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
747*4a4cfe38STony Breeds {
748*4a4cfe38STony Breeds 	u64 t2x, stamp_xsec;
749*4a4cfe38STony Breeds 
750*4a4cfe38STony Breeds 	if (clock != &clocksource_timebase)
751*4a4cfe38STony Breeds 		return;
752*4a4cfe38STony Breeds 
753*4a4cfe38STony Breeds 	/* Make userspace gettimeofday spin until we're done. */
754*4a4cfe38STony Breeds 	++vdso_data->tb_update_count;
755*4a4cfe38STony Breeds 	smp_mb();
756*4a4cfe38STony Breeds 
757*4a4cfe38STony Breeds 	/* XXX this assumes clock->shift == 22 */
758*4a4cfe38STony Breeds 	/* 4611686018 ~= 2^(20+64-22) / 1e9 */
759*4a4cfe38STony Breeds 	t2x = (u64) clock->mult * 4611686018ULL;
760*4a4cfe38STony Breeds 	stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
761*4a4cfe38STony Breeds 	do_div(stamp_xsec, 1000000000);
762*4a4cfe38STony Breeds 	stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
763*4a4cfe38STony Breeds 	update_gtod(clock->cycle_last, stamp_xsec, t2x);
764*4a4cfe38STony Breeds }
765*4a4cfe38STony Breeds 
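/*
 * Where the 4611686018 above comes from: the generic timekeeping
 * contract is ns = (cycles * mult) >> shift, so with shift == 22,
 * mult / 2^22 is nanoseconds per timebase tick.  tb_to_xs is xsec per
 * tick as a 0.64 fraction and one xsec is 2^-20 seconds, hence
 *	tb_to_xs = (mult / 2^22) * (2^20 / 1e9) * 2^64
 *	         = mult * 2^62 / 1e9
 *	        ~= mult * 4611686018
 * which is exactly the multiplication performed above.
 */
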
766*4a4cfe38STony Breeds void update_vsyscall_tz(void)
767*4a4cfe38STony Breeds {
768*4a4cfe38STony Breeds 	/* Make userspace gettimeofday spin until we're done. */
769*4a4cfe38STony Breeds 	++vdso_data->tb_update_count;
770*4a4cfe38STony Breeds 	smp_mb();
771*4a4cfe38STony Breeds 	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
772*4a4cfe38STony Breeds 	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
773*4a4cfe38STony Breeds 	smp_mb();
774*4a4cfe38STony Breeds 	++vdso_data->tb_update_count;
775*4a4cfe38STony Breeds }
776*4a4cfe38STony Breeds 
777*4a4cfe38STony Breeds void __init clocksource_init(void)
778*4a4cfe38STony Breeds {
779*4a4cfe38STony Breeds 	struct clocksource *clock;
780*4a4cfe38STony Breeds 
781*4a4cfe38STony Breeds 	if (__USE_RTC())
782*4a4cfe38STony Breeds 		clock = &clocksource_rtc;
783*4a4cfe38STony Breeds 	else
784*4a4cfe38STony Breeds 		clock = &clocksource_timebase;
785*4a4cfe38STony Breeds 
786*4a4cfe38STony Breeds 	clock->mult = clocksource_hz2mult(tb_ticks_per_sec, clock->shift);
787*4a4cfe38STony Breeds 
788*4a4cfe38STony Breeds 	if (clocksource_register(clock)) {
789*4a4cfe38STony Breeds 		printk(KERN_ERR "clocksource: %s is already registered\n",
790*4a4cfe38STony Breeds 		       clock->name);
791*4a4cfe38STony Breeds 		return;
792*4a4cfe38STony Breeds 	}
793*4a4cfe38STony Breeds 
794*4a4cfe38STony Breeds 	printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
795*4a4cfe38STony Breeds 	       clock->name, clock->mult, clock->shift);
796*4a4cfe38STony Breeds }
797*4a4cfe38STony Breeds 
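/*
 * clocksource_hz2mult() computes approximately (NSEC_PER_SEC << shift)
 * divided by the input frequency.  With shift = 22 and, say, a 512 MHz
 * timebase (an illustrative value), mult = 1e9 * 2^22 / 512e6 = 8192000,
 * and the generic timekeeping code then converts cycle deltas to
 * nanoseconds as (cycles * mult) >> 22.
 */
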
798f2783c15SPaul Mackerras /* This function is only called on the boot processor */
799f2783c15SPaul Mackerras void __init time_init(void)
800f2783c15SPaul Mackerras {
801f2783c15SPaul Mackerras 	unsigned long flags;
802f2783c15SPaul Mackerras 	struct div_result res;
803092b8f34SPaul Mackerras 	u64 scale, x;
804f2783c15SPaul Mackerras 	unsigned shift;
805f2783c15SPaul Mackerras 
80696c44507SPaul Mackerras 	if (__USE_RTC()) {
80796c44507SPaul Mackerras 		/* 601 processor: dec counts down by 128 every 128ns */
80896c44507SPaul Mackerras 		ppc_tb_freq = 1000000000;
809eb36c288SPaul Mackerras 		tb_last_jiffy = get_rtcl();
81096c44507SPaul Mackerras 	} else {
81196c44507SPaul Mackerras 		/* Normal PowerPC with timebase register */
812f2783c15SPaul Mackerras 		ppc_md.calibrate_decr();
813224ad80aSOlof Johansson 		printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
814374e99d4SPaul Mackerras 		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
815224ad80aSOlof Johansson 		printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
816374e99d4SPaul Mackerras 		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
817eb36c288SPaul Mackerras 		tb_last_jiffy = get_tb();
81896c44507SPaul Mackerras 	}
819374e99d4SPaul Mackerras 
820374e99d4SPaul Mackerras 	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
821092b8f34SPaul Mackerras 	tb_ticks_per_sec = ppc_tb_freq;
822374e99d4SPaul Mackerras 	tb_ticks_per_usec = ppc_tb_freq / 1000000;
823374e99d4SPaul Mackerras 	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
824c6622f63SPaul Mackerras 	calc_cputime_factors();
825092b8f34SPaul Mackerras 
826092b8f34SPaul Mackerras 	/*
827092b8f34SPaul Mackerras 	 * Calculate the length of each tick in ns.  It will not be
828092b8f34SPaul Mackerras 	 * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
829092b8f34SPaul Mackerras 	 * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
830092b8f34SPaul Mackerras 	 * rounded up.
831092b8f34SPaul Mackerras 	 */
832092b8f34SPaul Mackerras 	x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
833092b8f34SPaul Mackerras 	do_div(x, ppc_tb_freq);
834092b8f34SPaul Mackerras 	tick_nsec = x;
835092b8f34SPaul Mackerras 	last_tick_len = x << TICKLEN_SCALE;
836092b8f34SPaul Mackerras 
837092b8f34SPaul Mackerras 	/*
838092b8f34SPaul Mackerras 	 * Compute ticklen_to_xs, which is a factor which gets multiplied
839092b8f34SPaul Mackerras 	 * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
840092b8f34SPaul Mackerras 	 * It is computed as:
841092b8f34SPaul Mackerras 	 * ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
842092b8f34SPaul Mackerras 	 * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
8430a45d449SPaul Mackerras 	 * which turns out to be N = 51 - SHIFT_HZ.
8440a45d449SPaul Mackerras 	 * This gives the result as a 0.64 fixed-point fraction.
8450a45d449SPaul Mackerras 	 * That value is reduced by an offset amounting to 1 xsec per
8460a45d449SPaul Mackerras 	 * 2^31 timebase ticks to avoid problems with time going backwards
8470a45d449SPaul Mackerras 	 * by 1 xsec when we do timer_recalc_offset due to losing the
8480a45d449SPaul Mackerras 	 * fractional xsec.  That offset is equal to ppc_tb_freq/2^51
8490a45d449SPaul Mackerras 	 * since there are 2^20 xsec in a second.
850092b8f34SPaul Mackerras 	 */
8510a45d449SPaul Mackerras 	div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
8520a45d449SPaul Mackerras 		     tb_ticks_per_jiffy << SHIFT_HZ, &res);
853092b8f34SPaul Mackerras 	div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
854092b8f34SPaul Mackerras 	ticklen_to_xs = res.result_low;
855092b8f34SPaul Mackerras 
856092b8f34SPaul Mackerras 	/* Compute tb_to_xs from tick_nsec */
857092b8f34SPaul Mackerras 	tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);
858374e99d4SPaul Mackerras 
859f2783c15SPaul Mackerras 	/*
860f2783c15SPaul Mackerras 	 * Compute scale factor for sched_clock.
861f2783c15SPaul Mackerras 	 * The calibrate_decr() function has set tb_ticks_per_sec,
862f2783c15SPaul Mackerras 	 * which is the timebase frequency.
863f2783c15SPaul Mackerras 	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
864f2783c15SPaul Mackerras 	 * the 128-bit result as a 64.64 fixed-point number.
865f2783c15SPaul Mackerras 	 * We then shift that number right until it is less than 1.0,
866f2783c15SPaul Mackerras 	 * giving us the scale factor and shift count to use in
867f2783c15SPaul Mackerras 	 * sched_clock().
868f2783c15SPaul Mackerras 	 */
869f2783c15SPaul Mackerras 	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
870f2783c15SPaul Mackerras 	scale = res.result_low;
871f2783c15SPaul Mackerras 	for (shift = 0; res.result_high != 0; ++shift) {
872f2783c15SPaul Mackerras 		scale = (scale >> 1) | (res.result_high << 63);
873f2783c15SPaul Mackerras 		res.result_high >>= 1;
874f2783c15SPaul Mackerras 	}
875f2783c15SPaul Mackerras 	tb_to_ns_scale = scale;
876f2783c15SPaul Mackerras 	tb_to_ns_shift = shift;
877fc9069feSTony Breeds 	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
878c27da339SBenjamin Herrenschmidt 	boot_tb = get_tb_or_rtc();
879f2783c15SPaul Mackerras 
880f2783c15SPaul Mackerras 	write_seqlock_irqsave(&xtime_lock, flags);
881092b8f34SPaul Mackerras 
882092b8f34SPaul Mackerras 	/* If platform provided a timezone (pmac), we correct the time */
883092b8f34SPaul Mackerras         if (timezone_offset) {
884092b8f34SPaul Mackerras 		sys_tz.tz_minuteswest = -timezone_offset / 60;
885092b8f34SPaul Mackerras 		sys_tz.tz_dsttime = 0;
886092b8f34SPaul Mackerras         }
887092b8f34SPaul Mackerras 
888f2783c15SPaul Mackerras 	do_gtod.varp = &do_gtod.vars[0];
889f2783c15SPaul Mackerras 	do_gtod.var_idx = 0;
89096c44507SPaul Mackerras 	do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
891eb36c288SPaul Mackerras 	__get_cpu_var(last_jiffy) = tb_last_jiffy;
892f2783c15SPaul Mackerras 	do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
893f2783c15SPaul Mackerras 	do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
894f2783c15SPaul Mackerras 	do_gtod.varp->tb_to_xs = tb_to_xs;
895f2783c15SPaul Mackerras 	do_gtod.tb_to_us = tb_to_us;
896a7f290daSBenjamin Herrenschmidt 
897a7f290daSBenjamin Herrenschmidt 	vdso_data->tb_orig_stamp = tb_last_jiffy;
898a7f290daSBenjamin Herrenschmidt 	vdso_data->tb_update_count = 0;
899a7f290daSBenjamin Herrenschmidt 	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
900092b8f34SPaul Mackerras 	vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
901a7f290daSBenjamin Herrenschmidt 	vdso_data->tb_to_xs = tb_to_xs;
902f2783c15SPaul Mackerras 
903f2783c15SPaul Mackerras 	time_freq = 0;
904f2783c15SPaul Mackerras 
905f2783c15SPaul Mackerras 	write_sequnlock_irqrestore(&xtime_lock, flags);
906f2783c15SPaul Mackerras 
907*4a4cfe38STony Breeds 	/* Register the clocksource, if we're not running on iSeries */
908*4a4cfe38STony Breeds 	if (!firmware_has_feature(FW_FEATURE_ISERIES))
909*4a4cfe38STony Breeds 		clocksource_init();
910*4a4cfe38STony Breeds 
911f2783c15SPaul Mackerras 	/* Not exact, but the timer interrupt takes care of this */
912f2783c15SPaul Mackerras 	set_dec(tb_ticks_per_jiffy);
913f2783c15SPaul Mackerras }
914f2783c15SPaul Mackerras 
915f2783c15SPaul Mackerras 
916f2783c15SPaul Mackerras #define FEBRUARY	2
917f2783c15SPaul Mackerras #define	STARTOFTIME	1970
918f2783c15SPaul Mackerras #define SECDAY		86400L
919f2783c15SPaul Mackerras #define SECYR		(SECDAY * 365)
920f2783c15SPaul Mackerras #define	leapyear(year)		((year) % 4 == 0 && \
921f2783c15SPaul Mackerras 				 ((year) % 100 != 0 || (year) % 400 == 0))
922f2783c15SPaul Mackerras #define	days_in_year(a) 	(leapyear(a) ? 366 : 365)
923f2783c15SPaul Mackerras #define	days_in_month(a) 	(month_days[(a) - 1])
924f2783c15SPaul Mackerras 
925f2783c15SPaul Mackerras static int month_days[12] = {
926f2783c15SPaul Mackerras 	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
927f2783c15SPaul Mackerras };
928f2783c15SPaul Mackerras 
929f2783c15SPaul Mackerras /*
930f2783c15SPaul Mackerras  * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
931f2783c15SPaul Mackerras  */
932f2783c15SPaul Mackerras void GregorianDay(struct rtc_time * tm)
933f2783c15SPaul Mackerras {
934f2783c15SPaul Mackerras 	int leapsToDate;
935f2783c15SPaul Mackerras 	int lastYear;
936f2783c15SPaul Mackerras 	int day;
937f2783c15SPaul Mackerras 	int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };
938f2783c15SPaul Mackerras 
939f2783c15SPaul Mackerras 	lastYear = tm->tm_year - 1;
940f2783c15SPaul Mackerras 
941f2783c15SPaul Mackerras 	/*
942f2783c15SPaul Mackerras 	 * Number of leap corrections to apply up to end of last year
943f2783c15SPaul Mackerras 	 */
944f2783c15SPaul Mackerras 	leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;
945f2783c15SPaul Mackerras 
946f2783c15SPaul Mackerras 	/*
947f2783c15SPaul Mackerras 	 * This year is a leap year if it is divisible by 4 except when it is
948f2783c15SPaul Mackerras 	 * divisible by 100 unless it is divisible by 400
949f2783c15SPaul Mackerras 	 *
950f2783c15SPaul Mackerras 	 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
951f2783c15SPaul Mackerras 	 */
952f2783c15SPaul Mackerras 	day = tm->tm_mon > 2 && leapyear(tm->tm_year);
953f2783c15SPaul Mackerras 
954f2783c15SPaul Mackerras 	day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
955f2783c15SPaul Mackerras 		   tm->tm_mday;
956f2783c15SPaul Mackerras 
957f2783c15SPaul Mackerras 	tm->tm_wday = day % 7;
958f2783c15SPaul Mackerras }
959f2783c15SPaul Mackerras 
960f2783c15SPaul Mackerras void to_tm(int tim, struct rtc_time * tm)
961f2783c15SPaul Mackerras {
962f2783c15SPaul Mackerras 	register int    i;
963f2783c15SPaul Mackerras 	register long   hms, day;
964f2783c15SPaul Mackerras 
965f2783c15SPaul Mackerras 	day = tim / SECDAY;
966f2783c15SPaul Mackerras 	hms = tim % SECDAY;
967f2783c15SPaul Mackerras 
968f2783c15SPaul Mackerras 	/* Hours, minutes, seconds are easy */
969f2783c15SPaul Mackerras 	tm->tm_hour = hms / 3600;
970f2783c15SPaul Mackerras 	tm->tm_min = (hms % 3600) / 60;
971f2783c15SPaul Mackerras 	tm->tm_sec = (hms % 3600) % 60;
972f2783c15SPaul Mackerras 
973f2783c15SPaul Mackerras 	/* Number of years in days */
974f2783c15SPaul Mackerras 	for (i = STARTOFTIME; day >= days_in_year(i); i++)
975f2783c15SPaul Mackerras 		day -= days_in_year(i);
976f2783c15SPaul Mackerras 	tm->tm_year = i;
977f2783c15SPaul Mackerras 
978f2783c15SPaul Mackerras 	/* Number of months in days left */
979f2783c15SPaul Mackerras 	if (leapyear(tm->tm_year))
980f2783c15SPaul Mackerras 		days_in_month(FEBRUARY) = 29;
981f2783c15SPaul Mackerras 	for (i = 1; day >= days_in_month(i); i++)
982f2783c15SPaul Mackerras 		day -= days_in_month(i);
983f2783c15SPaul Mackerras 	days_in_month(FEBRUARY) = 28;
984f2783c15SPaul Mackerras 	tm->tm_mon = i;
985f2783c15SPaul Mackerras 
986f2783c15SPaul Mackerras 	/* Days are what is left over (+1) from all that. */
987f2783c15SPaul Mackerras 	tm->tm_mday = day + 1;
988f2783c15SPaul Mackerras 
989f2783c15SPaul Mackerras 	/*
990f2783c15SPaul Mackerras 	 * Determine the day of week
991f2783c15SPaul Mackerras 	 */
992f2783c15SPaul Mackerras 	GregorianDay(tm);
993f2783c15SPaul Mackerras }
994f2783c15SPaul Mackerras 
995f2783c15SPaul Mackerras /* Auxiliary function to compute scaling factors */
996f2783c15SPaul Mackerras /* Actually the choice of a timebase running at 1/4 of the bus
997f2783c15SPaul Mackerras  * frequency, giving a resolution of a few tens of nanoseconds, is quite nice.
998f2783c15SPaul Mackerras  * It makes this computation very precise (27-28 bits typically), which
999f2783c15SPaul Mackerras  * is optimistic considering the stability of most processor clock
1000f2783c15SPaul Mackerras  * oscillators and the precision with which the timebase frequency
1001f2783c15SPaul Mackerras  * is measured, but it does no harm.
1002f2783c15SPaul Mackerras  */
1003f2783c15SPaul Mackerras unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
1004f2783c15SPaul Mackerras {
1005f2783c15SPaul Mackerras         unsigned mlt=0, tmp, err;
1006f2783c15SPaul Mackerras         /* No concern for performance, it's done once: use a stupid
1007f2783c15SPaul Mackerras          * but safe and compact method to find the multiplier.
1008f2783c15SPaul Mackerras          */
1009f2783c15SPaul Mackerras 
1010f2783c15SPaul Mackerras         for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
1011f2783c15SPaul Mackerras                 if (mulhwu(inscale, mlt|tmp) < outscale)
1012f2783c15SPaul Mackerras 			mlt |= tmp;
1013f2783c15SPaul Mackerras         }
1014f2783c15SPaul Mackerras 
1015f2783c15SPaul Mackerras         /* We might still be off by 1 for the best approximation.
1016f2783c15SPaul Mackerras          * A side effect of this is that if outscale is too large
1017f2783c15SPaul Mackerras          * the returned value will be zero.
1018f2783c15SPaul Mackerras          * Many corner cases have been checked and seem to work,
1019f2783c15SPaul Mackerras          * some might have been forgotten in the test however.
1020f2783c15SPaul Mackerras          */
1021f2783c15SPaul Mackerras 
1022f2783c15SPaul Mackerras         err = inscale * (mlt+1);
1023f2783c15SPaul Mackerras         if (err <= inscale/2)
1024f2783c15SPaul Mackerras 		mlt++;
1025f2783c15SPaul Mackerras         return mlt;
1026f2783c15SPaul Mackerras }
1027f2783c15SPaul Mackerras 
1028f2783c15SPaul Mackerras /*
1029f2783c15SPaul Mackerras  * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
1030f2783c15SPaul Mackerras  * result.
1031f2783c15SPaul Mackerras  */
1032f2783c15SPaul Mackerras void div128_by_32(u64 dividend_high, u64 dividend_low,
1033f2783c15SPaul Mackerras 		  unsigned divisor, struct div_result *dr)
1034f2783c15SPaul Mackerras {
1035f2783c15SPaul Mackerras 	unsigned long a, b, c, d;
1036f2783c15SPaul Mackerras 	unsigned long w, x, y, z;
1037f2783c15SPaul Mackerras 	u64 ra, rb, rc;
1038f2783c15SPaul Mackerras 
1039f2783c15SPaul Mackerras 	a = dividend_high >> 32;
1040f2783c15SPaul Mackerras 	b = dividend_high & 0xffffffff;
1041f2783c15SPaul Mackerras 	c = dividend_low >> 32;
1042f2783c15SPaul Mackerras 	d = dividend_low & 0xffffffff;
1043f2783c15SPaul Mackerras 
1044f2783c15SPaul Mackerras 	w = a / divisor;
1045f2783c15SPaul Mackerras 	ra = ((u64)(a - (w * divisor)) << 32) + b;
1046f2783c15SPaul Mackerras 
1047f2783c15SPaul Mackerras 	rb = ((u64) do_div(ra, divisor) << 32) + c;
1048f2783c15SPaul Mackerras 	x = ra;
1049f2783c15SPaul Mackerras 
1050f2783c15SPaul Mackerras 	rc = ((u64) do_div(rb, divisor) << 32) + d;
1051f2783c15SPaul Mackerras 	y = rb;
1052f2783c15SPaul Mackerras 
1053f2783c15SPaul Mackerras 	do_div(rc, divisor);
1054f2783c15SPaul Mackerras 	z = rc;
1055f2783c15SPaul Mackerras 
1056f2783c15SPaul Mackerras 	dr->result_high = ((u64)w << 32) + x;
1057f2783c15SPaul Mackerras 	dr->result_low  = ((u64)y << 32) + z;
1058f2783c15SPaul Mackerras 
1059f2783c15SPaul Mackerras }
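
/*
 * Example use, as in time_init() above:
 *	div128_by_32(1000000000, 0, tb_ticks_per_sec, &dr);
 * leaves 1e9 * 2^64 / tb_ticks_per_sec in dr as a 64.64 fixed-point
 * value, the starting point for the sched_clock() scale factor.
 */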
1060