xref: /openbmc/linux/arch/powerpc/kernel/time.c (revision 2b46b567)
1 /*
2  * Common time routines among all ppc machines.
3  *
4  * Written by Cort Dougan (cort@cs.nmt.edu) to merge
5  * Paul Mackerras' version and mine for PReP and Pmac.
6  * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
7  * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
8  *
9  * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
10  * to make clock more stable (2.4.0-test5). The only thing
11  * that this code assumes is that the timebases have been synchronized
12  * by firmware on SMP and are never stopped (never do sleep
13  * on SMP then, nap and doze are OK).
14  *
15  * Speeded up do_gettimeofday by getting rid of references to
16  * xtime (which required locks for consistency). (mikejc@us.ibm.com)
17  *
18  * TODO (not necessarily in this file):
19  * - improve precision and reproducibility of timebase frequency
20  * measurement at boot time. (for iSeries, we calibrate the timebase
21  * against the Titan chip's clock.)
22  * - for astronomical applications: add a new function to get
23  * unambiguous timestamps even around leap seconds. This needs
24  * a new timestamp format and a good name.
25  *
26  * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
27  *             "A Kernel Model for Precision Timekeeping" by Dave Mills
28  *
29  *      This program is free software; you can redistribute it and/or
30  *      modify it under the terms of the GNU General Public License
31  *      as published by the Free Software Foundation; either version
32  *      2 of the License, or (at your option) any later version.
33  */
34 
35 #include <linux/errno.h>
36 #include <linux/module.h>
37 #include <linux/sched.h>
38 #include <linux/kernel.h>
39 #include <linux/param.h>
40 #include <linux/string.h>
41 #include <linux/mm.h>
42 #include <linux/interrupt.h>
43 #include <linux/timex.h>
44 #include <linux/kernel_stat.h>
45 #include <linux/time.h>
46 #include <linux/init.h>
47 #include <linux/profile.h>
48 #include <linux/cpu.h>
49 #include <linux/security.h>
50 #include <linux/percpu.h>
51 #include <linux/rtc.h>
52 #include <linux/jiffies.h>
53 #include <linux/posix-timers.h>
54 #include <linux/irq.h>
55 
56 #include <asm/io.h>
57 #include <asm/processor.h>
58 #include <asm/nvram.h>
59 #include <asm/cache.h>
60 #include <asm/machdep.h>
61 #include <asm/uaccess.h>
62 #include <asm/time.h>
63 #include <asm/prom.h>
64 #include <asm/irq.h>
65 #include <asm/div64.h>
66 #include <asm/smp.h>
67 #include <asm/vdso_datapage.h>
68 #include <asm/firmware.h>
69 #ifdef CONFIG_PPC_ISERIES
70 #include <asm/iseries/it_lp_queue.h>
71 #include <asm/iseries/hv_call_xm.h>
72 #endif
73 
74 /* powerpc clocksource/clockevent code */
75 
76 #include <linux/clockchips.h>
77 #include <linux/clocksource.h>
78 
79 static cycle_t rtc_read(void);
80 static struct clocksource clocksource_rtc = {
81 	.name         = "rtc",
82 	.rating       = 400,
83 	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
84 	.mask         = CLOCKSOURCE_MASK(64),
85 	.shift        = 22,
86 	.mult         = 0,	/* To be filled in */
87 	.read         = rtc_read,
88 };
89 
90 static cycle_t timebase_read(void);
91 static struct clocksource clocksource_timebase = {
92 	.name         = "timebase",
93 	.rating       = 400,
94 	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
95 	.mask         = CLOCKSOURCE_MASK(64),
96 	.shift        = 22,
97 	.mult         = 0,	/* To be filled in */
98 	.read         = timebase_read,
99 };
100 
101 #define DECREMENTER_MAX	0x7fffffff
102 
103 static int decrementer_set_next_event(unsigned long evt,
104 				      struct clock_event_device *dev);
105 static void decrementer_set_mode(enum clock_event_mode mode,
106 				 struct clock_event_device *dev);
107 
108 static struct clock_event_device decrementer_clockevent = {
109 	.name           = "decrementer",
110 	.rating         = 200,
111 	.shift          = 16,
112 	.mult           = 0,	/* To be filled in */
113 	.irq            = 0,
114 	.set_next_event = decrementer_set_next_event,
115 	.set_mode       = decrementer_set_mode,
116 	.features       = CLOCK_EVT_FEAT_ONESHOT,
117 };
118 
119 static DEFINE_PER_CPU(struct clock_event_device, decrementers);
120 void init_decrementer_clockevent(void);
121 static DEFINE_PER_CPU(u64, decrementer_next_tb);
122 
123 #ifdef CONFIG_PPC_ISERIES
124 static unsigned long __initdata iSeries_recal_titan;
125 static signed long __initdata iSeries_recal_tb;
126 
127 /* Forward declaration is only needed for iSeries compiles */
128 void __init clocksource_init(void);
129 #endif
130 
131 #define XSEC_PER_SEC (1024*1024)
132 
133 #ifdef CONFIG_PPC64
134 #define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
135 #else
136 /* compute ((xsec << 12) * max) >> 32 */
137 #define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
138 #endif
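/*
 * Illustrative sketch (not used directly by this file): an "xsec" is
 * 1/2^20 of a second, so XSEC_PER_SEC == 2^20, and SCALE_XSEC()
 * rescales an xsec count into "max" units per second.  For example,
 * converting half a second (0x80000 xsec) to microseconds:
 *
 *	unsigned long usec = SCALE_XSEC(0x80000, 1000000);
 *
 * evaluates to 500000.  On 32-bit, mulhwu((xsec) << 12, max) computes
 * ((xsec << 12) * max) >> 32, which equals (xsec * max) / 2^20 without
 * needing a 64-bit divide.
 */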
139 
140 unsigned long tb_ticks_per_jiffy;
141 unsigned long tb_ticks_per_usec = 100; /* sane default */
142 EXPORT_SYMBOL(tb_ticks_per_usec);
143 unsigned long tb_ticks_per_sec;
144 EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */
145 u64 tb_to_xs;
146 unsigned tb_to_us;
147 
148 #define TICKLEN_SCALE	TICK_LENGTH_SHIFT
149 u64 last_tick_len;	/* units are ns / 2^TICKLEN_SCALE */
150 u64 ticklen_to_xs;	/* 0.64 fraction */
151 
152 /* If last_tick_len corresponds to about 1/HZ seconds, then
153    last_tick_len << TICKLEN_SHIFT will be about 2^63. */
154 #define TICKLEN_SHIFT	(63 - 30 - TICKLEN_SCALE + SHIFT_HZ)
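/*
 * Sanity sketch of the shift choice (illustrative, approximate values):
 * tick_nsec is about 1e9/HZ ~= 2^30/HZ ~= 2^(30 - SHIFT_HZ), since
 * HZ ~= 2^SHIFT_HZ, so last_tick_len = tick_nsec << TICKLEN_SCALE is
 * about 2^(30 - SHIFT_HZ + TICKLEN_SCALE).  Shifting that left by
 * TICKLEN_SHIFT = 63 - 30 - TICKLEN_SCALE + SHIFT_HZ lands at ~2^63,
 * which is what the ticklen_to_xs computation in time_init() relies on.
 */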
155 
156 DEFINE_SPINLOCK(rtc_lock);
157 EXPORT_SYMBOL_GPL(rtc_lock);
158 
159 static u64 tb_to_ns_scale __read_mostly;
160 static unsigned tb_to_ns_shift __read_mostly;
161 static unsigned long boot_tb __read_mostly;
162 
163 struct gettimeofday_struct do_gtod;
164 
165 extern struct timezone sys_tz;
166 static long timezone_offset;
167 
168 unsigned long ppc_proc_freq;
169 EXPORT_SYMBOL(ppc_proc_freq);
170 unsigned long ppc_tb_freq;
171 
172 static u64 tb_last_jiffy __cacheline_aligned_in_smp;
173 static DEFINE_PER_CPU(u64, last_jiffy);
174 
175 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
176 /*
177  * Factors for converting from cputime_t (timebase ticks) to
178  * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
179  * These are all stored as 0.64 fixed-point binary fractions.
180  */
181 u64 __cputime_jiffies_factor;
182 EXPORT_SYMBOL(__cputime_jiffies_factor);
183 u64 __cputime_msec_factor;
184 EXPORT_SYMBOL(__cputime_msec_factor);
185 u64 __cputime_sec_factor;
186 EXPORT_SYMBOL(__cputime_sec_factor);
187 u64 __cputime_clockt_factor;
188 EXPORT_SYMBOL(__cputime_clockt_factor);
189 
190 static void calc_cputime_factors(void)
191 {
192 	struct div_result res;
193 
194 	div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
195 	__cputime_jiffies_factor = res.result_low;
196 	div128_by_32(1000, 0, tb_ticks_per_sec, &res);
197 	__cputime_msec_factor = res.result_low;
198 	div128_by_32(1, 0, tb_ticks_per_sec, &res);
199 	__cputime_sec_factor = res.result_low;
200 	div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
201 	__cputime_clockt_factor = res.result_low;
202 }
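/*
 * Each factor computed above is a 0.64 fixed-point fraction of "output
 * units per timebase tick".  A consumer multiplies by it and keeps the
 * high 64 bits; illustrative sketch (the real conversions live in the
 * cputime headers):
 *
 *	jiffies = mulhdu(tb_ticks, __cputime_jiffies_factor);
 *
 * since mulhdu() returns (a * b) >> 64 for 64-bit unsigned a and b.
 */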
203 
204 /*
205  * Read the PURR on systems that have it, otherwise the timebase.
206  */
207 static u64 read_purr(void)
208 {
209 	if (cpu_has_feature(CPU_FTR_PURR))
210 		return mfspr(SPRN_PURR);
211 	return mftb();
212 }
213 
214 /*
215  * Read the SPURR on systems that have it, otherwise the PURR.
216  */
217 static u64 read_spurr(u64 purr)
218 {
219 	if (cpu_has_feature(CPU_FTR_SPURR))
220 		return mfspr(SPRN_SPURR);
221 	return purr;
222 }
223 
224 /*
225  * Account time for a transition between system, hard irq
226  * or soft irq state.
227  */
228 void account_system_vtime(struct task_struct *tsk)
229 {
230 	u64 now, nowscaled, delta, deltascaled;
231 	unsigned long flags;
232 
233 	local_irq_save(flags);
234 	now = read_purr();
235 	delta = now - get_paca()->startpurr;
236 	get_paca()->startpurr = now;
237 	nowscaled = read_spurr(now);
238 	deltascaled = nowscaled - get_paca()->startspurr;
239 	get_paca()->startspurr = nowscaled;
240 	if (!in_interrupt()) {
241 		/* deltascaled includes both user and system time.
242 		 * Hence scale it by the PURR-based system/(system + user)
243 		 * ratio to estimate the system component. */
244 		if (get_paca()->user_time)
245 			deltascaled = deltascaled * get_paca()->system_time /
246 			     (get_paca()->system_time + get_paca()->user_time);
247 		delta += get_paca()->system_time;
248 		get_paca()->system_time = 0;
249 	}
250 	account_system_time(tsk, 0, delta);
251 	get_paca()->purrdelta = delta;
252 	account_system_time_scaled(tsk, deltascaled);
253 	get_paca()->spurrdelta = deltascaled;
254 	local_irq_restore(flags);
255 }
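/*
 * Worked example of the split above (illustrative numbers): if the paca
 * has accumulated user_time = 300 and system_time = 100 PURR ticks, a
 * deltascaled of 360 SPURR ticks is apportioned as
 * 360 * 100 / (100 + 300) = 90 ticks of scaled system time.  The user
 * portion is picked up later when account_process_tick() below scales
 * user time by the saved spurrdelta/purrdelta ratio.
 */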
256 
257 /*
258  * Transfer the user and system times accumulated in the paca
259  * by the exception entry and exit code to the generic process
260  * user and system time records.
261  * Must be called with interrupts disabled.
262  */
263 void account_process_tick(struct task_struct *tsk, int user_tick)
264 {
265 	cputime_t utime, utimescaled;
266 
267 	utime = get_paca()->user_time;
268 	get_paca()->user_time = 0;
269 	account_user_time(tsk, utime);
270 
271 	/* Estimate the scaled utime by scaling the real utime based
272 	 * on the last SPURR-to-PURR ratio. */
273 	utimescaled = utime * get_paca()->spurrdelta / get_paca()->purrdelta;
274 	get_paca()->spurrdelta = get_paca()->purrdelta = 0;
275 	account_user_time_scaled(tsk, utimescaled);
276 }
277 
278 /*
279  * Stuff for accounting stolen time.
280  */
281 struct cpu_purr_data {
282 	int	initialized;			/* thread is running */
283 	u64	tb;			/* last TB value read */
284 	u64	purr;			/* last PURR value read */
285 	u64	spurr;			/* last SPURR value read */
286 };
287 
288 /*
289  * Each entry in the cpu_purr_data array is manipulated only by its
290  * "owner" cpu -- usually in the timer interrupt but also occasionally
291  * in process context for cpu online.  As long as cpus do not touch
292  * each others' cpu_purr_data, disabling local interrupts is
293  * sufficient to serialize accesses.
294  */
295 static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);
296 
297 static void snapshot_tb_and_purr(void *data)
298 {
299 	unsigned long flags;
300 	struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);
301 
302 	local_irq_save(flags);
303 	p->tb = get_tb_or_rtc();
304 	p->purr = mfspr(SPRN_PURR);
305 	wmb();
306 	p->initialized = 1;
307 	local_irq_restore(flags);
308 }
309 
310 /*
311  * Called during boot when all cpus have come up.
312  */
313 void snapshot_timebases(void)
314 {
315 	if (!cpu_has_feature(CPU_FTR_PURR))
316 		return;
317 	on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
318 }
319 
320 /*
321  * Must be called with interrupts disabled.
322  */
323 void calculate_steal_time(void)
324 {
325 	u64 tb, purr;
326 	s64 stolen;
327 	struct cpu_purr_data *pme;
328 
329 	if (!cpu_has_feature(CPU_FTR_PURR))
330 		return;
331 	pme = &per_cpu(cpu_purr_data, smp_processor_id());
332 	if (!pme->initialized)
333 		return;		/* this can happen in early boot */
334 	tb = mftb();
335 	purr = mfspr(SPRN_PURR);
336 	stolen = (tb - pme->tb) - (purr - pme->purr);
337 	if (stolen > 0)
338 		account_steal_time(current, stolen);
339 	pme->tb = tb;
340 	pme->purr = purr;
341 }
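/*
 * Example (illustrative numbers): on an SMT processor the PURR advances
 * only for cycles apportioned to this thread, while the timebase always
 * runs.  If tb - pme->tb == 1000 ticks but purr - pme->purr == 800, the
 * remaining 200 ticks went elsewhere (e.g. to the sibling thread or the
 * hypervisor) and are accounted as stolen time.
 */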
342 
343 #ifdef CONFIG_PPC_SPLPAR
344 /*
345  * Must be called before the cpu is added to the online map when
346  * a cpu is being brought up at runtime.
347  */
348 static void snapshot_purr(void)
349 {
350 	struct cpu_purr_data *pme;
351 	unsigned long flags;
352 
353 	if (!cpu_has_feature(CPU_FTR_PURR))
354 		return;
355 	local_irq_save(flags);
356 	pme = &per_cpu(cpu_purr_data, smp_processor_id());
357 	pme->tb = mftb();
358 	pme->purr = mfspr(SPRN_PURR);
359 	pme->initialized = 1;
360 	local_irq_restore(flags);
361 }
362 
363 #endif /* CONFIG_PPC_SPLPAR */
364 
365 #else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
366 #define calc_cputime_factors()
367 #define calculate_steal_time()		do { } while (0)
368 #endif
369 
370 #if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR))
371 #define snapshot_purr()			do { } while (0)
372 #endif
373 
374 /*
375  * Called when a cpu comes up after the system has finished booting,
376  * i.e. as a result of a hotplug cpu action.
377  */
378 void snapshot_timebase(void)
379 {
380 	__get_cpu_var(last_jiffy) = get_tb_or_rtc();
381 	snapshot_purr();
382 }
383 
384 void __delay(unsigned long loops)
385 {
386 	unsigned long start;
387 	int diff;
388 
389 	if (__USE_RTC()) {
390 		start = get_rtcl();
391 		do {
392 			/* the RTCL register wraps at 1000000000 */
393 			diff = get_rtcl() - start;
394 			if (diff < 0)
395 				diff += 1000000000;
396 		} while (diff < loops);
397 	} else {
398 		start = get_tbl();
399 		while (get_tbl() - start < loops)
400 			HMT_low();
401 		HMT_medium();
402 	}
403 }
404 EXPORT_SYMBOL(__delay);
405 
406 void udelay(unsigned long usecs)
407 {
408 	__delay(tb_ticks_per_usec * usecs);
409 }
410 EXPORT_SYMBOL(udelay);
411 
412 
413 /*
414  * There are two copies of tb_to_xs and stamp_xsec so that no
415  * lock is needed to access and use these values in
416  * do_gettimeofday.  We alternate the copies and as long as a
417  * reasonable time elapses between changes, there will never
418  * be inconsistent values.  ntpd has a minimum of one minute
419  * between updates.
420  */
421 static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
422 			       u64 new_tb_to_xs)
423 {
424 	unsigned temp_idx;
425 	struct gettimeofday_vars *temp_varp;
426 
427 	temp_idx = (do_gtod.var_idx == 0);
428 	temp_varp = &do_gtod.vars[temp_idx];
429 
430 	temp_varp->tb_to_xs = new_tb_to_xs;
431 	temp_varp->tb_orig_stamp = new_tb_stamp;
432 	temp_varp->stamp_xsec = new_stamp_xsec;
433 	smp_mb();
434 	do_gtod.varp = temp_varp;
435 	do_gtod.var_idx = temp_idx;
436 
437 	/*
438 	 * tb_update_count is used to allow the userspace gettimeofday code
439 	 * to assure itself that it sees a consistent view of the tb_to_xs and
440 	 * stamp_xsec variables.  It reads the tb_update_count, then reads
441 	 * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
442 	 * the two values of tb_update_count match and are even then the
443 	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
444 	 * loops back and reads them again until this criterion is met.
445 	 * We expect the caller to have done the first increment of
446 	 * vdso_data->tb_update_count already.
447 	 */
448 	vdso_data->tb_orig_stamp = new_tb_stamp;
449 	vdso_data->stamp_xsec = new_stamp_xsec;
450 	vdso_data->tb_to_xs = new_tb_to_xs;
451 	vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
452 	vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
453 	smp_wmb();
454 	++(vdso_data->tb_update_count);
455 }
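/*
 * Reader side of the protocol described above, as an illustrative C
 * sketch (the real reader is the VDSO assembly):
 *
 *	do {
 *		seq = vdso_data->tb_update_count;
 *		smp_rmb();
 *		... read tb_orig_stamp, stamp_xsec, tb_to_xs ...
 *		smp_rmb();
 *	} while ((seq & 1) || seq != vdso_data->tb_update_count);
 *
 * An odd count means an update is in flight; a changed count means the
 * values may have been torn, so the reader retries.
 */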
456 
457 #ifdef CONFIG_SMP
458 unsigned long profile_pc(struct pt_regs *regs)
459 {
460 	unsigned long pc = instruction_pointer(regs);
461 
462 	if (in_lock_functions(pc))
463 		return regs->link;
464 
465 	return pc;
466 }
467 EXPORT_SYMBOL(profile_pc);
468 #endif
469 
470 #ifdef CONFIG_PPC_ISERIES
471 
472 /*
473  * This function recalibrates the timebase based on the 49-bit time-of-day
474  * value in the Titan chip.  The Titan is much more accurate than the value
475  * returned by the service processor for the timebase frequency.
476  */
477 
478 static int __init iSeries_tb_recal(void)
479 {
480 	struct div_result divres;
481 	unsigned long titan, tb;
482 
483 	/* Make sure we only run on iSeries */
484 	if (!firmware_has_feature(FW_FEATURE_ISERIES))
485 		return -ENODEV;
486 
487 	tb = get_tb();
488 	titan = HvCallXm_loadTod();
489 	if ( iSeries_recal_titan ) {
490 		unsigned long tb_ticks = tb - iSeries_recal_tb;
491 		unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
492 		unsigned long new_tb_ticks_per_sec   = (tb_ticks * USEC_PER_SEC)/titan_usec;
493 		unsigned long new_tb_ticks_per_jiffy = (new_tb_ticks_per_sec+(HZ/2))/HZ;
494 		long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
495 		char sign = '+';
496 		/* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
497 		new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;
498 
499 		if ( tick_diff < 0 ) {
500 			tick_diff = -tick_diff;
501 			sign = '-';
502 		}
503 		if ( tick_diff ) {
504 			if ( tick_diff < tb_ticks_per_jiffy/25 ) {
505 				printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
506 						new_tb_ticks_per_jiffy, sign, tick_diff );
507 				tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
508 				tb_ticks_per_sec   = new_tb_ticks_per_sec;
509 				calc_cputime_factors();
510 				div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
511 				do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
512 				tb_to_xs = divres.result_low;
513 				do_gtod.varp->tb_to_xs = tb_to_xs;
514 				vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
515 				vdso_data->tb_to_xs = tb_to_xs;
516 			}
517 			else {
518 				printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
519 					"                   new tb_ticks_per_jiffy = %lu\n"
520 					"                   old tb_ticks_per_jiffy = %lu\n",
521 					new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );
522 			}
523 		}
524 	}
525 	iSeries_recal_titan = titan;
526 	iSeries_recal_tb = tb;
527 
528 	/* Called here now that we know accurate values for the timebase */
529 	clocksource_init();
530 	return 0;
531 }
532 late_initcall(iSeries_tb_recal);
533 
534 /* Called from platform early init */
535 void __init iSeries_time_init_early(void)
536 {
537 	iSeries_recal_tb = get_tb();
538 	iSeries_recal_titan = HvCallXm_loadTod();
539 }
540 #endif /* CONFIG_PPC_ISERIES */
541 
542 /*
543  * For iSeries shared processors, we have to let the hypervisor
544  * set the hardware decrementer.  We set a virtual decrementer
545  * in the lppaca and call the hypervisor if the virtual
546  * decrementer is less than the current value in the hardware
547  * decrementer.  (Almost always the new decrementer value will
548  * be greater than the current hardware decrementer, so the
549  * hypervisor call will not be needed.)
550  */
551 
552 /*
553  * timer_interrupt - gets called when the decrementer overflows,
554  * with interrupts disabled.
555  */
556 void timer_interrupt(struct pt_regs * regs)
557 {
558 	struct pt_regs *old_regs;
559 	int cpu = smp_processor_id();
560 	struct clock_event_device *evt = &per_cpu(decrementers, cpu);
561 	u64 now;
562 
563 	/* Ensure a positive value is written to the decrementer, or else
564 	 * some CPUs will continue to take decrementer exceptions */
565 	set_dec(DECREMENTER_MAX);
566 
567 #ifdef CONFIG_PPC32
568 	if (atomic_read(&ppc_n_lost_interrupts) != 0)
569 		do_IRQ(regs);
570 #endif
571 
572 	now = get_tb_or_rtc();
573 	if (now < per_cpu(decrementer_next_tb, cpu)) {
574 		/* not time for this event yet */
575 		now = per_cpu(decrementer_next_tb, cpu) - now;
576 		if (now <= DECREMENTER_MAX)
577 			set_dec((int)now);
578 		return;
579 	}
580 	old_regs = set_irq_regs(regs);
581 	irq_enter();
582 
583 	calculate_steal_time();
584 
585 #ifdef CONFIG_PPC_ISERIES
586 	if (firmware_has_feature(FW_FEATURE_ISERIES))
587 		get_lppaca()->int_dword.fields.decr_int = 0;
588 #endif
589 
590 	if (evt->event_handler)
591 		evt->event_handler(evt);
592 
593 #ifdef CONFIG_PPC_ISERIES
594 	if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
595 		process_hvlpevents();
596 #endif
597 
598 #ifdef CONFIG_PPC64
599 	/* collect PURR register values often, for accurate calculations */
600 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
601 		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
602 		cu->current_tb = mfspr(SPRN_PURR);
603 	}
604 #endif
605 
606 	irq_exit();
607 	set_irq_regs(old_regs);
608 }
609 
610 void wakeup_decrementer(void)
611 {
612 	unsigned long ticks;
613 
614 	/*
615 	 * The timebase gets saved on sleep and restored on wakeup,
616 	 * so all we need to do is to reset the decrementer.
617 	 */
618 	ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
619 	if (ticks < tb_ticks_per_jiffy)
620 		ticks = tb_ticks_per_jiffy - ticks;
621 	else
622 		ticks = 1;
623 	set_dec(ticks);
624 }
625 
626 #ifdef CONFIG_SMP
627 void __init smp_space_timers(unsigned int max_cpus)
628 {
629 	int i;
630 	u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);
631 
632 	/* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
633 	previous_tb -= tb_ticks_per_jiffy;
634 
635 	for_each_possible_cpu(i) {
636 		if (i == boot_cpuid)
637 			continue;
638 		per_cpu(last_jiffy, i) = previous_tb;
639 	}
640 }
641 #endif
642 
643 /*
644  * Scheduler clock - returns current time in nanosec units.
645  *
646  * Note: mulhdu(a, b) (multiply high double unsigned) returns
647  * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
648  * are 64-bit unsigned numbers.
649  */
650 unsigned long long sched_clock(void)
651 {
652 	if (__USE_RTC())
653 		return get_rtc();
654 	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
655 }
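/*
 * In other words (illustrative formula): for the timebase case,
 *
 *	ns = ((get_tb() - boot_tb) * tb_to_ns_scale >> 64) << tb_to_ns_shift
 *
 * where tb_to_ns_scale and tb_to_ns_shift are chosen in time_init() so
 * that the 0.64 fixed-point product is just below 1.0 before the shift.
 */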
656 
657 static int __init get_freq(char *name, int cells, unsigned long *val)
658 {
659 	struct device_node *cpu;
660 	const unsigned int *fp;
661 	int found = 0;
662 
663 	/* The cpu node should have timebase and clock frequency properties */
664 	cpu = of_find_node_by_type(NULL, "cpu");
665 
666 	if (cpu) {
667 		fp = of_get_property(cpu, name, NULL);
668 		if (fp) {
669 			found = 1;
670 			*val = of_read_ulong(fp, cells);
671 		}
672 
673 		of_node_put(cpu);
674 	}
675 
676 	return found;
677 }
678 
679 void __init generic_calibrate_decr(void)
680 {
681 	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */
682 
683 	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
684 	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {
685 
686 		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
687 				"(not found)\n");
688 	}
689 
690 	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */
691 
692 	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
693 	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {
694 
695 		printk(KERN_ERR "WARNING: Estimating processor frequency "
696 				"(not found)\n");
697 	}
698 
699 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
700 	/* Set the time base to zero */
701 	mtspr(SPRN_TBWL, 0);
702 	mtspr(SPRN_TBWU, 0);
703 
704 	/* Clear any pending timer interrupts */
705 	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
706 
707 	/* Enable decrementer interrupt */
708 	mtspr(SPRN_TCR, TCR_DIE);
709 #endif
710 }
711 
712 int update_persistent_clock(struct timespec now)
713 {
714 	struct rtc_time tm;
715 
716 	if (!ppc_md.set_rtc_time)
717 		return 0;
718 
719 	to_tm(now.tv_sec + 1 + timezone_offset, &tm);
720 	tm.tm_year -= 1900;
721 	tm.tm_mon -= 1;
722 
723 	return ppc_md.set_rtc_time(&tm);
724 }
725 
726 unsigned long read_persistent_clock(void)
727 {
728 	struct rtc_time tm;
729 	static int first = 1;
730 
731 	/* XXX this is a little fragile but will work okay in the short term */
732 	if (first) {
733 		first = 0;
734 		if (ppc_md.time_init)
735 			timezone_offset = ppc_md.time_init();
736 
737 		/* get_boot_time() isn't guaranteed to be safe to call late */
738 		if (ppc_md.get_boot_time)
739 			return ppc_md.get_boot_time() - timezone_offset;
740 	}
741 	if (!ppc_md.get_rtc_time)
742 		return 0;
743 	ppc_md.get_rtc_time(&tm);
744 	return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
745 		      tm.tm_hour, tm.tm_min, tm.tm_sec);
746 }
747 
748 /* clocksource code */
749 static cycle_t rtc_read(void)
750 {
751 	return (cycle_t)get_rtc();
752 }
753 
754 static cycle_t timebase_read(void)
755 {
756 	return (cycle_t)get_tb();
757 }
758 
759 void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
760 {
761 	u64 t2x, stamp_xsec;
762 
763 	if (clock != &clocksource_timebase)
764 		return;
765 
766 	/* Make userspace gettimeofday spin until we're done. */
767 	++vdso_data->tb_update_count;
768 	smp_mb();
769 
770 	/* XXX this assumes clock->shift == 22 */
771 	/* 4611686018 ~= 2^(20+64-22) / 1e9 */
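	/*
	 * Where that constant comes from (sketch): tb_to_xs is a 0.64
	 * fixed-point fraction of xsec per timebase tick.  With 2^20
	 * xsec per second, xsec_per_tick = ns_per_tick * 2^20 / 1e9,
	 * and as a 0.64 fraction that is ns_per_tick * 2^84 / 1e9.
	 * For this clocksource ns_per_tick = mult / 2^22, giving
	 * tb_to_xs = mult * 2^62 / 1e9, and 2^62 / 1e9 ~= 4611686018.
	 */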
772 	t2x = (u64) clock->mult * 4611686018ULL;
773 	stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
774 	do_div(stamp_xsec, 1000000000);
775 	stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
776 	update_gtod(clock->cycle_last, stamp_xsec, t2x);
777 }
778 
779 void update_vsyscall_tz(void)
780 {
781 	/* Make userspace gettimeofday spin until we're done. */
782 	++vdso_data->tb_update_count;
783 	smp_mb();
784 	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
785 	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
786 	smp_mb();
787 	++vdso_data->tb_update_count;
788 }
789 
790 void __init clocksource_init(void)
791 {
792 	struct clocksource *clock;
793 
794 	if (__USE_RTC())
795 		clock = &clocksource_rtc;
796 	else
797 		clock = &clocksource_timebase;
798 
799 	clock->mult = clocksource_hz2mult(tb_ticks_per_sec, clock->shift);
800 
801 	if (clocksource_register(clock)) {
802 		printk(KERN_ERR "clocksource: %s is already registered\n",
803 		       clock->name);
804 		return;
805 	}
806 
807 	printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
808 	       clock->name, clock->mult, clock->shift);
809 }
810 
811 static int decrementer_set_next_event(unsigned long evt,
812 				      struct clock_event_device *dev)
813 {
814 	__get_cpu_var(decrementer_next_tb) = get_tb_or_rtc() + evt;
815 	set_dec(evt);
816 	return 0;
817 }
818 
819 static void decrementer_set_mode(enum clock_event_mode mode,
820 				 struct clock_event_device *dev)
821 {
822 	if (mode != CLOCK_EVT_MODE_ONESHOT)
823 		decrementer_set_next_event(DECREMENTER_MAX, dev);
824 }
825 
826 static void register_decrementer_clockevent(int cpu)
827 {
828 	struct clock_event_device *dec = &per_cpu(decrementers, cpu);
829 
830 	*dec = decrementer_clockevent;
831 	dec->cpumask = cpumask_of_cpu(cpu);
832 
833 	printk(KERN_DEBUG "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n",
834 	       dec->name, dec->mult, dec->shift, cpu);
835 
836 	clockevents_register_device(dec);
837 }
838 
839 void init_decrementer_clockevent(void)
840 {
841 	int cpu = smp_processor_id();
842 
843 	decrementer_clockevent.mult = div_sc(ppc_tb_freq, NSEC_PER_SEC,
844 					     decrementer_clockevent.shift);
845 	decrementer_clockevent.max_delta_ns =
846 		clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
847 	decrementer_clockevent.min_delta_ns =
848 		clockevent_delta2ns(2, &decrementer_clockevent);
849 
850 	register_decrementer_clockevent(cpu);
851 }
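/*
 * Example of the fixed-point setup above (illustrative numbers): with
 * ppc_tb_freq = 512000000 and shift = 16, div_sc() yields
 * mult = 512000000 * 2^16 / 1e9 ~= 33554, so programming an event
 * n nanoseconds away loads the decrementer with roughly
 * n * 33554 >> 16 ticks, and clockevent_delta2ns(DECREMENTER_MAX, ...)
 * bounds n accordingly.
 */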
852 
853 void secondary_cpu_time_init(void)
854 {
855 	/* FIXME: Should make an unrelated change to move the
856 	 * snapshot_timebase call here! */
857 	register_decrementer_clockevent(smp_processor_id());
858 }
859 
860 /* This function is only called on the boot processor */
861 void __init time_init(void)
862 {
863 	unsigned long flags;
864 	struct div_result res;
865 	u64 scale, x;
866 	unsigned shift;
867 
868 	if (__USE_RTC()) {
869 		/* 601 processor: dec counts down by 128 every 128ns */
870 		ppc_tb_freq = 1000000000;
871 		tb_last_jiffy = get_rtcl();
872 	} else {
873 		/* Normal PowerPC with timebase register */
874 		ppc_md.calibrate_decr();
875 		printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
876 		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
877 		printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
878 		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
879 		tb_last_jiffy = get_tb();
880 	}
881 
882 	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
883 	tb_ticks_per_sec = ppc_tb_freq;
884 	tb_ticks_per_usec = ppc_tb_freq / 1000000;
885 	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
886 	calc_cputime_factors();
887 
888 	/*
889 	 * Calculate the length of each tick in ns.  It will not be
890 	 * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
891 	 * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
892 	 * rounded up.
893 	 */
894 	x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
895 	do_div(x, ppc_tb_freq);
896 	tick_nsec = x;
897 	last_tick_len = x << TICKLEN_SCALE;
898 
899 	/*
900 	 * Compute ticklen_to_xs, which is a factor which gets multiplied
901 	 * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
902 	 * It is computed as:
903 	 * ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
904 	 * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
905 	 * which turns out to be N = 51 - SHIFT_HZ.
906 	 * This gives the result as a 0.64 fixed-point fraction.
907 	 * That value is reduced by an offset amounting to 1 xsec per
908 	 * 2^31 timebase ticks to avoid problems with time going backwards
909 	 * by 1 xsec when we do timer_recalc_offset due to losing the
910 	 * fractional xsec.  That offset is equal to ppc_tb_freq/2^51
911 	 * since there are 2^20 xsec in a second.
912 	 */
913 	div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
914 		     tb_ticks_per_jiffy << SHIFT_HZ, &res);
915 	div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
916 	ticklen_to_xs = res.result_low;
917 
918 	/* Compute tb_to_xs from tick_nsec */
919 	tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);
920 
921 	/*
922 	 * Compute scale factor for sched_clock.
923 	 * The calibrate_decr() function has set tb_ticks_per_sec,
924 	 * which is the timebase frequency.
925 	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
926 	 * the 128-bit result as a 64.64 fixed-point number.
927 	 * We then shift that number right until it is less than 1.0,
928 	 * giving us the scale factor and shift count to use in
929 	 * sched_clock().
930 	 */
931 	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
932 	scale = res.result_low;
933 	for (shift = 0; res.result_high != 0; ++shift) {
934 		scale = (scale >> 1) | (res.result_high << 63);
935 		res.result_high >>= 1;
936 	}
937 	tb_to_ns_scale = scale;
938 	tb_to_ns_shift = shift;
939 	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
940 	boot_tb = get_tb_or_rtc();
941 
942 	write_seqlock_irqsave(&xtime_lock, flags);
943 
944 	/* If platform provided a timezone (pmac), we correct the time */
945 	if (timezone_offset) {
946 		sys_tz.tz_minuteswest = -timezone_offset / 60;
947 		sys_tz.tz_dsttime = 0;
948 	}
949 
950 	do_gtod.varp = &do_gtod.vars[0];
951 	do_gtod.var_idx = 0;
952 	do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
953 	__get_cpu_var(last_jiffy) = tb_last_jiffy;
954 	do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
955 	do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
956 	do_gtod.varp->tb_to_xs = tb_to_xs;
957 	do_gtod.tb_to_us = tb_to_us;
958 
959 	vdso_data->tb_orig_stamp = tb_last_jiffy;
960 	vdso_data->tb_update_count = 0;
961 	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
962 	vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
963 	vdso_data->tb_to_xs = tb_to_xs;
964 
965 	time_freq = 0;
966 
967 	write_sequnlock_irqrestore(&xtime_lock, flags);
968 
969 	/* Register the clocksource, if we're not running on iSeries */
970 	if (!firmware_has_feature(FW_FEATURE_ISERIES))
971 		clocksource_init();
972 
973 	init_decrementer_clockevent();
974 }
975 
976 
977 #define FEBRUARY	2
978 #define	STARTOFTIME	1970
979 #define SECDAY		86400L
980 #define SECYR		(SECDAY * 365)
981 #define	leapyear(year)		((year) % 4 == 0 && \
982 				 ((year) % 100 != 0 || (year) % 400 == 0))
983 #define	days_in_year(a) 	(leapyear(a) ? 366 : 365)
984 #define	days_in_month(a) 	(month_days[(a) - 1])
985 
986 static int month_days[12] = {
987 	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
988 };
989 
990 /*
991  * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
992  */
993 void GregorianDay(struct rtc_time * tm)
994 {
995 	int leapsToDate;
996 	int lastYear;
997 	int day;
998 	int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };
999 
1000 	lastYear = tm->tm_year - 1;
1001 
1002 	/*
1003 	 * Number of leap corrections to apply up to end of last year
1004 	 */
1005 	leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;
1006 
1007 	/*
1008 	 * This year is a leap year if it is divisible by 4 except when it is
1009 	 * divisible by 100 unless it is divisible by 400
1010 	 *
1011 	 * e.g. 1904 was a leap year, 1900 was not, 1996 was, and 2000 was.
1012 	 */
1013 	day = tm->tm_mon > 2 && leapyear(tm->tm_year);
1014 
1015 	day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
1016 		   tm->tm_mday;
1017 
1018 	tm->tm_wday = day % 7;
1019 }
1020 
1021 void to_tm(int tim, struct rtc_time * tm)
1022 {
1023 	register int    i;
1024 	register long   hms, day;
1025 
1026 	day = tim / SECDAY;
1027 	hms = tim % SECDAY;
1028 
1029 	/* Hours, minutes, seconds are easy */
1030 	tm->tm_hour = hms / 3600;
1031 	tm->tm_min = (hms % 3600) / 60;
1032 	tm->tm_sec = (hms % 3600) % 60;
1033 
1034 	/* Number of years in days */
1035 	for (i = STARTOFTIME; day >= days_in_year(i); i++)
1036 		day -= days_in_year(i);
1037 	tm->tm_year = i;
1038 
1039 	/* Number of months in days left */
1040 	if (leapyear(tm->tm_year))
1041 		days_in_month(FEBRUARY) = 29;
1042 	for (i = 1; day >= days_in_month(i); i++)
1043 		day -= days_in_month(i);
1044 	days_in_month(FEBRUARY) = 28;
1045 	tm->tm_mon = i;
1046 
1047 	/* Days are what is left over (+1) from all that. */
1048 	tm->tm_mday = day + 1;
1049 
1050 	/*
1051 	 * Determine the day of week
1052 	 */
1053 	GregorianDay(tm);
1054 }
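/*
 * Worked example (illustrative): to_tm(0, &tm) yields 1970-01-01
 * 00:00:00.  GregorianDay() then computes lastYear = 1969,
 * leapsToDate = 1969/4 - 1969/100 + 1969/400 = 477, and
 * day = 1969 * 365 + 477 + 0 + 1 = 719163, so tm_wday = 719163 % 7 = 4,
 * i.e. Thursday (0 = Sunday) -- which 1 January 1970 was.
 */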
1055 
1056 /* Auxiliary function to compute scaling factors */
1057 /* Actually the choice of a timebase running at 1/4 of the bus
1058  * frequency, giving a resolution of a few tens of nanoseconds, is quite
1059  * nice.  It makes this computation very precise (27-28 bits typically),
1060  * which is optimistic given the stability of most processor clock
1061  * oscillators and the precision with which the timebase frequency is
1062  * measured, but it does no harm.
1063  */
1064 unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
1065 {
1066 	unsigned mlt = 0, tmp, err;
1067 	/* No concern for performance, it's done once: use a stupid
1068 	 * but safe and compact method to find the multiplier.
1069 	 */
1070 
1071 	for (tmp = 1U << 31; tmp != 0; tmp >>= 1) {
1072 		if (mulhwu(inscale, mlt | tmp) < outscale)
1073 			mlt |= tmp;
1074 	}
1075 
1076 	/* We might still be off by 1 for the best approximation.
1077 	 * A side effect of this is that if outscale is too large
1078 	 * the returned value will be zero.
1079 	 * Many corner cases have been checked and seem to work,
1080 	 * some might have been forgotten in the test however.
1081 	 */
1082 
1083 	err = inscale * (mlt + 1);
1084 	if (err <= inscale / 2)
1085 		mlt++;
1086 	return mlt;
1087 }
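/*
 * Usage sketch (illustrative numbers): time_init() sets
 * tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000), i.e. a 0.32
 * fixed-point factor such that mulhwu(ticks, tb_to_us) ~= microseconds.
 * For ppc_tb_freq = 512000000 that factor is about
 * 1000000 * 2^32 / 512000000 = 8388608.
 */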
1088 
1089 /*
1090  * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
1091  * result.
1092  */
1093 void div128_by_32(u64 dividend_high, u64 dividend_low,
1094 		  unsigned divisor, struct div_result *dr)
1095 {
1096 	unsigned long a, b, c, d;
1097 	unsigned long w, x, y, z;
1098 	u64 ra, rb, rc;
1099 
1100 	a = dividend_high >> 32;
1101 	b = dividend_high & 0xffffffff;
1102 	c = dividend_low >> 32;
1103 	d = dividend_low & 0xffffffff;
1104 
1105 	w = a / divisor;
1106 	ra = ((u64)(a - (w * divisor)) << 32) + b;
1107 
1108 	rb = ((u64) do_div(ra, divisor) << 32) + c;
1109 	x = ra;
1110 
1111 	rc = ((u64) do_div(rb, divisor) << 32) + d;
1112 	y = rb;
1113 
1114 	do_div(rc, divisor);
1115 	z = rc;
1116 
1117 	dr->result_high = ((u64)w << 32) + x;
1118 	dr->result_low  = ((u64)y << 32) + z;
1120 }
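/*
 * Worked example (illustrative): calc_cputime_factors() calls
 * div128_by_32(HZ, 0, tb_ticks_per_sec, &res), i.e. it divides the
 * 128-bit value HZ * 2^64 by tb_ticks_per_sec.  With HZ = 250 and
 * tb_ticks_per_sec = 512000000, result_high is 0 and result_low is
 * about 250 * 2^64 / 512000000 = 2^64 / 2048000 -- the 0.64
 * fixed-point number of jiffies per timebase tick that the cputime
 * conversions multiply by.
 */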
1121