xref: /openbmc/linux/arch/powerpc/kernel/time.c (revision 8b5621f1)
/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make the clock more stable (2.4.0-test5). The only thing
 * this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (so never sleep
 * on SMP; nap and doze are OK).
 *
 * Sped up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 * measurement at boot time. (for iSeries, we calibrate the timebase
 * against the Titan chip's clock.)
 * - for astronomical applications: add a new function to get
 * unambiguous timestamps even around leap seconds. This needs
 * a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#ifdef CONFIG_PPC_ISERIES
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_call_xm.h>
#endif

/* powerpc clocksource/clockevent code */

#include <linux/clockchips.h>
#include <linux/clocksource.h>

static cycle_t rtc_read(void);
static struct clocksource clocksource_rtc = {
	.name         = "rtc",
	.rating       = 400,
	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
	.mask         = CLOCKSOURCE_MASK(64),
	.shift        = 22,
	.mult         = 0,	/* To be filled in */
	.read         = rtc_read,
};

static cycle_t timebase_read(void);
static struct clocksource clocksource_timebase = {
	.name         = "timebase",
	.rating       = 400,
	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
	.mask         = CLOCKSOURCE_MASK(64),
	.shift        = 22,
	.mult         = 0,	/* To be filled in */
	.read         = timebase_read,
};

#define DECREMENTER_MAX	0x7fffffff

static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev);
static void decrementer_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *dev);

static struct clock_event_device decrementer_clockevent = {
	.name           = "decrementer",
	.rating         = 200,
	.shift          = 16,
	.mult           = 0,	/* To be filled in */
	.irq            = 0,
	.set_next_event = decrementer_set_next_event,
	.set_mode       = decrementer_set_mode,
	.features       = CLOCK_EVT_FEAT_ONESHOT,
};

static DEFINE_PER_CPU(struct clock_event_device, decrementers);
static DEFINE_PER_CPU(u64, decrementer_next_tb);

#ifdef CONFIG_PPC_ISERIES
static unsigned long __initdata iSeries_recal_titan;
static signed long __initdata iSeries_recal_tb;
/* Forward declaration is only needed for iSeries compiles */
void __init clocksource_init(void);
#endif

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
#endif
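
/*
 * Illustrative note (not in the original source): an "xsec" is a
 * binary fraction of a second, 1 xsec = 1/2^20 s (~0.95 us), so
 * XSEC_PER_SEC = 2^20.  SCALE_XSEC(xsec, max) computes
 * xsec * max / 2^20; e.g. SCALE_XSEC(XSEC_PER_SEC / 2, 1000000)
 * yields ~500000, i.e. half a second expressed in microseconds.
 * On 32-bit, mulhwu((xsec) << 12, max) returns the high word of the
 * 32x32 product, i.e. ((xsec * 2^12) * max) / 2^32, which is the
 * same value without needing a 64-bit divide.
 */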

unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */
u64 tb_to_xs;
unsigned tb_to_us;

#define TICKLEN_SCALE	TICK_LENGTH_SHIFT
u64 last_tick_len;	/* units are ns / 2^TICKLEN_SCALE */
u64 ticklen_to_xs;	/* 0.64 fraction */

/* If last_tick_len corresponds to about 1/HZ seconds, then
   last_tick_len << TICKLEN_SHIFT will be about 2^63. */
#define TICKLEN_SHIFT	(63 - 30 - TICKLEN_SCALE + SHIFT_HZ)
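
/*
 * Worked example (illustrative, not in the original source): with
 * HZ = 250 (SHIFT_HZ = 8) and TICKLEN_SCALE = 32, one tick is about
 * 4e6 ns ~= 2^22, so last_tick_len ~= 2^22 << 32 = 2^54, and
 * TICKLEN_SHIFT = 63 - 30 - 32 + 8 = 9 shifts that up to ~2^63,
 * as the comment above requires.
 */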

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static unsigned long boot_tb __read_mostly;

struct gettimeofday_struct do_gtod;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL(ppc_proc_freq);
unsigned long ppc_tb_freq;

static u64 tb_last_jiffy __cacheline_aligned_in_smp;
static DEFINE_PER_CPU(u64, last_jiffy);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Factors for converting from cputime_t (timebase ticks) to
 * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
 * These are all stored as 0.64 fixed-point binary fractions.
 */
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
u64 __cputime_msec_factor;
EXPORT_SYMBOL(__cputime_msec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);

static void calc_cputime_factors(void)
{
	struct div_result res;

	div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
	__cputime_jiffies_factor = res.result_low;
	div128_by_32(1000, 0, tb_ticks_per_sec, &res);
	__cputime_msec_factor = res.result_low;
	div128_by_32(1, 0, tb_ticks_per_sec, &res);
	__cputime_sec_factor = res.result_low;
	div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
	__cputime_clockt_factor = res.result_low;
}
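
/*
 * Illustrative note (not in the original source): each factor is the
 * low 64 bits of (unit_rate * 2^64) / tb_ticks_per_sec, i.e. a 0.64
 * fixed-point fraction f such that unit = (ticks * f) >> 64.  For
 * example, with tb_ticks_per_sec = 512000000 and HZ = 250,
 * __cputime_jiffies_factor ~= 2^64 * 250 / 512000000, so multiplying
 * a tick count by it and taking the high 64 bits yields jiffies.
 */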

/*
 * Read the PURR on systems that have it, otherwise the timebase.
 */
static u64 read_purr(void)
{
	if (cpu_has_feature(CPU_FTR_PURR))
		return mfspr(SPRN_PURR);
	return mftb();
}

/*
 * Read the SPURR on systems that have it, otherwise the PURR.
 */
static u64 read_spurr(u64 purr)
{
	if (cpu_has_feature(CPU_FTR_SPURR))
		return mfspr(SPRN_SPURR);
	return purr;
}

/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
void account_system_vtime(struct task_struct *tsk)
{
	u64 now, nowscaled, delta, deltascaled;
	unsigned long flags;

	local_irq_save(flags);
	now = read_purr();
	delta = now - get_paca()->startpurr;
	get_paca()->startpurr = now;
	nowscaled = read_spurr(now);
	deltascaled = nowscaled - get_paca()->startspurr;
	get_paca()->startspurr = nowscaled;
	if (!in_interrupt()) {
		/* deltascaled includes both user and system time.
		 * Hence scale it based on the purr ratio to estimate
		 * the system time */
		if (get_paca()->user_time)
			deltascaled = deltascaled * get_paca()->system_time /
			     (get_paca()->system_time + get_paca()->user_time);
		delta += get_paca()->system_time;
		get_paca()->system_time = 0;
	}
	account_system_time(tsk, 0, delta);
	get_paca()->purrdelta = delta;
	account_system_time_scaled(tsk, deltascaled);
	get_paca()->spurrdelta = deltascaled;
	local_irq_restore(flags);
}

/*
 * Transfer the user and system times accumulated in the paca
 * by the exception entry and exit code to the generic process
 * user and system time records.
 * Must be called with interrupts disabled.
 */
void account_process_tick(struct task_struct *tsk, int user_tick)
{
	cputime_t utime, utimescaled;

	utime = get_paca()->user_time;
	get_paca()->user_time = 0;
	account_user_time(tsk, utime);

	/* Estimate the scaled utime by scaling the real utime based
	 * on the last spurr to purr ratio */
	utimescaled = utime * get_paca()->spurrdelta / get_paca()->purrdelta;
	get_paca()->spurrdelta = get_paca()->purrdelta = 0;
	account_user_time_scaled(tsk, utimescaled);
}

/*
 * Stuff for accounting stolen time.
 */
struct cpu_purr_data {
	int	initialized;	/* thread is running */
	u64	tb;		/* last TB value read */
	u64	purr;		/* last PURR value read */
	u64	spurr;		/* last SPURR value read */
};

/*
 * Each entry in the cpu_purr_data array is manipulated only by its
 * "owner" cpu -- usually in the timer interrupt but also occasionally
 * in process context for cpu online.  As long as cpus do not touch
 * each others' cpu_purr_data, disabling local interrupts is
 * sufficient to serialize accesses.
 */
static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);

static void snapshot_tb_and_purr(void *data)
{
	unsigned long flags;
	struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);

	local_irq_save(flags);
	p->tb = get_tb_or_rtc();
	p->purr = mfspr(SPRN_PURR);
	wmb();
	p->initialized = 1;
	local_irq_restore(flags);
}

/*
 * Called during boot when all cpus have come up.
 */
void snapshot_timebases(void)
{
	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
}

/*
 * Must be called with interrupts disabled.
 */
void calculate_steal_time(void)
{
	u64 tb, purr;
	s64 stolen;
	struct cpu_purr_data *pme;

	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	pme = &__get_cpu_var(cpu_purr_data);
	if (!pme->initialized)
		return;		/* this can happen in early boot */
	tb = mftb();
	purr = mfspr(SPRN_PURR);
	stolen = (tb - pme->tb) - (purr - pme->purr);
	if (stolen > 0)
		account_steal_time(current, stolen);
	pme->tb = tb;
	pme->purr = purr;
}
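
/*
 * Illustrative example (not in the original source): if the timebase
 * advanced by 1000 ticks since the last snapshot but the PURR only
 * advanced by 800, this thread was actually dispatched for 800 ticks'
 * worth of cycles and the remaining 200 ticks are accounted as stolen
 * (consumed by the other SMT thread or by the hypervisor).
 */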

#ifdef CONFIG_PPC_SPLPAR
/*
 * Must be called before the cpu is added to the online map when
 * a cpu is being brought up at runtime.
 */
static void snapshot_purr(void)
{
	struct cpu_purr_data *pme;
	unsigned long flags;

	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	local_irq_save(flags);
	pme = &__get_cpu_var(cpu_purr_data);
	pme->tb = mftb();
	pme->purr = mfspr(SPRN_PURR);
	pme->initialized = 1;
	local_irq_restore(flags);
}

#endif /* CONFIG_PPC_SPLPAR */

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
#define calc_cputime_factors()
#define calculate_steal_time()		do { } while (0)
#endif

#if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR))
#define snapshot_purr()			do { } while (0)
#endif

/*
 * Called when a cpu comes up after the system has finished booting,
 * i.e. as a result of a hotplug cpu action.
 */
void snapshot_timebase(void)
{
	__get_cpu_var(last_jiffy) = get_tb_or_rtc();
	snapshot_purr();
}

void __delay(unsigned long loops)
{
	unsigned long start;
	int diff;

	if (__USE_RTC()) {
		start = get_rtcl();
		do {
			/* the RTCL register wraps at 1000000000 */
			diff = get_rtcl() - start;
			if (diff < 0)
				diff += 1000000000;
		} while (diff < loops);
	} else {
		start = get_tbl();
		while (get_tbl() - start < loops)
			HMT_low();
		HMT_medium();
	}
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);


/*
 * There are two copies of tb_to_xs and stamp_xsec so that no
 * lock is needed to access and use these values in
 * do_gettimeofday.  We alternate the copies and as long as a
 * reasonable time elapses between changes, there will never
 * be inconsistent values.  ntpd has a minimum of one minute
 * between updates.
 */
static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
			       u64 new_tb_to_xs)
{
	unsigned temp_idx;
	struct gettimeofday_vars *temp_varp;

	temp_idx = (do_gtod.var_idx == 0);
	temp_varp = &do_gtod.vars[temp_idx];

	temp_varp->tb_to_xs = new_tb_to_xs;
	temp_varp->tb_orig_stamp = new_tb_stamp;
	temp_varp->stamp_xsec = new_stamp_xsec;
	smp_mb();
	do_gtod.varp = temp_varp;
	do_gtod.var_idx = temp_idx;

	/*
	 * tb_update_count is used to allow the userspace gettimeofday code
	 * to assure itself that it sees a consistent view of the tb_to_xs and
	 * stamp_xsec variables.  It reads the tb_update_count, then reads
	 * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
	 * the two values of tb_update_count match and are even then the
	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
	 * loops back and reads them again until this criterion is met.
	 * We expect the caller to have done the first increment of
	 * vdso_data->tb_update_count already.
	 */
	vdso_data->tb_orig_stamp = new_tb_stamp;
	vdso_data->stamp_xsec = new_stamp_xsec;
	vdso_data->tb_to_xs = new_tb_to_xs;
	vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
	vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
	smp_wmb();
	++(vdso_data->tb_update_count);
}
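
/*
 * Illustrative sketch (not part of this file): the reader-side loop
 * described in the comment above would look roughly like this,
 * assuming the vdso_data layout used here.
 */
#if 0
static void read_gtod_vars_consistently(u64 *tb_orig_stamp,
					u64 *stamp_xsec, u64 *tb_to_xs)
{
	u32 seq;

	do {
		seq = vdso_data->tb_update_count;	/* even if stable */
		smp_rmb();
		*tb_orig_stamp = vdso_data->tb_orig_stamp;
		*stamp_xsec = vdso_data->stamp_xsec;
		*tb_to_xs = vdso_data->tb_to_xs;
		smp_rmb();
	} while ((seq & 1) || seq != vdso_data->tb_update_count);
}
#endif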

#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->link;

	return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

#ifdef CONFIG_PPC_ISERIES

/*
 * This function recalibrates the timebase based on the 49-bit time-of-day
 * value in the Titan chip.  The Titan is much more accurate than the value
 * returned by the service processor for the timebase frequency.
 */

static int __init iSeries_tb_recal(void)
{
	struct div_result divres;
	unsigned long titan, tb;

	/* Make sure we only run on iSeries */
	if (!firmware_has_feature(FW_FEATURE_ISERIES))
		return -ENODEV;

	tb = get_tb();
	titan = HvCallXm_loadTod();
	if ( iSeries_recal_titan ) {
		unsigned long tb_ticks = tb - iSeries_recal_tb;
		unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
		unsigned long new_tb_ticks_per_sec   = (tb_ticks * USEC_PER_SEC)/titan_usec;
		unsigned long new_tb_ticks_per_jiffy = (new_tb_ticks_per_sec+(HZ/2))/HZ;
		long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
		char sign = '+';
		/* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
		new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;

		if ( tick_diff < 0 ) {
			tick_diff = -tick_diff;
			sign = '-';
		}
		if ( tick_diff ) {
			if ( tick_diff < tb_ticks_per_jiffy/25 ) {
				printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
						new_tb_ticks_per_jiffy, sign, tick_diff );
				tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
				tb_ticks_per_sec   = new_tb_ticks_per_sec;
				calc_cputime_factors();
				div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
				do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
				tb_to_xs = divres.result_low;
				do_gtod.varp->tb_to_xs = tb_to_xs;
				vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
				vdso_data->tb_to_xs = tb_to_xs;
			}
			else {
				printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
					"                   new tb_ticks_per_jiffy = %lu\n"
					"                   old tb_ticks_per_jiffy = %lu\n",
					new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );
			}
		}
	}
	iSeries_recal_titan = titan;
	iSeries_recal_tb = tb;

	/* Called here as now we know accurate values for the timebase */
	clocksource_init();
	return 0;
}
late_initcall(iSeries_tb_recal);

/* Called from platform early init */
void __init iSeries_time_init_early(void)
{
	iSeries_recal_tb = get_tb();
	iSeries_recal_titan = HvCallXm_loadTod();
}
#endif /* CONFIG_PPC_ISERIES */
541 /*
542  * For iSeries shared processors, we have to let the hypervisor
543  * set the hardware decrementer.  We set a virtual decrementer
544  * in the lppaca and call the hypervisor if the virtual
545  * decrementer is less than the current value in the hardware
546  * decrementer. (almost always the new decrementer value will
547  * be greater than the current hardware decementer so the hypervisor
548  * call will not be needed)
549  */
550 
551 /*
552  * timer_interrupt - gets called when the decrementer overflows,
553  * with interrupts disabled.
554  */
555 void timer_interrupt(struct pt_regs * regs)
556 {
557 	struct pt_regs *old_regs;
558 	int cpu = smp_processor_id();
559 	struct clock_event_device *evt = &per_cpu(decrementers, cpu);
560 	u64 now;
561 
562 	/* Ensure a positive value is written to the decrementer, or else
563 	 * some CPUs will continuue to take decrementer exceptions */
564 	set_dec(DECREMENTER_MAX);
565 
566 #ifdef CONFIG_PPC32
567 	if (atomic_read(&ppc_n_lost_interrupts) != 0)
568 		do_IRQ(regs);
569 #endif
570 
571 	now = get_tb_or_rtc();
572 	if (now < per_cpu(decrementer_next_tb, cpu)) {
573 		/* not time for this event yet */
574 		now = per_cpu(decrementer_next_tb, cpu) - now;
575 		if (now <= DECREMENTER_MAX)
576 			set_dec((int)now);
577 		return;
578 	}
579 	old_regs = set_irq_regs(regs);
580 	irq_enter();
581 
582 	calculate_steal_time();
583 
584 #ifdef CONFIG_PPC_ISERIES
585 	if (firmware_has_feature(FW_FEATURE_ISERIES))
586 		get_lppaca()->int_dword.fields.decr_int = 0;
587 #endif
588 
589 	if (evt->event_handler)
590 		evt->event_handler(evt);
591 
592 #ifdef CONFIG_PPC_ISERIES
593 	if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
594 		process_hvlpevents();
595 #endif
596 
597 #ifdef CONFIG_PPC64
598 	/* collect purr register values often, for accurate calculations */
599 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
600 		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
601 		cu->current_tb = mfspr(SPRN_PURR);
602 	}
603 #endif
604 
605 	irq_exit();
606 	set_irq_regs(old_regs);
607 }
608 
609 void wakeup_decrementer(void)
610 {
611 	unsigned long ticks;
612 
613 	/*
614 	 * The timebase gets saved on sleep and restored on wakeup,
615 	 * so all we need to do is to reset the decrementer.
616 	 */
617 	ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
618 	if (ticks < tb_ticks_per_jiffy)
619 		ticks = tb_ticks_per_jiffy - ticks;
620 	else
621 		ticks = 1;
622 	set_dec(ticks);
623 }
624 
625 #ifdef CONFIG_SMP
626 void __init smp_space_timers(unsigned int max_cpus)
627 {
628 	int i;
629 	u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);
630 
631 	/* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
632 	previous_tb -= tb_ticks_per_jiffy;
633 
634 	for_each_possible_cpu(i) {
635 		if (i == boot_cpuid)
636 			continue;
637 		per_cpu(last_jiffy, i) = previous_tb;
638 	}
639 }
640 #endif
641 
642 /*
643  * Scheduler clock - returns current time in nanosec units.
644  *
645  * Note: mulhdu(a, b) (multiply high double unsigned) returns
646  * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
647  * are 64-bit unsigned numbers.
648  */
649 unsigned long long sched_clock(void)
650 {
651 	if (__USE_RTC())
652 		return get_rtc();
653 	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
654 }
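
/*
 * Illustrative example (not in the original source): with a 512 MHz
 * timebase, time_init() below computes 1e9 * 2^64 / 512000000, which
 * is about 1.953 * 2^64; one right shift makes it less than 1.0, so
 * tb_to_ns_scale ~= 0.977 * 2^64 and tb_to_ns_shift = 1.  Then
 * mulhdu(ticks, tb_to_ns_scale) << 1 ~= ticks * 1.953, i.e. the
 * expected nanoseconds per tick for a 512 MHz clock.
 */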

static int __init get_freq(char *name, int cells, unsigned long *val)
{
	struct device_node *cpu;
	const unsigned int *fp;
	int found = 0;

	/* The cpu node should have timebase and clock frequency properties */
	cpu = of_find_node_by_type(NULL, "cpu");

	if (cpu) {
		fp = of_get_property(cpu, name, NULL);
		if (fp) {
			found = 1;
			*val = of_read_ulong(fp, cells);
		}

		of_node_put(cpu);
	}

	return found;
}

void __init generic_calibrate_decr(void)
{
	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */

	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(not found)\n");
	}

	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */

	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(not found)\n");
	}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
	/* Set the time base to zero */
	mtspr(SPRN_TBWL, 0);
	mtspr(SPRN_TBWU, 0);

	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	/* Enable decrementer interrupt */
	mtspr(SPRN_TCR, TCR_DIE);
#endif
}
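
/*
 * Illustrative example (not in the original source): get_freq() above
 * reads properties like the following from the device tree's cpu node;
 * the values shown are hypothetical.
 *
 *	cpu@0 {
 *		device_type = "cpu";
 *		timebase-frequency = <33333333>;	// 1 cell
 *		clock-frequency = <1000000000>;		// 1 cell
 *	};
 *
 * The "ibm,extended-*" variants are the 2-cell (64-bit) forms, tried
 * first for machines whose frequencies can exceed 32 bits.
 */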

int update_persistent_clock(struct timespec now)
{
	struct rtc_time tm;

	if (!ppc_md.set_rtc_time)
		return 0;

	to_tm(now.tv_sec + 1 + timezone_offset, &tm);
	tm.tm_year -= 1900;
	tm.tm_mon -= 1;

	return ppc_md.set_rtc_time(&tm);
}

unsigned long read_persistent_clock(void)
{
	struct rtc_time tm;
	static int first = 1;

	/* XXX this is a little fragile but will work okay in the short term */
	if (first) {
		first = 0;
		if (ppc_md.time_init)
			timezone_offset = ppc_md.time_init();

		/* get_boot_time() isn't guaranteed to be safe to call late */
		if (ppc_md.get_boot_time)
			return ppc_md.get_boot_time() - timezone_offset;
	}
	if (!ppc_md.get_rtc_time)
		return 0;
	ppc_md.get_rtc_time(&tm);
	return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
		      tm.tm_hour, tm.tm_min, tm.tm_sec);
}

/* clocksource code */
static cycle_t rtc_read(void)
{
	return (cycle_t)get_rtc();
}

static cycle_t timebase_read(void)
{
	return (cycle_t)get_tb();
}

void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
{
	u64 t2x, stamp_xsec;

	if (clock != &clocksource_timebase)
		return;

	/* Make userspace gettimeofday spin until we're done. */
	++vdso_data->tb_update_count;
	smp_mb();

	/* XXX this assumes clock->shift == 22 */
	/* 4611686018 ~= 2^(20+64-22) / 1e9 */
	t2x = (u64) clock->mult * 4611686018ULL;
	stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
	do_div(stamp_xsec, 1000000000);
	stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
	update_gtod(clock->cycle_last, stamp_xsec, t2x);
}
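
/*
 * Derivation of the constant above (illustrative, not in the original
 * source): clock->mult converts ticks to (ns << 22), while tb_to_xs is
 * a 0.64 fraction of xsec (2^-20 s) per tick, so
 * tb_to_xs = (mult / 2^22) * (2^20 / 1e9) * 2^64 = mult * 2^62 / 1e9,
 * and 2^62 / 1e9 = 4611686018.43, which truncates to 4611686018.
 */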

void update_vsyscall_tz(void)
{
	/* Make userspace gettimeofday spin until we're done. */
	++vdso_data->tb_update_count;
	smp_mb();
	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
	smp_mb();
	++vdso_data->tb_update_count;
}

void __init clocksource_init(void)
{
	struct clocksource *clock;

	if (__USE_RTC())
		clock = &clocksource_rtc;
	else
		clock = &clocksource_timebase;

	clock->mult = clocksource_hz2mult(tb_ticks_per_sec, clock->shift);

	if (clocksource_register(clock)) {
		printk(KERN_ERR "clocksource: %s is already registered\n",
		       clock->name);
		return;
	}

	printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
	       clock->name, clock->mult, clock->shift);
}

static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev)
{
	__get_cpu_var(decrementer_next_tb) = get_tb_or_rtc() + evt;
	set_dec(evt);
	return 0;
}

static void decrementer_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *dev)
{
	if (mode != CLOCK_EVT_MODE_ONESHOT)
		decrementer_set_next_event(DECREMENTER_MAX, dev);
}

static void register_decrementer_clockevent(int cpu)
{
	struct clock_event_device *dec = &per_cpu(decrementers, cpu);

	*dec = decrementer_clockevent;
	dec->cpumask = cpumask_of_cpu(cpu);

	printk(KERN_DEBUG "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n",
	       dec->name, dec->mult, dec->shift, cpu);

	clockevents_register_device(dec);
}

static void __init init_decrementer_clockevent(void)
{
	int cpu = smp_processor_id();

	decrementer_clockevent.mult = div_sc(ppc_tb_freq, NSEC_PER_SEC,
					     decrementer_clockevent.shift);
	decrementer_clockevent.max_delta_ns =
		clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
	decrementer_clockevent.min_delta_ns =
		clockevent_delta2ns(2, &decrementer_clockevent);

	register_decrementer_clockevent(cpu);
}

void secondary_cpu_time_init(void)
{
	/* FIXME: Should make an unrelated change to move the
	 * snapshot_timebase call here! */
	register_decrementer_clockevent(smp_processor_id());
}

/* This function is only called on the boot processor */
void __init time_init(void)
{
	unsigned long flags;
	struct div_result res;
	u64 scale, x;
	unsigned shift;

	if (__USE_RTC()) {
		/* 601 processor: dec counts down by 128 every 128ns */
		ppc_tb_freq = 1000000000;
		tb_last_jiffy = get_rtcl();
	} else {
		/* Normal PowerPC with timebase register */
		ppc_md.calibrate_decr();
		printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
		printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
		tb_last_jiffy = get_tb();
	}

	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
	tb_ticks_per_sec = ppc_tb_freq;
	tb_ticks_per_usec = ppc_tb_freq / 1000000;
	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
	calc_cputime_factors();

	/*
	 * Calculate the length of each tick in ns.  It will not be
	 * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
	 * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
	 * rounded up.
	 */
	x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
	do_div(x, ppc_tb_freq);
	tick_nsec = x;
	last_tick_len = x << TICKLEN_SCALE;

	/*
	 * Compute ticklen_to_xs, which is a factor which gets multiplied
	 * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
	 * It is computed as:
	 * ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
	 * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
	 * which turns out to be N = 51 - SHIFT_HZ.
	 * This gives the result as a 0.64 fixed-point fraction.
	 * That value is reduced by an offset amounting to 1 xsec per
	 * 2^31 timebase ticks to avoid problems with time going backwards
	 * by 1 xsec when we do timer_recalc_offset due to losing the
	 * fractional xsec.  That offset is equal to ppc_tb_freq/2^51
	 * since there are 2^20 xsec in a second.
	 */
	div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
		     tb_ticks_per_jiffy << SHIFT_HZ, &res);
	div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
	ticklen_to_xs = res.result_low;

	/* Compute tb_to_xs from tick_nsec */
	tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);

	/*
	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
	 * which is the timebase frequency.
	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
	 * the 128-bit result as a 64.64 fixed-point number.
	 * We then shift that number right until it is less than 1.0,
	 * giving us the scale factor and shift count to use in
	 * sched_clock().
	 */
	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
	scale = res.result_low;
	for (shift = 0; res.result_high != 0; ++shift) {
		scale = (scale >> 1) | (res.result_high << 63);
		res.result_high >>= 1;
	}
	tb_to_ns_scale = scale;
	tb_to_ns_shift = shift;
	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
	boot_tb = get_tb_or_rtc();

	write_seqlock_irqsave(&xtime_lock, flags);

	/* If platform provided a timezone (pmac), we correct the time */
	if (timezone_offset) {
		sys_tz.tz_minuteswest = -timezone_offset / 60;
		sys_tz.tz_dsttime = 0;
	}

	do_gtod.varp = &do_gtod.vars[0];
	do_gtod.var_idx = 0;
	do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
	__get_cpu_var(last_jiffy) = tb_last_jiffy;
	do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
	do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
	do_gtod.varp->tb_to_xs = tb_to_xs;
	do_gtod.tb_to_us = tb_to_us;

	vdso_data->tb_orig_stamp = tb_last_jiffy;
	vdso_data->tb_update_count = 0;
	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
	vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
	vdso_data->tb_to_xs = tb_to_xs;

	time_freq = 0;

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* Register the clocksource, if we're not running on iSeries */
	if (!firmware_has_feature(FW_FEATURE_ISERIES))
		clocksource_init();

	init_decrementer_clockevent();
}


#define FEBRUARY	2
#define	STARTOFTIME	1970
#define SECDAY		86400L
#define SECYR		(SECDAY * 365)
#define	leapyear(year)		((year) % 4 == 0 && \
				 ((year) % 100 != 0 || (year) % 400 == 0))
#define	days_in_year(a)		(leapyear(a) ? 366 : 365)
#define	days_in_month(a)	(month_days[(a) - 1])

static int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 */
void GregorianDay(struct rtc_time * tm)
{
	int leapsToDate;
	int lastYear;
	int day;
	int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };

	lastYear = tm->tm_year - 1;

	/*
	 * Number of leap corrections to apply up to end of last year
	 */
	leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;

	/*
	 * This year is a leap year if it is divisible by 4 except when it is
	 * divisible by 100 unless it is divisible by 400
	 *
	 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
	 */
	day = tm->tm_mon > 2 && leapyear(tm->tm_year);

	day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
		   tm->tm_mday;

	tm->tm_wday = day % 7;
}
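
/*
 * Worked example (illustrative, not in the original source): for
 * 1 January 2000, lastYear = 1999, leapsToDate = 499 - 19 + 4 = 484,
 * and day = 0 + 1999 * 365 + 484 + 0 + 1 = 730120; 730120 % 7 = 6,
 * i.e. Saturday (tm_wday counts from 0 = Sunday), which is correct.
 */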

void to_tm(int tim, struct rtc_time * tm)
{
	register int    i;
	register long   hms, day;

	day = tim / SECDAY;
	hms = tim % SECDAY;

	/* Hours, minutes, seconds are easy */
	tm->tm_hour = hms / 3600;
	tm->tm_min = (hms % 3600) / 60;
	tm->tm_sec = (hms % 3600) % 60;

	/* Number of years in days */
	for (i = STARTOFTIME; day >= days_in_year(i); i++)
		day -= days_in_year(i);
	tm->tm_year = i;

	/* Number of months in days left */
	if (leapyear(tm->tm_year))
		days_in_month(FEBRUARY) = 29;
	for (i = 1; day >= days_in_month(i); i++)
		day -= days_in_month(i);
	days_in_month(FEBRUARY) = 28;
	tm->tm_mon = i;

	/* Days are what is left over (+1) from all that. */
	tm->tm_mday = day + 1;

	/*
	 * Determine the day of week
	 */
	GregorianDay(tm);
}
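
/*
 * Illustrative example (not in the original source): to_tm(0, &tm)
 * yields 1970-01-01 00:00:00 with tm_wday = 4 (Thursday), since the
 * argument counts seconds from the 1970 epoch (STARTOFTIME).
 */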

/* Auxiliary function to compute scaling factors */
/* Actually the choice of a timebase running at 1/4 of the bus
 * frequency, giving a resolution of a few tens of nanoseconds, is quite
 * nice.  It makes this computation very precise (27-28 bits typically)
 * which is optimistic considering the stability of most processor clock
 * oscillators and the precision with which the timebase frequency
 * is measured, but does no harm.
 */
unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
{
	unsigned mlt = 0, tmp, err;
	/* No concern for performance, it's done once: use a stupid
	 * but safe and compact method to find the multiplier.
	 */

	for (tmp = 1U << 31; tmp != 0; tmp >>= 1) {
		if (mulhwu(inscale, mlt | tmp) < outscale)
			mlt |= tmp;
	}

	/* We might still be off by 1 for the best approximation.
	 * A side effect of this is that if outscale is too large
	 * the returned value will be zero.
	 * Many corner cases have been checked and seem to work,
	 * some might have been forgotten in the test however.
	 */

	err = inscale * (mlt + 1);
	if (err <= inscale / 2)
		mlt++;
	return mlt;
}
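
/*
 * Illustrative example (not in the original source): time_init() calls
 * mulhwu_scale_factor(ppc_tb_freq, 1000000) to build tb_to_us.  For a
 * 50 MHz timebase this finds mlt ~= 2^32 * 1000000 / 50000000, i.e.
 * about 85899346, so mulhwu(ticks, tb_to_us) ~= ticks / 50 converts
 * timebase ticks to microseconds.
 */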

/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
		  unsigned divisor, struct div_result *dr)
{
	unsigned long a, b, c, d;
	unsigned long w, x, y, z;
	u64 ra, rb, rc;

	a = dividend_high >> 32;
	b = dividend_high & 0xffffffff;
	c = dividend_low >> 32;
	d = dividend_low & 0xffffffff;

	w = a / divisor;
	ra = ((u64)(a - (w * divisor)) << 32) + b;

	rb = ((u64) do_div(ra, divisor) << 32) + c;
	x = ra;

	rc = ((u64) do_div(rb, divisor) << 32) + d;
	y = rb;

	do_div(rc, divisor);
	z = rc;

	dr->result_high = ((u64)w << 32) + x;
	dr->result_low  = ((u64)y << 32) + z;
}
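
/*
 * Illustrative example (not in the original source): dividing the
 * 128-bit value 2^64 (dividend_high = 1, dividend_low = 0) by 5 gives
 * result_high = 0 and result_low = 0x3333333333333333, i.e.
 * floor(2^64 / 5).  calc_cputime_factors() above relies on exactly
 * this to build its 0.64 fixed-point fractions.
 */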