xref: /openbmc/linux/arch/powerpc/kernel/time.c (revision fed8b7e3)
1 /*
2  * Common time routines among all ppc machines.
3  *
4  * Written by Cort Dougan (cort@cs.nmt.edu) to merge
5  * Paul Mackerras' version and mine for PReP and Pmac.
6  * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
7  * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
8  *
9  * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
10  * to make clock more stable (2.4.0-test5). The only thing
11  * that this code assumes is that the timebases have been synchronized
12  * by firmware on SMP and are never stopped (so never use the sleep
13  * power-saving state on SMP; nap and doze are OK).
14  *
15  * Speeded up do_gettimeofday by getting rid of references to
16  * xtime (which required locks for consistency). (mikejc@us.ibm.com)
17  *
18  * TODO (not necessarily in this file):
19  * - improve precision and reproducibility of timebase frequency
20  * measurement at boot time.
21  * - for astronomical applications: add a new function to get
22  * unambiguous timestamps even around leap seconds. This needs
23  * a new timestamp format and a good name.
24  *
25  * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
26  *             "A Kernel Model for Precision Timekeeping" by Dave Mills
27  *
28  *      This program is free software; you can redistribute it and/or
29  *      modify it under the terms of the GNU General Public License
30  *      as published by the Free Software Foundation; either version
31  *      2 of the License, or (at your option) any later version.
32  */
33 
34 #include <linux/errno.h>
35 #include <linux/export.h>
36 #include <linux/sched.h>
37 #include <linux/sched/clock.h>
38 #include <linux/kernel.h>
39 #include <linux/param.h>
40 #include <linux/string.h>
41 #include <linux/mm.h>
42 #include <linux/interrupt.h>
43 #include <linux/timex.h>
44 #include <linux/kernel_stat.h>
45 #include <linux/time.h>
46 #include <linux/clockchips.h>
47 #include <linux/init.h>
48 #include <linux/profile.h>
49 #include <linux/cpu.h>
50 #include <linux/security.h>
51 #include <linux/percpu.h>
52 #include <linux/rtc.h>
53 #include <linux/jiffies.h>
54 #include <linux/posix-timers.h>
55 #include <linux/irq.h>
56 #include <linux/delay.h>
57 #include <linux/irq_work.h>
58 #include <linux/clk-provider.h>
59 #include <linux/suspend.h>
61 #include <linux/sched/cputime.h>
62 #include <linux/processor.h>
63 #include <asm/trace.h>
64 
65 #include <asm/io.h>
66 #include <asm/nvram.h>
67 #include <asm/cache.h>
68 #include <asm/machdep.h>
69 #include <linux/uaccess.h>
70 #include <asm/time.h>
71 #include <asm/prom.h>
72 #include <asm/irq.h>
73 #include <asm/div64.h>
74 #include <asm/smp.h>
75 #include <asm/vdso_datapage.h>
76 #include <asm/firmware.h>
77 #include <asm/asm-prototypes.h>
78 
79 /* powerpc clocksource/clockevent code */
80 
82 #include <linux/timekeeper_internal.h>
83 
84 static u64 rtc_read(struct clocksource *);
85 static struct clocksource clocksource_rtc = {
86 	.name         = "rtc",
87 	.rating       = 400,
88 	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
89 	.mask         = CLOCKSOURCE_MASK(64),
90 	.read         = rtc_read,
91 };
92 
93 static u64 timebase_read(struct clocksource *);
94 static struct clocksource clocksource_timebase = {
95 	.name         = "timebase",
96 	.rating       = 400,
97 	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
98 	.mask         = CLOCKSOURCE_MASK(64),
99 	.read         = timebase_read,
100 };
101 
102 #define DECREMENTER_DEFAULT_MAX 0x7FFFFFFF
103 u64 decrementer_max = DECREMENTER_DEFAULT_MAX;
104 
105 static int decrementer_set_next_event(unsigned long evt,
106 				      struct clock_event_device *dev);
107 static int decrementer_shutdown(struct clock_event_device *evt);
108 
109 struct clock_event_device decrementer_clockevent = {
110 	.name			= "decrementer",
111 	.rating			= 200,
112 	.irq			= 0,
113 	.set_next_event		= decrementer_set_next_event,
114 	.set_state_oneshot_stopped = decrementer_shutdown,
115 	.set_state_shutdown	= decrementer_shutdown,
116 	.tick_resume		= decrementer_shutdown,
117 	.features		= CLOCK_EVT_FEAT_ONESHOT |
118 				  CLOCK_EVT_FEAT_C3STOP,
119 };
120 EXPORT_SYMBOL(decrementer_clockevent);
121 
122 DEFINE_PER_CPU(u64, decrementers_next_tb);
123 static DEFINE_PER_CPU(struct clock_event_device, decrementers);
124 
125 #define XSEC_PER_SEC (1024*1024)
126 
127 #ifdef CONFIG_PPC64
128 #define SCALE_XSEC(xsec, max)	(((xsec) * (max)) / XSEC_PER_SEC)
129 #else
130 /* compute ((xsec << 12) * max) >> 32 */
131 #define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
132 #endif
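
/*
 * Worked example (illustrative): an xsec is 2^-20 of a second, so
 * SCALE_XSEC maps a value in xsec units onto a 0..max range as
 * (xsec * max) / 2^20.  For instance, half a second is
 * xsec = XSEC_PER_SEC / 2 = 524288, and with max = 1000000:
 *
 *	SCALE_XSEC(524288, 1000000) == 500000
 *
 * The 32-bit variant computes the same quantity while keeping the
 * intermediate product within 64 bits via mulhwu.
 */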
133 
134 unsigned long tb_ticks_per_jiffy;
135 unsigned long tb_ticks_per_usec = 100; /* sane default */
136 EXPORT_SYMBOL(tb_ticks_per_usec);
137 unsigned long tb_ticks_per_sec;
138 EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */
139 
140 DEFINE_SPINLOCK(rtc_lock);
141 EXPORT_SYMBOL_GPL(rtc_lock);
142 
143 static u64 tb_to_ns_scale __read_mostly;
144 static unsigned tb_to_ns_shift __read_mostly;
145 static u64 boot_tb __read_mostly;
146 
147 extern struct timezone sys_tz;
148 static long timezone_offset;
149 
150 unsigned long ppc_proc_freq;
151 EXPORT_SYMBOL_GPL(ppc_proc_freq);
152 unsigned long ppc_tb_freq;
153 EXPORT_SYMBOL_GPL(ppc_tb_freq);
154 
155 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
156 /*
157  * Factor for converting from cputime_t (timebase ticks) to
158  * microseconds. This is stored as 0.64 fixed-point binary fraction.
159  */
160 u64 __cputime_usec_factor;
161 EXPORT_SYMBOL(__cputime_usec_factor);
162 
163 #ifdef CONFIG_PPC_SPLPAR
164 void (*dtl_consumer)(struct dtl_entry *, u64);
165 #endif
166 
167 static void calc_cputime_factors(void)
168 {
169 	struct div_result res;
170 
171 	div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
172 	__cputime_usec_factor = res.result_low;
173 }
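
/*
 * A minimal sketch (illustrative, not used by the kernel) of how the
 * 0.64 fixed-point factor above is consumed: multiplying timebase
 * ticks by the factor and keeping the high 64 bits of the 128-bit
 * product yields microseconds, i.e.
 *
 *	usecs = mulhdu(ticks, __cputime_usec_factor);
 *
 * computes (ticks * factor) >> 64 == ticks * 1000000 / tb_ticks_per_sec.
 */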
174 
175 /*
176  * Read the SPURR on systems that have it, otherwise the PURR,
177  * or if that doesn't exist return the timebase value passed in.
178  */
179 static inline unsigned long read_spurr(unsigned long tb)
180 {
181 	if (cpu_has_feature(CPU_FTR_SPURR))
182 		return mfspr(SPRN_SPURR);
183 	if (cpu_has_feature(CPU_FTR_PURR))
184 		return mfspr(SPRN_PURR);
185 	return tb;
186 }
187 
188 #ifdef CONFIG_PPC_SPLPAR
189 
190 /*
191  * Scan the dispatch trace log and count up the stolen time.
192  * Should be called with interrupts disabled.
193  */
194 static u64 scan_dispatch_log(u64 stop_tb)
195 {
196 	u64 i = local_paca->dtl_ridx;
197 	struct dtl_entry *dtl = local_paca->dtl_curr;
198 	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
199 	struct lppaca *vpa = local_paca->lppaca_ptr;
200 	u64 tb_delta;
201 	u64 stolen = 0;
202 	u64 dtb;
203 
204 	if (!dtl)
205 		return 0;
206 
207 	if (i == be64_to_cpu(vpa->dtl_idx))
208 		return 0;
209 	while (i < be64_to_cpu(vpa->dtl_idx)) {
210 		dtb = be64_to_cpu(dtl->timebase);
211 		tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
212 			be32_to_cpu(dtl->ready_to_enqueue_time);
213 		barrier();
214 		if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
215 			/* buffer has overflowed */
216 			i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
217 			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
218 			continue;
219 		}
220 		if (dtb > stop_tb)
221 			break;
222 		if (dtl_consumer)
223 			dtl_consumer(dtl, i);
224 		stolen += tb_delta;
225 		++i;
226 		++dtl;
227 		if (dtl == dtl_end)
228 			dtl = local_paca->dispatch_log;
229 	}
230 	local_paca->dtl_ridx = i;
231 	local_paca->dtl_curr = dtl;
232 	return stolen;
233 }
234 
235 /*
236  * Accumulate stolen time by scanning the dispatch trace log.
237  * Called on entry from user mode.
238  */
239 void accumulate_stolen_time(void)
240 {
241 	u64 sst, ust;
242 	unsigned long save_irq_soft_mask = irq_soft_mask_return();
243 	struct cpu_accounting_data *acct = &local_paca->accounting;
244 
245 	/* We are called early in the exception entry, before
246 	 * soft/hard_enabled are sync'ed to the expected state
247 	 * for the exception. We are hard disabled but the PACA
248 	 * needs to reflect that so various debug stuff doesn't
249 	 * complain.
250 	 */
251 	irq_soft_mask_set(IRQS_DISABLED);
252 
253 	sst = scan_dispatch_log(acct->starttime_user);
254 	ust = scan_dispatch_log(acct->starttime);
255 	acct->stime -= sst;
256 	acct->utime -= ust;
257 	acct->steal_time += ust + sst;
258 
259 	irq_soft_mask_set(save_irq_soft_mask);
260 }
261 
262 static inline u64 calculate_stolen_time(u64 stop_tb)
263 {
264 	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
265 		return 0;
266 
267 	if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx))
268 		return scan_dispatch_log(stop_tb);
269 
270 	return 0;
271 }
272 
273 #else /* CONFIG_PPC_SPLPAR */
274 static inline u64 calculate_stolen_time(u64 stop_tb)
275 {
276 	return 0;
277 }
278 
279 #endif /* CONFIG_PPC_SPLPAR */
280 
281 /*
282  * Account time for a transition between system, hard irq
283  * or soft irq state.
284  */
285 static unsigned long vtime_delta_scaled(struct cpu_accounting_data *acct,
286 					unsigned long now, unsigned long stime)
287 {
288 	unsigned long stime_scaled = 0;
289 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
290 	unsigned long nowscaled, deltascaled;
291 	unsigned long utime, utime_scaled;
292 
293 	nowscaled = read_spurr(now);
294 	deltascaled = nowscaled - acct->startspurr;
295 	acct->startspurr = nowscaled;
296 	utime = acct->utime - acct->utime_sspurr;
297 	acct->utime_sspurr = acct->utime;
298 
299 	/*
300 	 * Because we don't read the SPURR on every kernel entry/exit,
301 	 * deltascaled includes both user and system SPURR ticks.
302 	 * Apportion these ticks to system SPURR ticks and user
303 	 * SPURR ticks in the same ratio as the system time (delta)
304 	 * and user time (udelta) values obtained from the timebase
305 	 * over the same interval.  The system ticks get accounted here;
306 	 * the user ticks get saved up in paca->user_time_scaled to be
307 	 * used by account_process_tick.
308 	 */
309 	stime_scaled = stime;
310 	utime_scaled = utime;
311 	if (deltascaled != stime + utime) {
312 		if (utime) {
313 			stime_scaled = deltascaled * stime / (stime + utime);
314 			utime_scaled = deltascaled - stime_scaled;
315 		} else {
316 			stime_scaled = deltascaled;
317 		}
318 	}
319 	acct->utime_scaled += utime_scaled;
320 #endif
321 
322 	return stime_scaled;
323 }
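
/*
 * Worked example for the apportioning above (illustrative numbers):
 * if the timebase shows stime = 10 and utime = 20 ticks while the
 * SPURR advanced by deltascaled = 300, then the system share is
 * 300 * 10 / (10 + 20) = 100 scaled ticks and the user share is the
 * remaining 200.
 */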
324 
325 static unsigned long vtime_delta(struct task_struct *tsk,
326 				 unsigned long *stime_scaled,
327 				 unsigned long *steal_time)
328 {
329 	unsigned long now, stime;
330 	struct cpu_accounting_data *acct = get_accounting(tsk);
331 
332 	WARN_ON_ONCE(!irqs_disabled());
333 
334 	now = mftb();
335 	stime = now - acct->starttime;
336 	acct->starttime = now;
337 
338 	*stime_scaled = vtime_delta_scaled(acct, now, stime);
339 
340 	*steal_time = calculate_stolen_time(now);
341 
342 	return stime;
343 }
344 
345 void vtime_account_system(struct task_struct *tsk)
346 {
347 	unsigned long stime, stime_scaled, steal_time;
348 	struct cpu_accounting_data *acct = get_accounting(tsk);
349 
350 	stime = vtime_delta(tsk, &stime_scaled, &steal_time);
351 
352 	stime -= min(stime, steal_time);
353 	acct->steal_time += steal_time;
354 
355 	if ((tsk->flags & PF_VCPU) && !irq_count()) {
356 		acct->gtime += stime;
357 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
358 		acct->utime_scaled += stime_scaled;
359 #endif
360 	} else {
361 		if (hardirq_count())
362 			acct->hardirq_time += stime;
363 		else if (in_serving_softirq())
364 			acct->softirq_time += stime;
365 		else
366 			acct->stime += stime;
367 
368 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
369 		acct->stime_scaled += stime_scaled;
370 #endif
371 	}
372 }
373 EXPORT_SYMBOL_GPL(vtime_account_system);
374 
375 void vtime_account_idle(struct task_struct *tsk)
376 {
377 	unsigned long stime, stime_scaled, steal_time;
378 	struct cpu_accounting_data *acct = get_accounting(tsk);
379 
380 	stime = vtime_delta(tsk, &stime_scaled, &steal_time);
381 	acct->idle_time += stime + steal_time;
382 }
383 
384 static void vtime_flush_scaled(struct task_struct *tsk,
385 			       struct cpu_accounting_data *acct)
386 {
387 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
388 	if (acct->utime_scaled)
389 		tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);
390 	if (acct->stime_scaled)
391 		tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);
392 
393 	acct->utime_scaled = 0;
394 	acct->utime_sspurr = 0;
395 	acct->stime_scaled = 0;
396 #endif
397 }
398 
399 /*
400  * Account the whole cputime accumulated in the paca
401  * Must be called with interrupts disabled.
402  * Assumes that vtime_account_system/idle() has been called
403  * recently (i.e. since the last entry from usermode) so that
404  * get_paca()->user_time_scaled is up to date.
405  */
406 void vtime_flush(struct task_struct *tsk)
407 {
408 	struct cpu_accounting_data *acct = get_accounting(tsk);
409 
410 	if (acct->utime)
411 		account_user_time(tsk, cputime_to_nsecs(acct->utime));
412 
413 	if (acct->gtime)
414 		account_guest_time(tsk, cputime_to_nsecs(acct->gtime));
415 
416 	if (IS_ENABLED(CONFIG_PPC_SPLPAR) && acct->steal_time) {
417 		account_steal_time(cputime_to_nsecs(acct->steal_time));
418 		acct->steal_time = 0;
419 	}
420 
421 	if (acct->idle_time)
422 		account_idle_time(cputime_to_nsecs(acct->idle_time));
423 
424 	if (acct->stime)
425 		account_system_index_time(tsk, cputime_to_nsecs(acct->stime),
426 					  CPUTIME_SYSTEM);
427 
428 	if (acct->hardirq_time)
429 		account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time),
430 					  CPUTIME_IRQ);
431 	if (acct->softirq_time)
432 		account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time),
433 					  CPUTIME_SOFTIRQ);
434 
435 	vtime_flush_scaled(tsk, acct);
436 
437 	acct->utime = 0;
438 	acct->gtime = 0;
439 	acct->idle_time = 0;
440 	acct->stime = 0;
441 	acct->hardirq_time = 0;
442 	acct->softirq_time = 0;
443 }
444 
445 #else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
446 #define calc_cputime_factors()
447 #endif
448 
449 void __delay(unsigned long loops)
450 {
451 	unsigned long start;
452 	int diff;
453 
454 	spin_begin();
455 	if (__USE_RTC()) {
456 		start = get_rtcl();
457 		do {
458 			/* the RTCL register wraps at 1000000000 */
459 			diff = get_rtcl() - start;
460 			if (diff < 0)
461 				diff += 1000000000;
462 			spin_cpu_relax();
463 		} while (diff < loops);
464 	} else {
465 		start = get_tbl();
466 		while (get_tbl() - start < loops)
467 			spin_cpu_relax();
468 	}
469 	spin_end();
470 }
471 EXPORT_SYMBOL(__delay);
472 
473 void udelay(unsigned long usecs)
474 {
475 	__delay(tb_ticks_per_usec * usecs);
476 }
477 EXPORT_SYMBOL(udelay);
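
/*
 * Example usage (illustrative): a driver busy-waiting 10us after a
 * hypothetical MMIO write, to let the device settle:
 *
 *	out_be32(reg, val);
 *	udelay(10);
 */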
478 
479 #ifdef CONFIG_SMP
480 unsigned long profile_pc(struct pt_regs *regs)
481 {
482 	unsigned long pc = instruction_pointer(regs);
483 
484 	if (in_lock_functions(pc))
485 		return regs->link;
486 
487 	return pc;
488 }
489 EXPORT_SYMBOL(profile_pc);
490 #endif
491 
492 #ifdef CONFIG_IRQ_WORK
493 
494 /*
495  * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
496  */
497 #ifdef CONFIG_PPC64
498 static inline unsigned long test_irq_work_pending(void)
499 {
500 	unsigned long x;
501 
502 	asm volatile("lbz %0,%1(13)"
503 		: "=r" (x)
504 		: "i" (offsetof(struct paca_struct, irq_work_pending)));
505 	return x;
506 }
507 
508 static inline void set_irq_work_pending_flag(void)
509 {
510 	asm volatile("stb %0,%1(13)" : :
511 		"r" (1),
512 		"i" (offsetof(struct paca_struct, irq_work_pending)));
513 }
514 
515 static inline void clear_irq_work_pending(void)
516 {
517 	asm volatile("stb %0,%1(13)" : :
518 		"r" (0),
519 		"i" (offsetof(struct paca_struct, irq_work_pending)));
520 }
521 
522 void arch_irq_work_raise(void)
523 {
524 	preempt_disable();
525 	set_irq_work_pending_flag();
526 	/*
527 	 * Non-nmi code running with interrupts disabled will replay
528 	 * irq_happened before it re-enables interrupts, so setthe
529 	 * decrementer there instead of causing a hardware exception
530 	 * which would immediately hit the masked interrupt handler
531 	 * and have the net effect of setting the decrementer in
532 	 * irq_happened.
533 	 *
534 	 * NMI interrupts cannot check this when they return, so the
535 	 * decrementer hardware exception is raised, which will fire
536 	 * when interrupts are next enabled.
537 	 *
538 	 * BookE does not support this yet; it must audit all NMI
539 	 * interrupt handlers to ensure they call nmi_enter() so that
540 	 * this check would be correct.
541 	 */
542 	if (IS_ENABLED(CONFIG_BOOKE) || !irqs_disabled() || in_nmi()) {
543 		set_dec(1);
544 	} else {
545 		hard_irq_disable();
546 		local_paca->irq_happened |= PACA_IRQ_DEC;
547 	}
548 	preempt_enable();
549 }
550 
551 #else /* 32-bit */
552 
553 DEFINE_PER_CPU(u8, irq_work_pending);
554 
555 #define set_irq_work_pending_flag()	__this_cpu_write(irq_work_pending, 1)
556 #define test_irq_work_pending()		__this_cpu_read(irq_work_pending)
557 #define clear_irq_work_pending()	__this_cpu_write(irq_work_pending, 0)
558 
559 void arch_irq_work_raise(void)
560 {
561 	preempt_disable();
562 	set_irq_work_pending_flag();
563 	set_dec(1);
564 	preempt_enable();
565 }
566 
567 #endif /* 32 vs 64 bit */
568 
569 #else  /* CONFIG_IRQ_WORK */
570 
571 #define test_irq_work_pending()	0
572 #define clear_irq_work_pending()
573 
574 #endif /* CONFIG_IRQ_WORK */
575 
576 /*
577  * timer_interrupt - gets called when the decrementer overflows,
578  * with interrupts disabled.
579  */
580 void timer_interrupt(struct pt_regs *regs)
581 {
582 	struct clock_event_device *evt = this_cpu_ptr(&decrementers);
583 	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
584 	struct pt_regs *old_regs;
585 	u64 now;
586 
587 	/* Some implementations of hotplug will get timer interrupts while
588 	 * offline, so just ignore these. We also need to set
589 	 * decrementers_next_tb to MAX to make sure __check_irq_replay
590 	 * doesn't replay the timer interrupt on return, otherwise we'll
591 	 * trap here infinitely :(
592 	 */
593 	if (unlikely(!cpu_online(smp_processor_id()))) {
594 		*next_tb = ~(u64)0;
595 		set_dec(decrementer_max);
596 		return;
597 	}
598 
599 	/* Ensure a positive value is written to the decrementer, or else
600 	 * some CPUs will continue to take decrementer exceptions. When the
601 	 * PPC_WATCHDOG (decrementer based) is configured, keep this at most
602 	 * 31 bits, which is about 4 seconds on most systems, which gives
603 	 * the watchdog a chance of catching timer interrupt hard lockups.
604 	 */
605 	if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
606 		set_dec(0x7fffffff);
607 	else
608 		set_dec(decrementer_max);
609 
610 	/* Conditionally hard-enable interrupts now that the DEC has been
611 	 * bumped to its maximum value
612 	 */
613 	may_hard_irq_enable();
614 
616 #if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
617 	if (atomic_read(&ppc_n_lost_interrupts) != 0)
618 		do_IRQ(regs);
619 #endif
620 
621 	old_regs = set_irq_regs(regs);
622 	irq_enter();
623 	trace_timer_interrupt_entry(regs);
624 
625 	if (test_irq_work_pending()) {
626 		clear_irq_work_pending();
627 		irq_work_run();
628 	}
629 
630 	now = get_tb_or_rtc();
631 	if (now >= *next_tb) {
632 		*next_tb = ~(u64)0;
633 		if (evt->event_handler)
634 			evt->event_handler(evt);
635 		__this_cpu_inc(irq_stat.timer_irqs_event);
636 	} else {
637 		now = *next_tb - now;
638 		if (now <= decrementer_max)
639 			set_dec(now);
640 		/* We may have raced with new irq work */
641 		if (test_irq_work_pending())
642 			set_dec(1);
643 		__this_cpu_inc(irq_stat.timer_irqs_others);
644 	}
645 
646 	trace_timer_interrupt_exit(regs);
647 	irq_exit();
648 	set_irq_regs(old_regs);
649 }
650 EXPORT_SYMBOL(timer_interrupt);
651 
652 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
653 void timer_broadcast_interrupt(void)
654 {
655 	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
656 
657 	*next_tb = ~(u64)0;
658 	tick_receive_broadcast();
659 	__this_cpu_inc(irq_stat.broadcast_irqs_event);
660 }
661 #endif
662 
663 /*
664  * Hypervisor decrementer interrupts shouldn't occur but are sometimes
665  * left pending on exit from a KVM guest.  We don't need to do anything
666  * to clear them, as they are edge-triggered.
667  */
668 void hdec_interrupt(struct pt_regs *regs)
669 {
670 }
671 
672 #ifdef CONFIG_SUSPEND
673 static void generic_suspend_disable_irqs(void)
674 {
675 	/* Disable the decrementer, so that it doesn't interfere
676 	 * with suspending.
677 	 */
678 
679 	set_dec(decrementer_max);
680 	local_irq_disable();
681 	set_dec(decrementer_max);
682 }
683 
684 static void generic_suspend_enable_irqs(void)
685 {
686 	local_irq_enable();
687 }
688 
689 /* Overrides the weak version in kernel/power/main.c */
690 void arch_suspend_disable_irqs(void)
691 {
692 	if (ppc_md.suspend_disable_irqs)
693 		ppc_md.suspend_disable_irqs();
694 	generic_suspend_disable_irqs();
695 }
696 
697 /* Overrides the weak version in kernel/power/main.c */
698 void arch_suspend_enable_irqs(void)
699 {
700 	generic_suspend_enable_irqs();
701 	if (ppc_md.suspend_enable_irqs)
702 		ppc_md.suspend_enable_irqs();
703 }
704 #endif
705 
706 unsigned long long tb_to_ns(unsigned long long ticks)
707 {
708 	return mulhdu(ticks, tb_to_ns_scale) << tb_to_ns_shift;
709 }
710 EXPORT_SYMBOL_GPL(tb_to_ns);
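
/*
 * Example usage (illustrative): measuring an elapsed interval in
 * nanoseconds with the timebase (do_something() is a placeholder):
 *
 *	u64 start = get_tb();
 *	do_something();
 *	u64 elapsed_ns = tb_to_ns(get_tb() - start);
 */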
711 
712 /*
713  * Scheduler clock - returns current time in nanosec units.
714  *
715  * Note: mulhdu(a, b) (multiply high double unsigned) returns
716  * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
717  * are 64-bit unsigned numbers.
718  */
719 notrace unsigned long long sched_clock(void)
720 {
721 	if (__USE_RTC())
722 		return get_rtc();
723 	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
724 }
725 
727 #ifdef CONFIG_PPC_PSERIES
728 
729 /*
730  * Running clock - attempts to give a view of time passing for a virtualised
731  * kernel.
732  * Uses the VTB register if available, otherwise falls back to a best guess.
733  */
734 unsigned long long running_clock(void)
735 {
736 	/*
737 	 * Don't read the VTB as a host, since KVM does not switch the host
738 	 * timebase into the VTB when it takes a guest off the CPU; reading
739 	 * the VTB would return the 'last switched out' guest's VTB.
740 	 *
741 	 * Host kernels are often compiled with CONFIG_PPC_PSERIES checked, so
742 	 * it would be unsafe to rely only on the #ifdef above.
743 	 */
744 	if (firmware_has_feature(FW_FEATURE_LPAR) &&
745 	    cpu_has_feature(CPU_FTR_ARCH_207S))
746 		return mulhdu(get_vtb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
747 
748 	/*
749 	 * This is the next best approximation without a VTB.
750 	 * On a bare-metal host there should never be any stolen time, and on
751 	 * a host which doesn't do any virtualisation TB *should* equal VTB,
752 	 * so it makes no difference anyway.
753 	 */
754 	return local_clock() - kcpustat_this_cpu->cpustat[CPUTIME_STEAL];
755 }
756 #endif
757 
758 static int __init get_freq(char *name, int cells, unsigned long *val)
759 {
760 	struct device_node *cpu;
761 	const __be32 *fp;
762 	int found = 0;
763 
764 	/* The cpu node should have timebase and clock frequency properties */
765 	cpu = of_find_node_by_type(NULL, "cpu");
766 
767 	if (cpu) {
768 		fp = of_get_property(cpu, name, NULL);
769 		if (fp) {
770 			found = 1;
771 			*val = of_read_ulong(fp, cells);
772 		}
773 
774 		of_node_put(cpu);
775 	}
776 
777 	return found;
778 }
779 
780 static void start_cpu_decrementer(void)
781 {
782 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
783 	unsigned int tcr;
784 
785 	/* Clear any pending timer interrupts */
786 	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
787 
788 	tcr = mfspr(SPRN_TCR);
789 	/*
790 	 * The watchdog may have already been enabled by u-boot. So leave
791 	 * TCR[WP] (Watchdog Period) alone.
792 	 */
793 	tcr &= TCR_WP_MASK;	/* Clear all bits except for TCR[WP] */
794 	tcr |= TCR_DIE;		/* Enable decrementer */
795 	mtspr(SPRN_TCR, tcr);
796 #endif
797 }
798 
799 void __init generic_calibrate_decr(void)
800 {
801 	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */
802 
803 	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
804 	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {
805 
806 		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
807 				"(not found)\n");
808 	}
809 
810 	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */
811 
812 	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
813 	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {
814 
815 		printk(KERN_ERR "WARNING: Estimating processor frequency "
816 				"(not found)\n");
817 	}
818 }
819 
820 int update_persistent_clock64(struct timespec64 now)
821 {
822 	struct rtc_time tm;
823 
824 	if (!ppc_md.set_rtc_time)
825 		return -ENODEV;
826 
827 	rtc_time64_to_tm(now.tv_sec + 1 + timezone_offset, &tm);
828 
829 	return ppc_md.set_rtc_time(&tm);
830 }
831 
832 static void __read_persistent_clock(struct timespec64 *ts)
833 {
834 	struct rtc_time tm;
835 	static int first = 1;
836 
837 	ts->tv_nsec = 0;
838 	/* XXX this is a little fragile but will work okay in the short term */
839 	if (first) {
840 		first = 0;
841 		if (ppc_md.time_init)
842 			timezone_offset = ppc_md.time_init();
843 
844 		/* get_boot_time() isn't guaranteed to be safe to call late */
845 		if (ppc_md.get_boot_time) {
846 			ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
847 			return;
848 		}
849 	}
850 	if (!ppc_md.get_rtc_time) {
851 		ts->tv_sec = 0;
852 		return;
853 	}
854 	ppc_md.get_rtc_time(&tm);
855 
856 	ts->tv_sec = rtc_tm_to_time64(&tm);
857 }
858 
859 void read_persistent_clock64(struct timespec64 *ts)
860 {
861 	__read_persistent_clock(ts);
862 
863 	/* Sanitize it in case the real time clock is set before the epoch */
864 	if (ts->tv_sec < 0) {
865 		ts->tv_sec = 0;
866 		ts->tv_nsec = 0;
867 	}
869 }
870 
871 /* clocksource code */
872 static notrace u64 rtc_read(struct clocksource *cs)
873 {
874 	return (u64)get_rtc();
875 }
876 
877 static notrace u64 timebase_read(struct clocksource *cs)
878 {
879 	return (u64)get_tb();
880 }
881 
883 void update_vsyscall(struct timekeeper *tk)
884 {
885 	struct timespec xt;
886 	struct clocksource *clock = tk->tkr_mono.clock;
887 	u32 mult = tk->tkr_mono.mult;
888 	u32 shift = tk->tkr_mono.shift;
889 	u64 cycle_last = tk->tkr_mono.cycle_last;
890 	u64 new_tb_to_xs, new_stamp_xsec;
891 	u64 frac_sec;
892 
893 	if (clock != &clocksource_timebase)
894 		return;
895 
896 	xt.tv_sec = tk->xtime_sec;
897 	xt.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
898 
899 	/* Make userspace gettimeofday spin until we're done. */
900 	++vdso_data->tb_update_count;
901 	smp_mb();
902 
903 	/*
904 	 * This computes ((2^20 / 1e9) * mult) >> shift as a
905 	 * 0.64 fixed-point fraction.
906 	 * The computation in the else clause below won't overflow
907 	 * (as long as the timebase frequency is >= 1.049 MHz)
908 	 * but loses precision because we lose the low bits of the constant
909 	 * in the shift.  Note that 19342813113834067 ~= 2^(20+64) / 1e9.
910 	 * For a shift of 24 the error is about 0.5e-9, or about 0.5ns
911 	 * over a second.  (Shift values are usually 22, 23 or 24.)
912 	 * For high frequency clocks such as the 512MHz timebase clock
913 	 * on POWER[6789], the mult value is small (e.g. 32768000)
914 	 * and so we can shift the constant by 16 initially
915 	 * (295147905179 ~= 2^(20+64-16) / 1e9) and then do the
916 	 * remaining shifts after the multiplication, which gives a
917 	 * more accurate result (e.g. with mult = 32768000, shift = 24,
918 	 * the error is only about 1.2e-12, or 0.7ns over 10 minutes).
919 	 */
920 	if (mult <= 62500000 && clock->shift >= 16)
921 		new_tb_to_xs = ((u64) mult * 295147905179ULL) >> (clock->shift - 16);
922 	else
923 		new_tb_to_xs = (u64) mult * (19342813113834067ULL >> clock->shift);
924 
925 	/*
926 	 * Compute the fractional second in units of 2^-32 seconds.
927 	 * The fractional second is tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift
928 	 * in nanoseconds, so multiplying that by 2^32 / 1e9 gives
929 	 * it in units of 2^-32 seconds.
930 	 * We assume shift <= 32 because clocks_calc_mult_shift()
931 	 * generates shift values in the range 0 - 32.
932 	 */
933 	frac_sec = tk->tkr_mono.xtime_nsec << (32 - shift);
934 	do_div(frac_sec, NSEC_PER_SEC);
935 
936 	/*
937 	 * Work out new stamp_xsec value for any legacy users of systemcfg.
938 	 * stamp_xsec is in units of 2^-20 seconds.
939 	 */
940 	new_stamp_xsec = frac_sec >> 12;
941 	new_stamp_xsec += tk->xtime_sec * XSEC_PER_SEC;
942 
943 	/*
944 	 * tb_update_count is used to allow the userspace gettimeofday code
945 	 * to assure itself that it sees a consistent view of the tb_to_xs and
946 	 * stamp_xsec variables.  It reads the tb_update_count, then reads
947 	 * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
948 	 * the two values of tb_update_count match and are even then the
949 	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
950 	 * loops back and reads them again until this criterion is met.
951 	 */
952 	vdso_data->tb_orig_stamp = cycle_last;
953 	vdso_data->stamp_xsec = new_stamp_xsec;
954 	vdso_data->tb_to_xs = new_tb_to_xs;
955 	vdso_data->wtom_clock_sec = tk->wall_to_monotonic.tv_sec;
956 	vdso_data->wtom_clock_nsec = tk->wall_to_monotonic.tv_nsec;
957 	vdso_data->stamp_xtime = xt;
958 	vdso_data->stamp_sec_fraction = frac_sec;
959 	smp_wmb();
960 	++(vdso_data->tb_update_count);
961 }
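
/*
 * A sketch of the userspace read side of the tb_update_count protocol
 * described above (illustrative only, not the actual VDSO code):
 *
 *	u64 seq, tb_to_xs, stamp_xsec;
 *
 *	do {
 *		seq = vdso_data->tb_update_count;
 *		smp_rmb();
 *		tb_to_xs = vdso_data->tb_to_xs;
 *		stamp_xsec = vdso_data->stamp_xsec;
 *		smp_rmb();
 *	} while (vdso_data->tb_update_count != seq || (seq & 1));
 *
 * An odd count means an update is in progress; a changed count means
 * the values may be torn, so the reader retries.
 */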
962 
963 void update_vsyscall_tz(void)
964 {
965 	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
966 	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
967 }
968 
969 static void __init clocksource_init(void)
970 {
971 	struct clocksource *clock;
972 
973 	if (__USE_RTC())
974 		clock = &clocksource_rtc;
975 	else
976 		clock = &clocksource_timebase;
977 
978 	if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
979 		printk(KERN_ERR "clocksource: %s is already registered\n",
980 		       clock->name);
981 		return;
982 	}
983 
984 	printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
985 	       clock->name, clock->mult, clock->shift);
986 }
987 
988 static int decrementer_set_next_event(unsigned long evt,
989 				      struct clock_event_device *dev)
990 {
991 	__this_cpu_write(decrementers_next_tb, get_tb_or_rtc() + evt);
992 	set_dec(evt);
993 
994 	/* We may have raced with new irq work */
995 	if (test_irq_work_pending())
996 		set_dec(1);
997 
998 	return 0;
999 }
1000 
1001 static int decrementer_shutdown(struct clock_event_device *dev)
1002 {
1003 	decrementer_set_next_event(decrementer_max, dev);
1004 	return 0;
1005 }
1006 
1007 static void register_decrementer_clockevent(int cpu)
1008 {
1009 	struct clock_event_device *dec = &per_cpu(decrementers, cpu);
1010 
1011 	*dec = decrementer_clockevent;
1012 	dec->cpumask = cpumask_of(cpu);
1013 
1014 	clockevents_config_and_register(dec, ppc_tb_freq, 2, decrementer_max);
1015 
1016 	printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
1017 		    dec->name, dec->mult, dec->shift, cpu);
1018 
1019 	/* Set values for KVM, see kvm_emulate_dec() */
1020 	decrementer_clockevent.mult = dec->mult;
1021 	decrementer_clockevent.shift = dec->shift;
1022 }
1023 
1024 static void enable_large_decrementer(void)
1025 {
1026 	if (!cpu_has_feature(CPU_FTR_ARCH_300))
1027 		return;
1028 
1029 	if (decrementer_max <= DECREMENTER_DEFAULT_MAX)
1030 		return;
1031 
1032 	/*
1033 	 * If we're running as the hypervisor we need to enable the LD manually
1034 	 * otherwise firmware should have done it for us.
1035 	 */
1036 	if (cpu_has_feature(CPU_FTR_HVMODE))
1037 		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_LD);
1038 }
1039 
1040 static void __init set_decrementer_max(void)
1041 {
1042 	struct device_node *cpu;
1043 	u32 bits = 32;
1044 
1045 	/* Prior to ISAv3 the decrementer is always 32 bit */
1046 	if (!cpu_has_feature(CPU_FTR_ARCH_300))
1047 		return;
1048 
1049 	cpu = of_find_node_by_type(NULL, "cpu");
1050 
1051 	if (of_property_read_u32(cpu, "ibm,dec-bits", &bits) == 0) {
1052 		if (bits > 64 || bits < 32) {
1053 			pr_warn("time_init: firmware supplied invalid ibm,dec-bits\n");
1054 			bits = 32;
1055 		}
1056 
1057 		/* calculate the signed maximum given this many bits */
1058 		decrementer_max = (1ul << (bits - 1)) - 1;
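		/* e.g. bits = 56 (ISAv3 large decrementer) -> max = 0x7fffffffffffff */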
1059 	}
1060 
1061 	of_node_put(cpu);
1062 
1063 	pr_info("time_init: %u bit decrementer (max: %llx)\n",
1064 		bits, decrementer_max);
1065 }
1066 
1067 static void __init init_decrementer_clockevent(void)
1068 {
1069 	register_decrementer_clockevent(smp_processor_id());
1070 }
1071 
1072 void secondary_cpu_time_init(void)
1073 {
1074 	/* Enable and test the large decrementer for this cpu */
1075 	enable_large_decrementer();
1076 
1077 	/* Start the decrementer on CPUs that have manual control
1078 	 * such as BookE
1079 	 */
1080 	start_cpu_decrementer();
1081 
1082 	/* FIXME: Should make unrelated change to move snapshot_timebase
1083 	 * call here! */
1084 	register_decrementer_clockevent(smp_processor_id());
1085 }
1086 
1087 /* This function is only called on the boot processor */
1088 void __init time_init(void)
1089 {
1090 	struct div_result res;
1091 	u64 scale;
1092 	unsigned shift;
1093 
1094 	if (__USE_RTC()) {
1095 		/* 601 processor: dec counts down by 128 every 128ns */
1096 		ppc_tb_freq = 1000000000;
1097 	} else {
1098 		/* Normal PowerPC with timebase register */
1099 		ppc_md.calibrate_decr();
1100 		printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
1101 		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
1102 		printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
1103 		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
1104 	}
1105 
1106 	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
1107 	tb_ticks_per_sec = ppc_tb_freq;
1108 	tb_ticks_per_usec = ppc_tb_freq / 1000000;
1109 	calc_cputime_factors();
1110 
1111 	/*
1112 	 * Compute scale factor for sched_clock.
1113 	 * The calibrate_decr() function has set tb_ticks_per_sec,
1114 	 * which is the timebase frequency.
1115 	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
1116 	 * the 128-bit result as a 64.64 fixed-point number.
1117 	 * We then shift that number right until it is less than 1.0,
1118 	 * giving us the scale factor and shift count to use in
1119 	 * sched_clock().
1120 	 */
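	/*
	 * Worked example (illustrative): with a 512 MHz timebase,
	 * 1e9 * 2^64 / 512000000 = 1.953125 * 2^64, so one shift gives
	 * tb_to_ns_scale = 0xFA00000000000000 (~0.9765625 as a 0.64
	 * fraction) and tb_to_ns_shift = 1; sched_clock() then returns
	 * mulhdu(tb, scale) << 1 = tb * 1.953125, i.e. 1.953125 ns per
	 * tick, as expected for a 512 MHz clock.
	 */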
1121 	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
1122 	scale = res.result_low;
1123 	for (shift = 0; res.result_high != 0; ++shift) {
1124 		scale = (scale >> 1) | (res.result_high << 63);
1125 		res.result_high >>= 1;
1126 	}
1127 	tb_to_ns_scale = scale;
1128 	tb_to_ns_shift = shift;
1129 	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
1130 	boot_tb = get_tb_or_rtc();
1131 
1132 	/* If platform provided a timezone (pmac), we correct the time */
1133 	if (timezone_offset) {
1134 		sys_tz.tz_minuteswest = -timezone_offset / 60;
1135 		sys_tz.tz_dsttime = 0;
1136 	}
1137 
1138 	vdso_data->tb_update_count = 0;
1139 	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
1140 
1141 	/* initialise and enable the large decrementer (if we have one) */
1142 	set_decrementer_max();
1143 	enable_large_decrementer();
1144 
1145 	/* Start the decrementer on CPUs that have manual control
1146 	 * such as BookE
1147 	 */
1148 	start_cpu_decrementer();
1149 
1150 	/* Register the clocksource */
1151 	clocksource_init();
1152 
1153 	init_decrementer_clockevent();
1154 	tick_setup_hrtimer_broadcast();
1155 
1156 #ifdef CONFIG_COMMON_CLK
1157 	of_clk_init(NULL);
1158 #endif
1159 }
1160 
1161 /*
1162  * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128-bit
1163  * result.
1164  */
1165 void div128_by_32(u64 dividend_high, u64 dividend_low,
1166 		  unsigned divisor, struct div_result *dr)
1167 {
1168 	unsigned long a, b, c, d;
1169 	unsigned long w, x, y, z;
1170 	u64 ra, rb, rc;
1171 
1172 	a = dividend_high >> 32;
1173 	b = dividend_high & 0xffffffff;
1174 	c = dividend_low >> 32;
1175 	d = dividend_low & 0xffffffff;
1176 
1177 	w = a / divisor;
1178 	ra = ((u64)(a - (w * divisor)) << 32) + b;
1179 
1180 	rb = ((u64) do_div(ra, divisor) << 32) + c;
1181 	x = ra;
1182 
1183 	rc = ((u64) do_div(rb, divisor) << 32) + d;
1184 	y = rb;
1185 
1186 	do_div(rc, divisor);
1187 	z = rc;
1188 
1189 	dr->result_high = ((u64)w << 32) + x;
1190 	dr->result_low  = ((u64)y << 32) + z;
1192 }
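
/*
 * A small self-check sketch (illustrative values, not kernel code):
 * (10 * 2^64 + 4) / 4 = 2 * 2^64 + 0x8000000000000001, so
 *
 *	struct div_result dr;
 *
 *	div128_by_32(10, 4, 4, &dr);
 *
 * leaves dr.result_high == 2 and dr.result_low == 0x8000000000000001.
 */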
1193 
1194 /* We don't need to calibrate delay, we use the CPU timebase for that */
1195 void calibrate_delay(void)
1196 {
1197 	/* Some generic code (such as spinlock debug) uses loops_per_jiffy
1198 	 * as the number of __delay(1) calls in a jiffy, so make it so
1199 	 */
1200 	loops_per_jiffy = tb_ticks_per_jiffy;
1201 }
1202 
1203 #if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
1204 static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
1205 {
1206 	ppc_md.get_rtc_time(tm);
1207 	return 0;
1208 }
1209 
1210 static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
1211 {
1212 	if (!ppc_md.set_rtc_time)
1213 		return -EOPNOTSUPP;
1214 
1215 	if (ppc_md.set_rtc_time(tm) < 0)
1216 		return -EOPNOTSUPP;
1217 
1218 	return 0;
1219 }
1220 
1221 static const struct rtc_class_ops rtc_generic_ops = {
1222 	.read_time = rtc_generic_get_time,
1223 	.set_time = rtc_generic_set_time,
1224 };
1225 
1226 static int __init rtc_init(void)
1227 {
1228 	struct platform_device *pdev;
1229 
1230 	if (!ppc_md.get_rtc_time)
1231 		return -ENODEV;
1232 
1233 	pdev = platform_device_register_data(NULL, "rtc-generic", -1,
1234 					     &rtc_generic_ops,
1235 					     sizeof(rtc_generic_ops));
1236 
1237 	return PTR_ERR_OR_ZERO(pdev);
1238 }
1239 
1240 device_initcall(rtc_init);
1241 #endif
1242