xref: /openbmc/linux/arch/powerpc/include/asm/time.h (revision 88fb3094)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Common time prototypes and such for all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 */

#ifndef __POWERPC_TIME_H
#define __POWERPC_TIME_H

#ifdef __KERNEL__
#include <linux/types.h>
#include <linux/percpu.h>

#include <asm/processor.h>
#include <asm/cpu_has_feature.h>

/* time.c */
extern unsigned long tb_ticks_per_jiffy;
extern unsigned long tb_ticks_per_usec;
extern unsigned long tb_ticks_per_sec;
extern struct clock_event_device decrementer_clockevent;

extern void generic_calibrate_decr(void);
extern void hdec_interrupt(struct pt_regs *regs);

/* Some sane defaults: 125 MHz timebase, 1 GHz processor */
extern unsigned long ppc_proc_freq;
#define DEFAULT_PROC_FREQ	(DEFAULT_TB_FREQ * 8)
extern unsigned long ppc_tb_freq;
#define DEFAULT_TB_FREQ		125000000UL

extern bool tb_invalid;

struct div_result {
	u64 result_high;
	u64 result_low;
};

/* Accessor functions for the timebase (RTC on 601) registers. */
/* If one day CONFIG_POWER is added, just define __USE_RTC as 1 */
#define __USE_RTC()	(IS_ENABLED(CONFIG_PPC_BOOK3S_601))

#ifdef CONFIG_PPC64

/* For compatibility, get_tbl() is defined as get_tb() on ppc64 */
#define get_tbl		get_tb

#else

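/*
 * The 403GCX reads its timebase with mfspr on SPRs 0x3dc/0x3dd rather
 * than through the usual mftbu()/mftbl() helpers, hence the special
 * cases below.
 */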
static inline unsigned long get_tbl(void)
{
#if defined(CONFIG_403GCX)
	unsigned long tbl;
	asm volatile("mfspr %0, 0x3dd" : "=r" (tbl));
	return tbl;
#else
	return mftbl();
#endif
}

static inline unsigned int get_tbu(void)
{
#ifdef CONFIG_403GCX
	unsigned int tbu;
	asm volatile("mfspr %0, 0x3dc" : "=r" (tbu));
	return tbu;
#else
	return mftbu();
#endif
}
#endif /* !CONFIG_PPC64 */

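/*
 * 601 RTC accessors: RTCU counts whole seconds and RTCL counts
 * nanoseconds within the current second.  get_rtc() re-reads RTCU
 * after RTCL so that a wrap of RTCL between the two reads is caught
 * and the sample retried, giving a consistent 64-bit value.
 */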
static inline unsigned int get_rtcl(void)
{
	unsigned int rtcl;

	asm volatile("mfrtcl %0" : "=r" (rtcl));
	return rtcl;
}

static inline u64 get_rtc(void)
{
	unsigned int hi, lo, hi2;

	do {
		asm volatile("mfrtcu %0; mfrtcl %1; mfrtcu %2"
			     : "=r" (hi), "=r" (lo), "=r" (hi2));
	} while (hi2 != hi);
	return (u64)hi * 1000000000 + lo;
}

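/*
 * The Virtual Time Base (VTB) is only architected from ISA 2.07
 * (Book3S 64); everything else simply reports 0 here.
 */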
static inline u64 get_vtb(void)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		return mfspr(SPRN_VTB);
#endif
	return 0;
}

#ifdef CONFIG_PPC64
static inline u64 get_tb(void)
{
	return mftb();
}
#else /* CONFIG_PPC64 */
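/*
 * On 32-bit the timebase is read as two halves; re-reading TBU catches
 * a carry from TBL into TBU between the two reads.
 */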
static inline u64 get_tb(void)
{
	unsigned int tbhi, tblo, tbhi2;

	do {
		tbhi = get_tbu();
		tblo = get_tbl();
		tbhi2 = get_tbu();
	} while (tbhi != tbhi2);

	return ((u64)tbhi << 32) | tblo;
}
#endif /* !CONFIG_PPC64 */

static inline u64 get_tb_or_rtc(void)
{
	return __USE_RTC() ? get_rtc() : get_tb();
}

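/*
 * Writing 0 to TBL first ensures the low half cannot carry into TBU
 * while the new upper value is being written.
 */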
static inline void set_tb(unsigned int upper, unsigned int lower)
{
	mtspr(SPRN_TBWL, 0);
	mtspr(SPRN_TBWU, upper);
	mtspr(SPRN_TBWL, lower);
}

/* Accessor functions for the decrementer register.
 * The 4xx doesn't even have a decrementer.  I tried to use the
 * generic timer interrupt code, which seems OK, with the 4xx PIT
 * in auto-reload mode.  The problem is that the PIT stops counting
 * when it hits zero.  If it would wrap, we could use it just like
 * a decrementer.
 */
static inline u64 get_dec(void)
{
#if defined(CONFIG_40x)
	return (mfspr(SPRN_PIT));
#else
	return (mfspr(SPRN_DEC));
#endif
}

/*
 * Note: Book E and 4xx processors differ from other PowerPC processors
 * in when the decrementer generates its interrupt: on the 1 to 0
 * transition for Book E/4xx, but on the 0 to -1 transition for others.
 */
static inline void set_dec(u64 val)
{
#if defined(CONFIG_40x)
	mtspr(SPRN_PIT, (u32) val);
#else
#ifndef CONFIG_BOOKE
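	/* Compensate for the interrupt firing on the 0 to -1 transition */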
	--val;
#endif
	mtspr(SPRN_DEC, val);
#endif /* not 40x */
}

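/*
 * Ticks elapsed since @tstamp, which must itself come from
 * get_tbl()/get_rtcl().  In RTC mode only the nanosecond register is
 * compared, so a negative delta means RTCL wrapped and a full second
 * (10^9 ns) is added back.
 *
 * Illustrative use only (the names below are just an example):
 *
 *	unsigned long start = get_tbl();
 *	do_something();
 *	unsigned long long ns = tb_to_ns(tb_ticks_since(start));
 */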
static inline unsigned long tb_ticks_since(unsigned long tstamp)
{
	if (__USE_RTC()) {
		int delta = get_rtcl() - (unsigned int) tstamp;
		return delta < 0 ? delta + 1000000000 : delta;
	}
	return get_tbl() - tstamp;
}

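/*
 * mulhwu()/mulhdu() return only the high half of an unsigned 32x32
 * (respectively 64x64) bit multiply, i.e. the full product shifted
 * right by the operand width.
 */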
#define mulhwu(x,y) \
({unsigned z; asm ("mulhwu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})

#ifdef CONFIG_PPC64
#define mulhdu(x,y) \
({unsigned long z; asm ("mulhdu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})
#else
extern u64 mulhdu(u64, u64);
#endif

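/* Divide the 128-bit value dividend_high:dividend_low by a 32-bit divisor. */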
extern void div128_by_32(u64 dividend_high, u64 dividend_low,
			 unsigned divisor, struct div_result *dr);

extern void secondary_cpu_time_init(void);
extern void __init time_init(void);

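/* Timebase value at which the next decrementer event is due, per CPU */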
DECLARE_PER_CPU(u64, decrementers_next_tb);

/* Convert timebase ticks to nanoseconds */
unsigned long long tb_to_ns(unsigned long long tb_ticks);

#endif /* __KERNEL__ */
#endif /* __POWERPC_TIME_H */