/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Common time prototypes and such for all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 */

#ifndef __POWERPC_TIME_H
#define __POWERPC_TIME_H

#ifdef __KERNEL__
#include <linux/types.h>
#include <linux/percpu.h>

#include <asm/processor.h>
#include <asm/cpu_has_feature.h>

/* time.c */
extern unsigned long tb_ticks_per_jiffy;
extern unsigned long tb_ticks_per_usec;
extern unsigned long tb_ticks_per_sec;
extern struct clock_event_device decrementer_clockevent;


extern void generic_calibrate_decr(void);

/* Some sane defaults: 125 MHz timebase, 1 GHz processor */
extern unsigned long ppc_proc_freq;
#define DEFAULT_PROC_FREQ	(DEFAULT_TB_FREQ * 8)
extern unsigned long ppc_tb_freq;
#define DEFAULT_TB_FREQ		125000000UL

extern bool tb_invalid;

struct div_result {
	u64 result_high;
	u64 result_low;
};

/* Accessor functions for the timebase (RTC on 601) registers. */
/* If one day CONFIG_POWER is added just define __USE_RTC as 1 */
#define __USE_RTC()	(IS_ENABLED(CONFIG_PPC_BOOK3S_601))

#ifdef CONFIG_PPC64

/* For compatibility, get_tbl() is defined as get_tb() on ppc64 */
#define get_tbl		get_tb

#else

static inline unsigned long get_tbl(void)
{
#if defined(CONFIG_403GCX)
	unsigned long tbl;
	asm volatile("mfspr %0, 0x3dd" : "=r" (tbl));
	return tbl;
#else
	return mftbl();
#endif
}

static inline unsigned int get_tbu(void)
{
#ifdef CONFIG_403GCX
	unsigned int tbu;
	asm volatile("mfspr %0, 0x3dc" : "=r" (tbu));
	return tbu;
#else
	return mftbu();
#endif
}
#endif /* !CONFIG_PPC64 */

static inline unsigned int get_rtcl(void)
{
	unsigned int rtcl;

	asm volatile("mfrtcl %0" : "=r" (rtcl));
	return rtcl;
}

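/*
 * Read the full 601 RTC value: RTCU holds seconds, RTCL nanoseconds.
 * RTCU is read before and after RTCL so that a wrap of RTCL between
 * the two reads is detected and the sequence retried.
 */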
static inline u64 get_rtc(void)
{
	unsigned int hi, lo, hi2;

	do {
		asm volatile("mfrtcu %0; mfrtcl %1; mfrtcu %2"
			     : "=r" (hi), "=r" (lo), "=r" (hi2));
	} while (hi2 != hi);
	return (u64)hi * 1000000000 + lo;
}

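/*
 * The Virtual Time Base (VTB) register only exists on Book3S CPUs
 * implementing ISA 2.07 (CPU_FTR_ARCH_207S); everywhere else this
 * returns 0.
 */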
static inline u64 get_vtb(void)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		return mfspr(SPRN_VTB);
#endif
	return 0;
}

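/*
 * get_tb() returns the full 64-bit timebase.  On 64-bit CPUs a single
 * mftb suffices; on 32-bit CPUs TBU is read again after TBL so that a
 * carry out of TBL between the two reads is detected and the sequence
 * retried.
 */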
#ifdef CONFIG_PPC64
static inline u64 get_tb(void)
{
	return mftb();
}
#else /* CONFIG_PPC64 */
static inline u64 get_tb(void)
{
	unsigned int tbhi, tblo, tbhi2;

	do {
		tbhi = get_tbu();
		tblo = get_tbl();
		tbhi2 = get_tbu();
	} while (tbhi != tbhi2);

	return ((u64)tbhi << 32) | tblo;
}
#endif /* !CONFIG_PPC64 */

static inline u64 get_tb_or_rtc(void)
{
	return __USE_RTC() ? get_rtc() : get_tb();
}

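/*
 * Write the 64-bit timebase.  TBL is cleared first so that it cannot
 * carry into TBU between the two subsequent writes.
 */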
static inline void set_tb(unsigned int upper, unsigned int lower)
{
	mtspr(SPRN_TBWL, 0);
	mtspr(SPRN_TBWU, upper);
	mtspr(SPRN_TBWL, lower);
}

/* Accessor functions for the decrementer register.
 * The 4xx doesn't even have a decrementer.  I tried to use the
 * generic timer interrupt code, which seems OK, with the 4xx PIT
 * in auto-reload mode.  The problem is PIT stops counting when it
 * hits zero.  If it would wrap, we could use it just like a decrementer.
 */
static inline u64 get_dec(void)
{
#if defined(CONFIG_40x)
	return (mfspr(SPRN_PIT));
#else
	return (mfspr(SPRN_DEC));
#endif
}

/*
 * Note: Book E and 4xx processors differ from other PowerPC processors
 * in when the decrementer generates its interrupt: on the 1 to 0
 * transition for Book E/4xx, but on the 0 to -1 transition for others.
 */
static inline void set_dec(u64 val)
{
#if defined(CONFIG_40x)
	mtspr(SPRN_PIT, (u32) val);
#else
#ifndef CONFIG_BOOKE
	--val;	/* these CPUs interrupt on the 0 to -1 transition */
#endif
	mtspr(SPRN_DEC, val);
#endif /* not 40x */
}

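/*
 * Number of low-order timebase (or RTCL) ticks elapsed since tstamp.
 * RTCL wraps at 10^9, so a negative delta is corrected by one full wrap.
 */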
static inline unsigned long tb_ticks_since(unsigned long tstamp)
{
	if (__USE_RTC()) {
		int delta = get_rtcl() - (unsigned int) tstamp;
		return delta < 0 ? delta + 1000000000 : delta;
	}
	return get_tbl() - tstamp;
}

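/*
 * High halves of unsigned multiplies: mulhwu() returns the upper 32 bits
 * of a 32x32-bit product, mulhdu() the upper 64 bits of a 64x64-bit
 * product.  Typically used when scaling tick counts by fixed-point
 * conversion factors.
 */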
#define mulhwu(x,y) \
({unsigned z; asm ("mulhwu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})

#ifdef CONFIG_PPC64
#define mulhdu(x,y) \
({unsigned long z; asm ("mulhdu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})
#else
extern u64 mulhdu(u64, u64);
#endif

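/*
 * Divide the 128-bit value dividend_high:dividend_low by a 32-bit
 * divisor, returning the result in *dr.
 */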
extern void div128_by_32(u64 dividend_high, u64 dividend_low,
			 unsigned divisor, struct div_result *dr);

extern void secondary_cpu_time_init(void);
extern void __init time_init(void);

DECLARE_PER_CPU(u64, decrementers_next_tb);

/* Convert timebase ticks to nanoseconds */
unsigned long long tb_to_ns(unsigned long long tb_ticks);

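/*
 * Typical interval measurement with these helpers (a sketch, not code
 * from this file):
 *
 *	u64 start, elapsed_ns;
 *
 *	start = get_tb();
 *	// ... work to be timed ...
 *	elapsed_ns = tb_to_ns(get_tb() - start);
 */
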
/* SPLPAR */
void accumulate_stolen_time(void);

#endif /* __KERNEL__ */
#endif /* __POWERPC_TIME_H */