/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Common time prototypes and such for all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 */

#ifndef __POWERPC_TIME_H
#define __POWERPC_TIME_H

#ifdef __KERNEL__
#include <linux/types.h>
#include <linux/percpu.h>

#include <asm/processor.h>
#include <asm/cpu_has_feature.h>

/* time.c */
extern unsigned long tb_ticks_per_jiffy;
extern unsigned long tb_ticks_per_usec;
extern unsigned long tb_ticks_per_sec;
extern struct clock_event_device decrementer_clockevent;


extern void generic_calibrate_decr(void);
extern void hdec_interrupt(struct pt_regs *regs);

/* Some sane defaults: 125 MHz timebase, 1GHz processor */
extern unsigned long ppc_proc_freq;
#define DEFAULT_PROC_FREQ	(DEFAULT_TB_FREQ * 8)
extern unsigned long ppc_tb_freq;
#define DEFAULT_TB_FREQ		125000000UL

/* NOTE(review): presumably set when the timebase is found unreliable
 * (defined in time.c) — confirm against the definition there. */
extern bool tb_invalid;

/* 128-bit quotient produced by div128_by_32(), split into two u64 halves. */
struct div_result {
	u64 result_high;
	u64 result_low;
};

/* Accessor functions for the timebase (RTC on 601) registers. */
/* If one day CONFIG_POWER is added just define __USE_RTC as 1 */
#ifdef CONFIG_PPC_BOOK3S_32
#define __USE_RTC()	(cpu_has_feature(CPU_FTR_USE_RTC))
#else
#define __USE_RTC()	0
#endif

#ifdef CONFIG_PPC64

/* For compatibility, get_tbl() is defined as get_tb() on ppc64 */
#define get_tbl		get_tb

#else

/*
 * Read the lower half of the timebase.  The 403GCX keeps its timebase
 * in non-standard SPR numbers, so it is read with a raw mfspr instead
 * of the usual mftbl().
 */
static inline unsigned long get_tbl(void)
{
#if defined(CONFIG_403GCX)
	unsigned long tbl;
	asm volatile("mfspr %0, 0x3dd" : "=r" (tbl));
	return tbl;
#else
	return mftbl();
#endif
}

/* Read the upper half of the timebase (403GCX: non-standard SPR 0x3dc). */
static inline unsigned int get_tbu(void)
{
#ifdef CONFIG_403GCX
	unsigned int tbu;
	asm volatile("mfspr %0, 0x3dc" : "=r" (tbu));
	return tbu;
#else
	return mftbu();
#endif
}
#endif /* !CONFIG_PPC64 */

/* Read the lower RTC register (601-style real-time clock). */
static inline unsigned int get_rtcl(void)
{
	unsigned int rtcl;

	asm volatile("mfrtcl %0" : "=r" (rtcl));
	return rtcl;
}

/*
 * Read the full 64-bit RTC value.  The upper register is read before
 * and after the lower one, and the read is retried until both upper
 * reads agree, so the combined value is consistent even if the lower
 * register wraps between reads.  The lower register wraps at 10^9
 * (see also tb_ticks_since() below).
 */
static inline u64 get_rtc(void)
{
	unsigned int hi, lo, hi2;

	do {
		asm volatile("mfrtcu %0; mfrtcl %1; mfrtcu %2"
			     : "=r" (hi), "=r" (lo), "=r" (hi2));
	} while (hi2 != hi);
	return (u64)hi * 1000000000 + lo;
}

/* Read the Virtual Time Base; 0 where the SPR is absent (pre-ISA-2.07S
 * or non-Book3S-64 builds). */
static inline u64 get_vtb(void)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		return mfspr(SPRN_VTB);
#endif
	return 0;
}

#ifdef CONFIG_PPC64
/* 64-bit: the whole timebase is readable in one instruction. */
static inline u64 get_tb(void)
{
	return mftb();
}
#else /* CONFIG_PPC64 */
/*
 * 32-bit: read upper, lower, then upper again, retrying until the two
 * upper reads match, so a carry from the lower into the upper half
 * between reads cannot yield a torn 64-bit value.
 */
static inline u64 get_tb(void)
{
	unsigned int tbhi, tblo, tbhi2;

	do {
		tbhi = get_tbu();
		tblo = get_tbl();
		tbhi2 = get_tbu();
	} while (tbhi != tbhi2);

	return ((u64)tbhi << 32) | tblo;
}
#endif /* !CONFIG_PPC64 */

/* Timebase where available, 601-style RTC otherwise. */
static inline u64 get_tb_or_rtc(void)
{
	return __USE_RTC() ? get_rtc() : get_tb();
}

/*
 * Write the 64-bit timebase.  The lower half is zeroed first —
 * presumably so an increment cannot carry into TBU while the two
 * halves are being written (standard sequence; see the PowerPC
 * architecture timebase documentation).
 */
static inline void set_tb(unsigned int upper, unsigned int lower)
{
	mtspr(SPRN_TBWL, 0);
	mtspr(SPRN_TBWU, upper);
	mtspr(SPRN_TBWL, lower);
}

/* Accessor functions for the decrementer register.
 * The 4xx doesn't even have a decrementer.  I tried to use the
 * generic timer interrupt code, which seems OK, with the 4xx PIT
 * in auto-reload mode.  The problem is PIT stops counting when it
 * hits zero.  If it would wrap, we could use it just like a decrementer.
 */
static inline u64 get_dec(void)
{
#if defined(CONFIG_40x)
	return (mfspr(SPRN_PIT));
#else
	return (mfspr(SPRN_DEC));
#endif
}

/*
 * Note: Book E and 4xx processors differ from other PowerPC processors
 * in when the decrementer generates its interrupt: on the 1 to 0
 * transition for Book E/4xx, but on the 0 to -1 transition for others.
 */
static inline void set_dec(u64 val)
{
#if defined(CONFIG_40x)
	mtspr(SPRN_PIT, (u32) val);
#else
#ifndef CONFIG_BOOKE
	/* Compensate for the interrupt firing on 0 -> -1 rather than
	 * 1 -> 0 on non-Book E parts (see comment above). */
	--val;
#endif
	mtspr(SPRN_DEC, val);
#endif /* not 40x */
}

/*
 * Ticks elapsed since the timestamp.  On RTC-based (601) systems the
 * lower register wraps at 10^9, hence the signed delta and wrap fixup;
 * on timebase systems plain unsigned subtraction handles wrap-around.
 */
static inline unsigned long tb_ticks_since(unsigned long tstamp)
{
	if (__USE_RTC()) {
		int delta = get_rtcl() - (unsigned int) tstamp;
		return delta < 0 ? delta + 1000000000 : delta;
	}
	return get_tbl() - tstamp;
}

/* High 32 bits of the 32x32 -> 64 unsigned multiply x*y. */
#define mulhwu(x,y) \
({unsigned z; asm ("mulhwu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})

#ifdef CONFIG_PPC64
/* High 64 bits of the 64x64 -> 128 unsigned multiply x*y. */
#define mulhdu(x,y) \
({unsigned long z; asm ("mulhdu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})
#else
extern u64 mulhdu(u64, u64);
#endif

/* Divide the 128-bit value dividend_high:dividend_low by divisor,
 * returning the quotient through *dr. */
extern void div128_by_32(u64 dividend_high, u64 dividend_low,
			 unsigned divisor, struct div_result *dr);

extern void secondary_cpu_time_init(void);
extern void __init time_init(void);

/* Per-CPU timebase value at which the next decrementer event is due. */
DECLARE_PER_CPU(u64, decrementers_next_tb);

/* Convert timebase ticks to nanoseconds */
unsigned long long tb_to_ns(unsigned long long tb_ticks);

#endif /* __KERNEL__ */
#endif /* __POWERPC_TIME_H */