/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm64/include/asm/arch_timer.h
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */
#ifndef __ASM_ARCH_TIMER_H
#define __ASM_ARCH_TIMER_H

#include <asm/barrier.h>
#include <asm/hwcap.h>
#include <asm/sysreg.h>

#include <linux/bug.h>
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/smp.h>
#include <linux/types.h>

#include <clocksource/arm_arch_timer.h>

#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND)
/*
 * Evaluates to true if the current CPU has an out-of-line workaround
 * installed that overrides accessor 'h' (one of the function-pointer
 * fields of struct arch_timer_erratum_workaround below).
 *
 * NOTE(review): reads a per-CPU pointer, so callers presumably need
 * preemption disabled for a stable answer — confirm at call sites.
 */
#define has_erratum_handler(h)						\
	({								\
		const struct arch_timer_erratum_workaround *__wa;	\
		__wa = __this_cpu_read(timer_unstable_counter_workaround); \
		(__wa && __wa->h);					\
	})

/*
 * Resolve accessor 'h' for the current CPU: the workaround override if
 * one is installed and provides 'h', otherwise the default
 * arch_timer_##h sysreg accessor (token-pasted, see the inline
 * accessors below).
 */
#define erratum_handler(h)						\
	({								\
		const struct arch_timer_erratum_workaround *__wa;	\
		__wa = __this_cpu_read(timer_unstable_counter_workaround); \
		(__wa && __wa->h) ? __wa->h : arch_timer_##h;		\
	})

#else
/* No OOL workarounds configured: always use the default accessors. */
#define has_erratum_handler(h)			   false
#define erratum_handler(h)			   (arch_timer_##h)
#endif

/* How a workaround entry is matched against the running system. */
enum arch_timer_erratum_match_type {
	ate_match_dt,			/* device-tree based match */
	ate_match_local_cap_id,		/* local CPU capability id match */
	ate_match_acpi_oem_info,	/* ACPI OEM info match */
};

struct clock_event_device;

/*
 * One erratum workaround entry. A NULL function pointer means "no
 * override for that accessor"; erratum_handler() then falls back to
 * the plain sysreg read.
 */
struct arch_timer_erratum_workaround {
	enum arch_timer_erratum_match_type match_type;
	const void *id;		/* match data; meaning depends on match_type */
	const char *desc;	/* human-readable description */
	u32 (*read_cntp_tval_el0)(void);
	u32 (*read_cntv_tval_el0)(void);
	u64 (*read_cntpct_el0)(void);
	u64 (*read_cntvct_el0)(void);
	int (*set_next_event_phys)(unsigned long, struct clock_event_device *);
	int (*set_next_event_virt)(unsigned long, struct clock_event_device *);
	bool disable_compat_vdso;
};

/* Workaround in effect on each CPU, or NULL if none (defined elsewhere). */
DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
		timer_unstable_counter_workaround);

/* inline sysreg accessors that make erratum_handler() work */

/* Default reader for the EL0 physical timer TimerValue register. */
static inline notrace u32 arch_timer_read_cntp_tval_el0(void)
{
	return read_sysreg(cntp_tval_el0);
}

/* Default reader for the EL0 virtual timer TimerValue register. */
static inline notrace u32 arch_timer_read_cntv_tval_el0(void)
{
	return read_sysreg(cntv_tval_el0);
}

/* Default reader for the physical counter. */
static inline notrace u64 arch_timer_read_cntpct_el0(void)
{
	return read_sysreg(cntpct_el0);
}

/* Default reader for the virtual counter. */
static inline notrace u64 arch_timer_read_cntvct_el0(void)
{
	return read_sysreg(cntvct_el0);
}

/*
 * Read register 'reg' via erratum_handler(): the per-CPU workaround
 * override if one is installed, otherwise the plain sysreg accessor.
 * Preemption is disabled around the lookup + read so both happen on
 * the same CPU.
 */
#define arch_timer_reg_read_stable(reg)					\
	({								\
		u64 _val;						\
									\
		preempt_disable_notrace();				\
		_val = erratum_handler(read_ ## reg)();			\
		preempt_enable_notrace();				\
									\
		_val;							\
	})

/*
 * These register accessors are marked inline so the compiler can
 * nicely work out which register we want, and chuck away the rest of
 * the code.
 */
static __always_inline
void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
{
	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			write_sysreg(val, cntp_ctl_el0);
			break;
		case ARCH_TIMER_REG_TVAL:
			write_sysreg(val, cntp_tval_el0);
			break;
		}
	} else if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			write_sysreg(val, cntv_ctl_el0);
			break;
		case ARCH_TIMER_REG_TVAL:
			write_sysreg(val, cntv_tval_el0);
			break;
		}
	}

	/* Synchronize: order the sysreg write before subsequent instructions. */
	isb();
}

static __always_inline
u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
{
	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			return read_sysreg(cntp_ctl_el0);
		case ARCH_TIMER_REG_TVAL:
			/* TVAL reads go through the erratum machinery. */
			return arch_timer_reg_read_stable(cntp_tval_el0);
		}
	} else if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			return read_sysreg(cntv_ctl_el0);
		case ARCH_TIMER_REG_TVAL:
			return arch_timer_reg_read_stable(cntv_tval_el0);
		}
	}

	/* Unknown access/reg combination is a caller bug. */
	BUG();
}

/* Counter frequency in Hz, as programmed into CNTFRQ_EL0. */
static inline u32 arch_timer_get_cntfrq(void)
{
	return read_sysreg(cntfrq_el0);
}

/* Read the EL1 kernel control register for the timers. */
static inline u32 arch_timer_get_cntkctl(void)
{
	return read_sysreg(cntkctl_el1);
}

/* Write CNTKCTL_EL1; the isb() orders the write before later instructions. */
static inline void arch_timer_set_cntkctl(u32 cntkctl)
{
	write_sysreg(cntkctl, cntkctl_el1);
	isb();
}

/*
 * Counter readers. The isb() before each read keeps the counter access
 * from being issued early relative to preceding instructions, and
 * arch_counter_enforce_ordering() orders the read against what follows
 * (see its definition for the exact scheme). The _stable variants route
 * the read through the per-CPU erratum workaround, if any; the plain
 * variants read the sysreg directly.
 */
static __always_inline u64 __arch_counter_get_cntpct_stable(void)
{
	u64 cnt;

	isb();
	cnt = arch_timer_reg_read_stable(cntpct_el0);
	arch_counter_enforce_ordering(cnt);
	return cnt;
}

static __always_inline u64 __arch_counter_get_cntpct(void)
{
	u64 cnt;

	isb();
	cnt = read_sysreg(cntpct_el0);
	arch_counter_enforce_ordering(cnt);
	return cnt;
}

static __always_inline u64 __arch_counter_get_cntvct_stable(void)
{
	u64 cnt;

	isb();
	cnt = arch_timer_reg_read_stable(cntvct_el0);
	arch_counter_enforce_ordering(cnt);
	return cnt;
}

static __always_inline u64 __arch_counter_get_cntvct(void)
{
	u64 cnt;

	isb();
	cnt = read_sysreg(cntvct_el0);
	arch_counter_enforce_ordering(cnt);
	return cnt;
}

/* No arch-specific setup is performed here; always succeeds. */
static inline int arch_timer_arch_init(void)
{
	return 0;
}

/* Advertise the event-stream feature in the native (and compat) hwcaps. */
static inline void arch_timer_set_evtstrm_feature(void)
{
	cpu_set_named_feature(EVTSTRM);
#ifdef CONFIG_COMPAT
	compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
}

static inline bool arch_timer_have_evtstrm_feature(void)
{
	return cpu_have_named_feature(EVTSTRM);
}
#endif