1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * arch/arm64/include/asm/arch_timer.h
4  *
5  * Copyright (C) 2012 ARM Ltd.
6  * Author: Marc Zyngier <marc.zyngier@arm.com>
7  */
8 #ifndef __ASM_ARCH_TIMER_H
9 #define __ASM_ARCH_TIMER_H
10 
11 #include <asm/barrier.h>
12 #include <asm/sysreg.h>
13 
14 #include <linux/bug.h>
15 #include <linux/init.h>
16 #include <linux/jump_label.h>
17 #include <linux/smp.h>
18 #include <linux/types.h>
19 
20 #include <clocksource/arm_arch_timer.h>
21 
#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND)
/*
 * has_erratum_handler(h): true when this CPU has an active counter
 * erratum workaround providing an out-of-line handler 'h'
 * (e.g. read_cntvct_el0, set_next_event_virt).
 */
#define has_erratum_handler(h)						\
	({								\
		const struct arch_timer_erratum_workaround *__wa;	\
		__wa = __this_cpu_read(timer_unstable_counter_workaround); \
		(__wa && __wa->h);					\
	})

/*
 * erratum_handler(h): select this CPU's workaround handler 'h' when one
 * is installed, otherwise fall back to the plain arch_timer_##h
 * accessor defined below.
 *
 * NOTE(review): the per-CPU read suggests callers must hold preemption
 * off across lookup *and* use (arch_timer_reg_read_stable() does so) —
 * confirm for any new caller.
 */
#define erratum_handler(h)						\
	({								\
		const struct arch_timer_erratum_workaround *__wa;	\
		__wa = __this_cpu_read(timer_unstable_counter_workaround); \
		(__wa && __wa->h) ? __wa->h : arch_timer_##h;		\
	})

#else
/* No out-of-line workarounds configured: always use the default accessors. */
#define has_erratum_handler(h)			   false
#define erratum_handler(h)			   (arch_timer_##h)
#endif
41 
/* How a workaround table entry's 'id' is matched against the running system. */
enum arch_timer_erratum_match_type {
	ate_match_dt,			/* match via device-tree data */
	ate_match_local_cap_id,		/* match via a local CPU capability id */
	ate_match_acpi_oem_info,	/* match via ACPI OEM table information */
};
47 
struct clock_event_device;

/*
 * Describes one counter/timer erratum workaround: how to detect an
 * affected system (match_type + id), a human-readable description, and
 * optional out-of-line replacements for the timer register accessors
 * and set_next_event callbacks.  A NULL hook means "use the default"
 * (see erratum_handler() above).
 */
struct arch_timer_erratum_workaround {
	enum arch_timer_erratum_match_type match_type;	/* how 'id' is interpreted */
	const void *id;			/* opaque match token; type depends on match_type */
	const char *desc;		/* human-readable erratum name/description */
	u32 (*read_cntp_tval_el0)(void);	/* override: physical timer TVAL read */
	u32 (*read_cntv_tval_el0)(void);	/* override: virtual timer TVAL read */
	u64 (*read_cntpct_el0)(void);		/* override: physical counter read */
	u64 (*read_cntvct_el0)(void);		/* override: virtual counter read */
	int (*set_next_event_phys)(unsigned long, struct clock_event_device *);
	int (*set_next_event_virt)(unsigned long, struct clock_event_device *);
};

/* Per-CPU pointer to the active workaround; NULL when this CPU is unaffected. */
DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
		timer_unstable_counter_workaround);
64 
65 /* inline sysreg accessors that make erratum_handler() work */
66 static inline notrace u32 arch_timer_read_cntp_tval_el0(void)
67 {
68 	return read_sysreg(cntp_tval_el0);
69 }
70 
71 static inline notrace u32 arch_timer_read_cntv_tval_el0(void)
72 {
73 	return read_sysreg(cntv_tval_el0);
74 }
75 
76 static inline notrace u64 arch_timer_read_cntpct_el0(void)
77 {
78 	return read_sysreg(cntpct_el0);
79 }
80 
81 static inline notrace u64 arch_timer_read_cntvct_el0(void)
82 {
83 	return read_sysreg(cntvct_el0);
84 }
85 
/*
 * Read timer register 'reg' (token such as cntvct_el0) through any
 * per-CPU erratum workaround, falling back to arch_timer_read_##reg.
 * Preemption is disabled (notrace variants, so this stays usable from
 * tracing paths) so the handler looked up and the register read happen
 * on the same CPU.
 */
#define arch_timer_reg_read_stable(reg)					\
	({								\
		u64 _val;						\
									\
		preempt_disable_notrace();				\
		_val = erratum_handler(read_ ## reg)();			\
		preempt_enable_notrace();				\
									\
		_val;							\
	})
96 
97 /*
98  * These register accessors are marked inline so the compiler can
99  * nicely work out which register we want, and chuck away the rest of
100  * the code.
101  */
102 static __always_inline
103 void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
104 {
105 	if (access == ARCH_TIMER_PHYS_ACCESS) {
106 		switch (reg) {
107 		case ARCH_TIMER_REG_CTRL:
108 			write_sysreg(val, cntp_ctl_el0);
109 			break;
110 		case ARCH_TIMER_REG_TVAL:
111 			write_sysreg(val, cntp_tval_el0);
112 			break;
113 		}
114 	} else if (access == ARCH_TIMER_VIRT_ACCESS) {
115 		switch (reg) {
116 		case ARCH_TIMER_REG_CTRL:
117 			write_sysreg(val, cntv_ctl_el0);
118 			break;
119 		case ARCH_TIMER_REG_TVAL:
120 			write_sysreg(val, cntv_tval_el0);
121 			break;
122 		}
123 	}
124 
125 	isb();
126 }
127 
128 static __always_inline
129 u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
130 {
131 	if (access == ARCH_TIMER_PHYS_ACCESS) {
132 		switch (reg) {
133 		case ARCH_TIMER_REG_CTRL:
134 			return read_sysreg(cntp_ctl_el0);
135 		case ARCH_TIMER_REG_TVAL:
136 			return arch_timer_reg_read_stable(cntp_tval_el0);
137 		}
138 	} else if (access == ARCH_TIMER_VIRT_ACCESS) {
139 		switch (reg) {
140 		case ARCH_TIMER_REG_CTRL:
141 			return read_sysreg(cntv_ctl_el0);
142 		case ARCH_TIMER_REG_TVAL:
143 			return arch_timer_reg_read_stable(cntv_tval_el0);
144 		}
145 	}
146 
147 	BUG();
148 }
149 
150 static inline u32 arch_timer_get_cntfrq(void)
151 {
152 	return read_sysreg(cntfrq_el0);
153 }
154 
155 static inline u32 arch_timer_get_cntkctl(void)
156 {
157 	return read_sysreg(cntkctl_el1);
158 }
159 
/*
 * Update CNTKCTL_EL1; the isb() ensures the new control value takes
 * effect before any subsequent instruction executes.
 */
static inline void arch_timer_set_cntkctl(u32 cntkctl)
{
	write_sysreg(cntkctl, cntkctl_el1);
	isb();
}
165 
166 /*
167  * Ensure that reads of the counter are treated the same as memory reads
168  * for the purposes of ordering by subsequent memory barriers.
169  *
170  * This insanity brought to you by speculative system register reads,
171  * out-of-order memory accesses, sequence locks and Thomas Gleixner.
172  *
173  * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
174  */
#define arch_counter_enforce_ordering(val) do {				\
	u64 tmp, _val = (val);						\
									\
	/*								\
	 * tmp = _val ^ _val is always zero, but creates a data		\
	 * dependency on the counter value; adding it to sp and		\
	 * loading from the result turns the sysreg read into		\
	 * something later memory barriers will order against.		\
	 */								\
	asm volatile(							\
	"	eor	%0, %1, %1\n"					\
	"	add	%0, sp, %0\n"					\
	"	ldr	xzr, [%0]"					\
	: "=r" (tmp) : "r" (_val));					\
} while (0)
184 
/*
 * Read the physical counter through any per-CPU erratum workaround.
 * The isb() keeps the read from being issued ahead of preceding
 * instructions; arch_counter_enforce_ordering() makes subsequent
 * memory barriers order against the read (see comment above).
 */
static __always_inline u64 __arch_counter_get_cntpct_stable(void)
{
	u64 cnt;

	isb();
	cnt = arch_timer_reg_read_stable(cntpct_el0);
	arch_counter_enforce_ordering(cnt);
	return cnt;
}
194 
/*
 * Read the physical counter directly, bypassing erratum workarounds.
 * Same ordering scheme as the _stable variant: isb() before the read,
 * arch_counter_enforce_ordering() after it.
 */
static __always_inline u64 __arch_counter_get_cntpct(void)
{
	u64 cnt;

	isb();
	cnt = read_sysreg(cntpct_el0);
	arch_counter_enforce_ordering(cnt);
	return cnt;
}
204 
/*
 * Read the virtual counter through any per-CPU erratum workaround.
 * Same ordering scheme as the physical variants above.
 */
static __always_inline u64 __arch_counter_get_cntvct_stable(void)
{
	u64 cnt;

	isb();
	cnt = arch_timer_reg_read_stable(cntvct_el0);
	arch_counter_enforce_ordering(cnt);
	return cnt;
}
214 
/*
 * Read the virtual counter directly, bypassing erratum workarounds.
 * Same ordering scheme as the physical variants above.
 */
static __always_inline u64 __arch_counter_get_cntvct(void)
{
	u64 cnt;

	isb();
	cnt = read_sysreg(cntvct_el0);
	arch_counter_enforce_ordering(cnt);
	return cnt;
}
224 
225 #undef arch_counter_enforce_ordering
226 
/* Arch-specific timer init hook: nothing to do on arm64, always succeeds. */
static inline int arch_timer_arch_init(void)
{
	return 0;
}
231 
232 #endif
233