/*
 *  linux/drivers/clocksource/arm_arch_timer.c
 *
 *  Copyright (C) 2011 ARM Ltd.
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) "arch_timer: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched_clock.h>
#include <linux/acpi.h>

#include <asm/arch_timer.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>

#define CNTTIDR		0x08
#define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4))

#define CNTACR(n)	(0x40 + ((n) * 4))
#define CNTACR_RPCT	BIT(0)
#define CNTACR_RVCT	BIT(1)
#define CNTACR_RFRQ	BIT(2)
#define CNTACR_RVOFF	BIT(3)
#define CNTACR_RWVT	BIT(4)
#define CNTACR_RWPT	BIT(5)

#define CNTVCT_LO	0x08
#define CNTVCT_HI	0x0c
#define CNTFRQ		0x10
#define CNTP_TVAL	0x28
#define CNTP_CTL	0x2c
#define CNTV_TVAL	0x38
#define CNTV_CTL	0x3c

static unsigned arch_timers_present __initdata;

static void __iomem *arch_counter_base;

struct arch_timer {
	void __iomem *base;
	struct clock_event_device evt;
};

#define to_arch_timer(e) container_of(e, struct arch_timer, evt)

static u32 arch_timer_rate;
static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI];

static struct clock_event_device __percpu *arch_timer_evt;

static enum arch_timer_ppi_nr arch_timer_uses_ppi = ARCH_TIMER_VIRT_PPI;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
static bool arch_counter_suspend_stop;
static bool vdso_default = true;

static cpumask_t evtstrm_available = CPU_MASK_NONE;
static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);

static int __init early_evtstrm_cfg(char *buf)
{
	return strtobool(buf, &evtstrm_enable);
}
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);

/*
 * Architected system timer support.
 */

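/*
 * The cp15 and MMIO timers share these accessors; the 'access'
 * argument selects between them. Callers pass a compile-time
 * constant, so the dispatch below folds away in the inlined copies.
 * The MMIO paths only handle CTRL and TVAL, which is all the
 * clockevent code ever touches.
 */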
static __always_inline
void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
			  struct clock_event_device *clk)
{
	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTV_TVAL);
			break;
		}
	} else {
		arch_timer_reg_write_cp15(access, reg, val);
	}
}

static __always_inline
u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
			struct clock_event_device *clk)
{
	u32 val;

	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTV_TVAL);
			break;
		}
	} else {
		val = arch_timer_reg_read_cp15(access, reg);
	}

	return val;
}

/*
 * Default to cp15 based access because arm64 uses this function for
 * sched_clock() before DT is probed and the cp15 method is guaranteed
 * to exist on arm64. arm doesn't use this before DT is probed so even
 * if we don't have the cp15 accessors we won't have a problem.
 */
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
EXPORT_SYMBOL_GPL(arch_timer_read_counter);

static u64 arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

static u64 arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}

static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static struct cyclecounter cyclecounter __ro_after_init = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};

struct ate_acpi_oem_info {
	char oem_id[ACPI_OEM_ID_SIZE + 1];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
	u32 oem_revision;
};

#ifdef CONFIG_FSL_ERRATUM_A008585
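/*
 * Erratum A-008585: as the workaround below implies, a read of the
 * timer/counter registers can return a stale value; reading
 * back-to-back until two consecutive reads agree filters the bad
 * samples out.
 */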
/*
 * The number of retries is an arbitrary value well beyond the highest number
 * of iterations the loop has been observed to take.
 */
#define __fsl_a008585_read_reg(reg) ({			\
	u64 _old, _new;					\
	int _retries = 200;				\
							\
	do {						\
		_old = read_sysreg(reg);		\
		_new = read_sysreg(reg);		\
		_retries--;				\
	} while (unlikely(_old != _new) && _retries);	\
							\
	WARN_ON_ONCE(!_retries);			\
	_new;						\
})

static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntp_tval_el0);
}

static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntv_tval_el0);
}

static u64 notrace fsl_a008585_read_cntpct_el0(void)
{
	return __fsl_a008585_read_reg(cntpct_el0);
}

static u64 notrace fsl_a008585_read_cntvct_el0(void)
{
	return __fsl_a008585_read_reg(cntvct_el0);
}
#endif

#ifdef CONFIG_HISILICON_ERRATUM_161010101
/*
 * The only way to confirm a read is correct is to verify that the
 * second of two consecutive reads is larger than the first by less
 * than 32, so shift the lower 5 bits out of the difference and retry
 * while anything remains. In theory the erratum should not strike
 * more than twice in succession, but an interrupt between the reads
 * can stretch the window, so the retry count is set far beyond the
 * number of iterations the loop has been observed to take.
 */
#define __hisi_161010101_read_reg(reg) ({				\
	u64 _old, _new;						\
	int _retries = 50;					\
								\
	do {							\
		_old = read_sysreg(reg);			\
		_new = read_sysreg(reg);			\
		_retries--;					\
	} while (unlikely((_new - _old) >> 5) && _retries);	\
								\
	WARN_ON_ONCE(!_retries);				\
	_new;							\
})

static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntp_tval_el0);
}

static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntv_tval_el0);
}

static u64 notrace hisi_161010101_read_cntpct_el0(void)
{
	return __hisi_161010101_read_reg(cntpct_el0);
}

static u64 notrace hisi_161010101_read_cntvct_el0(void)
{
	return __hisi_161010101_read_reg(cntvct_el0);
}

static struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
	/*
	 * Note that trailing spaces are required to properly match
	 * the OEM table information.
	 */
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP05   ",
		.oem_revision	= 0,
	},
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP06   ",
		.oem_revision	= 0,
	},
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP07   ",
		.oem_revision	= 0,
	},
	{ /* Sentinel indicating the end of the OEM array */ },
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_858921
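/*
 * Erratum 858921: a counter read racing with the low 32 bits rolling
 * over can return a wrong value. Two back-to-back reads straddle at
 * most one rollover, so if bit 32 changed between them the first
 * read is returned, otherwise the second.
 */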
static u64 notrace arm64_858921_read_cntpct_el0(void)
{
	u64 old, new;

	old = read_sysreg(cntpct_el0);
	new = read_sysreg(cntpct_el0);
	return (((old ^ new) >> 32) & 1) ? old : new;
}

static u64 notrace arm64_858921_read_cntvct_el0(void)
{
	u64 old, new;

	old = read_sysreg(cntvct_el0);
	new = read_sysreg(cntvct_el0);
	return (((old ^ new) >> 32) & 1) ? old : new;
}
#endif

#ifdef CONFIG_ARM64_ERRATUM_1188873
static u64 notrace arm64_1188873_read_cntvct_el0(void)
{
	return read_sysreg(cntvct_el0);
}
#endif

#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
/*
 * The low bits of the counter registers are indeterminate while bit 10 or
 * greater is rolling over. Since the counter value can jump both backward
 * (7ff -> 000 -> 800) and forward (7ff -> fff -> 800), ignore register values
 * with all ones or all zeros in the low bits. Bound the loop by the maximum
 * number of CPU cycles in 3 consecutive 24 MHz counter periods.
 */
#define __sun50i_a64_read_reg(reg) ({					\
	u64 _val;							\
	int _retries = 150;						\
									\
	do {								\
		_val = read_sysreg(reg);				\
		_retries--;						\
	} while (((_val + 1) & GENMASK(9, 0)) <= 1 && _retries);	\
									\
	WARN_ON_ONCE(!_retries);					\
	_val;								\
})

static u64 notrace sun50i_a64_read_cntpct_el0(void)
{
	return __sun50i_a64_read_reg(cntpct_el0);
}

static u64 notrace sun50i_a64_read_cntvct_el0(void)
{
	return __sun50i_a64_read_reg(cntvct_el0);
}

static u32 notrace sun50i_a64_read_cntp_tval_el0(void)
{
	return read_sysreg(cntp_cval_el0) - sun50i_a64_read_cntpct_el0();
}

static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
{
	return read_sysreg(cntv_cval_el0) - sun50i_a64_read_cntvct_el0();
}
#endif

#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);

DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);

static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
						struct clock_event_device *clk)
{
	unsigned long ctrl;
	u64 cval;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_PHYS_ACCESS) {
		cval = evt + arch_counter_get_cntpct();
		write_sysreg(cval, cntp_cval_el0);
	} else {
		cval = evt + arch_counter_get_cntvct();
		write_sysreg(cval, cntv_cval_el0);
	}

	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static __maybe_unused int erratum_set_next_event_tval_virt(unsigned long evt,
					    struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static __maybe_unused int erratum_set_next_event_tval_phys(unsigned long evt,
					    struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static const struct arch_timer_erratum_workaround ool_workarounds[] = {
#ifdef CONFIG_FSL_ERRATUM_A008585
	{
		.match_type = ate_match_dt,
		.id = "fsl,erratum-a008585",
		.desc = "Freescale erratum a008585",
		.read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
		.read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
		.read_cntpct_el0 = fsl_a008585_read_cntpct_el0,
		.read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161010101
	{
		.match_type = ate_match_dt,
		.id = "hisilicon,erratum-161010101",
		.desc = "HiSilicon erratum 161010101",
		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
		.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
	{
		.match_type = ate_match_acpi_oem_info,
		.id = hisi_161010101_oem_info,
		.desc = "HiSilicon erratum 161010101",
		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
		.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		.match_type = ate_match_local_cap_id,
		.id = (void *)ARM64_WORKAROUND_858921,
		.desc = "ARM erratum 858921",
		.read_cntpct_el0 = arm64_858921_read_cntpct_el0,
		.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1188873
	{
		.match_type = ate_match_local_cap_id,
		.id = (void *)ARM64_WORKAROUND_1188873,
		.desc = "ARM erratum 1188873",
		.read_cntvct_el0 = arm64_1188873_read_cntvct_el0,
	},
#endif
#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
	{
		.match_type = ate_match_dt,
		.id = "allwinner,erratum-unknown1",
		.desc = "Allwinner erratum UNKNOWN1",
		.read_cntp_tval_el0 = sun50i_a64_read_cntp_tval_el0,
		.read_cntv_tval_el0 = sun50i_a64_read_cntv_tval_el0,
		.read_cntpct_el0 = sun50i_a64_read_cntpct_el0,
		.read_cntvct_el0 = sun50i_a64_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
#endif
};

typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
			       const void *);

static
bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa,
				 const void *arg)
{
	const struct device_node *np = arg;

	return of_property_read_bool(np, wa->id);
}

static
bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa,
					const void *arg)
{
	return this_cpu_has_cap((uintptr_t)wa->id);
}

static
bool arch_timer_check_acpi_oem_erratum(const struct arch_timer_erratum_workaround *wa,
				       const void *arg)
{
	static const struct ate_acpi_oem_info empty_oem_info = {};
	const struct ate_acpi_oem_info *info = wa->id;
	const struct acpi_table_header *table = arg;

	/* Iterate over the ACPI OEM info array, looking for a match */
	while (memcmp(info, &empty_oem_info, sizeof(*info))) {
		if (!memcmp(info->oem_id, table->oem_id, ACPI_OEM_ID_SIZE) &&
		    !memcmp(info->oem_table_id, table->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
		    info->oem_revision == table->oem_revision)
			return true;

		info++;
	}

	return false;
}

static const struct arch_timer_erratum_workaround *
arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
			  ate_match_fn_t match_fn,
			  void *arg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
		if (ool_workarounds[i].match_type != type)
			continue;

		if (match_fn(&ool_workarounds[i], arg))
			return &ool_workarounds[i];
	}

	return NULL;
}

static
void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa,
				  bool local)
{
	int i;

	if (local) {
		__this_cpu_write(timer_unstable_counter_workaround, wa);
	} else {
		for_each_possible_cpu(i)
			per_cpu(timer_unstable_counter_workaround, i) = wa;
	}

	/*
	 * Use the locked version, as we're called from the CPU
	 * hotplug framework. Otherwise, we end up in deadlock-land.
	 */
	static_branch_enable_cpuslocked(&arch_timer_read_ool_enabled);

	/*
	 * Don't use the vdso fastpath if errata require using the
	 * out-of-line counter accessor. We may change our mind pretty
	 * late in the game (with a per-CPU erratum, for example), so
	 * change both the default value and the vdso itself.
	 */
	if (wa->read_cntvct_el0) {
		clocksource_counter.archdata.vdso_direct = false;
		vdso_default = false;
	}
}

static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
					    void *arg)
{
	const struct arch_timer_erratum_workaround *wa;
	ate_match_fn_t match_fn = NULL;
	bool local = false;

	switch (type) {
	case ate_match_dt:
		match_fn = arch_timer_check_dt_erratum;
		break;
	case ate_match_local_cap_id:
		match_fn = arch_timer_check_local_cap_erratum;
		local = true;
		break;
	case ate_match_acpi_oem_info:
		match_fn = arch_timer_check_acpi_oem_erratum;
		break;
	default:
		WARN_ON(1);
		return;
	}

	wa = arch_timer_iterate_errata(type, match_fn, arg);
	if (!wa)
		return;

	if (needs_unstable_timer_counter_workaround()) {
		const struct arch_timer_erratum_workaround *__wa;
		__wa = __this_cpu_read(timer_unstable_counter_workaround);
		if (__wa && wa != __wa)
			pr_warn("Can't enable workaround for %s (clashes with %s)\n",
				wa->desc, __wa->desc);

		if (__wa)
			return;
	}

	arch_timer_enable_workaround(wa, local);
	pr_info("Enabling %s workaround for %s\n",
		local ? "local" : "global", wa->desc);
}

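/*
 * Clockevent-callback dispatch helper: if this CPU has a workaround
 * that overrides 'fn', call it, store its return value in 'r' and
 * evaluate to true; otherwise evaluate to false so the caller falls
 * through to the default implementation. For example,
 *
 *	if (erratum_handler(set_next_event_virt, ret, evt, clk))
 *		return ret;
 *
 * expands to exactly that per-CPU check.
 */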
#define erratum_handler(fn, r, ...)					\
({									\
	bool __val;							\
	if (needs_unstable_timer_counter_workaround()) {		\
		const struct arch_timer_erratum_workaround *__wa;	\
		__wa = __this_cpu_read(timer_unstable_counter_workaround); \
		if (__wa && __wa->fn) {					\
			r = __wa->fn(__VA_ARGS__);			\
			__val = true;					\
		} else {						\
			__val = false;					\
		}							\
	} else {							\
		__val = false;						\
	}								\
	__val;								\
})

static bool arch_timer_this_cpu_has_cntvct_wa(void)
{
	const struct arch_timer_erratum_workaround *wa;

	wa = __this_cpu_read(timer_unstable_counter_workaround);
	return wa && wa->read_cntvct_el0;
}
#else
#define arch_timer_check_ool_workaround(t,a)		do { } while (0)
#define erratum_set_next_event_tval_virt(...)		({BUG(); 0;})
#define erratum_set_next_event_tval_phys(...)		({BUG(); 0;})
#define erratum_handler(fn, r, ...)			({false;})
#define arch_timer_this_cpu_has_cntvct_wa()		({false;})
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */

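/*
 * Common interrupt path: if ISTATUS says this timer fired, mask its
 * output (the event is oneshot; the next set_next_event() unmasks
 * it) and hand the expiry to the clockevent core.
 */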
static __always_inline irqreturn_t timer_handler(const int access,
					struct clock_event_device *evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
}

static __always_inline int timer_shutdown(const int access,
					  struct clock_event_device *clk)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);

	return 0;
}

static int arch_timer_shutdown_virt(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}

static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
}

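/*
 * Program the timer through TVAL, the self-decrementing view of the
 * comparator: writing 'evt' arms the timer to fire in 'evt' ticks,
 * equivalent to the explicit CVAL = counter + evt computation done
 * by the erratum variants above.
 */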
static __always_inline void set_next_event(const int access, unsigned long evt,
					   struct clock_event_device *clk)
{
	unsigned long ctrl;
	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *clk)
{
	int ret;

	if (erratum_handler(set_next_event_virt, ret, evt, clk))
		return ret;

	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *clk)
{
	int ret;

	if (erratum_handler(set_next_event_phys, ret, evt, clk))
		return ret;

	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_virt_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
	return 0;
}

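/*
 * Common clockevent setup for both flavours. The cp15 timer is rated
 * above the MMIO one (450 vs 400) so it wins where both exist, and
 * the delta is bounded to 0x7fffffff ticks because TVAL is a signed
 * 32-bit downcounter.
 */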
static void __arch_timer_setup(unsigned type,
			       struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT;

	if (type == ARCH_TIMER_TYPE_CP15) {
		if (arch_timer_c3stop)
			clk->features |= CLOCK_EVT_FEAT_C3STOP;
		clk->name = "arch_sys_timer";
		clk->rating = 450;
		clk->cpumask = cpumask_of(smp_processor_id());
		clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
		switch (arch_timer_uses_ppi) {
		case ARCH_TIMER_VIRT_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_virt;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
			clk->set_next_event = arch_timer_set_next_event_virt;
			break;
		case ARCH_TIMER_PHYS_SECURE_PPI:
		case ARCH_TIMER_PHYS_NONSECURE_PPI:
		case ARCH_TIMER_HYP_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_phys;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
			clk->set_next_event = arch_timer_set_next_event_phys;
			break;
		default:
			BUG();
		}

		arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
	} else {
		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
		clk->name = "arch_mem_timer";
		clk->rating = 400;
		clk->cpumask = cpu_possible_mask;
		if (arch_timer_mem_use_virtual) {
			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
			clk->set_next_event =
				arch_timer_set_next_event_virt_mem;
		} else {
			clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
			clk->set_next_event =
				arch_timer_set_next_event_phys_mem;
		}
	}

	clk->set_state_shutdown(clk);

	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}

static void arch_timer_evtstrm_enable(int divider)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
	/* Set the divider and enable virtual event stream */
	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
			| ARCH_TIMER_VIRT_EVT_EN;
	arch_timer_set_cntkctl(cntkctl);
	elf_hwcap |= HWCAP_EVTSTRM;
#ifdef CONFIG_COMPAT
	compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
	cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}

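/*
 * Worked example, assuming a 50 MHz counter and the usual 10 kHz
 * ARCH_TIMER_EVT_STREAM_FREQ: evt_stream_div = 5000, fls(5000) = 13,
 * and since bit 11 is clear, 5000 is nearer 2^12 than 2^13, so pos
 * is stepped down to 12 before being clamped to the 4-bit divider
 * field.
 */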
static void arch_timer_configure_evtstream(void)
{
	int evt_stream_div, pos;

	/* Find the closest power of two to the divisor */
	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
	pos = fls(evt_stream_div);
	if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
		pos--;
	/* enable event stream */
	arch_timer_evtstrm_enable(min(pos, 15));
}

static void arch_counter_set_user_access(void)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	/* Disable user access to the timers and both counters */
	/* Also disable virtual event stream */
	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
			| ARCH_TIMER_USR_VT_ACCESS_EN
			| ARCH_TIMER_USR_VCT_ACCESS_EN
			| ARCH_TIMER_VIRT_EVT_EN
			| ARCH_TIMER_USR_PCT_ACCESS_EN);

	/*
	 * Enable user access to the virtual counter if it doesn't
	 * need to be worked around. The vdso may already have been
	 * disabled though.
	 */
	if (arch_timer_this_cpu_has_cntvct_wa())
		pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id());
	else
		cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;

	arch_timer_set_cntkctl(cntkctl);
}

static bool arch_timer_has_nonsecure_ppi(void)
{
	return (arch_timer_uses_ppi == ARCH_TIMER_PHYS_SECURE_PPI &&
		arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
}

static u32 check_ppi_trigger(int irq)
{
	u32 flags = irq_get_trigger_type(irq);

	if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
		pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
		pr_warn("WARNING: Please fix your firmware\n");
		flags = IRQF_TRIGGER_LOW;
	}

	return flags;
}

static int arch_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
	u32 flags;

	__arch_timer_setup(ARCH_TIMER_TYPE_CP15, clk);

	flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);

	if (arch_timer_has_nonsecure_ppi()) {
		flags = check_ppi_trigger(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
		enable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
				  flags);
	}

	arch_counter_set_user_access();
	if (evtstrm_enable)
		arch_timer_configure_evtstream();

	return 0;
}

/*
 * For historical reasons, when probing with DT we use whichever (non-zero)
 * rate was probed first, and don't verify that others match. If the first node
 * probed has a clock-frequency property, this overrides the HW register.
 */
static void arch_timer_of_configure_rate(u32 rate, struct device_node *np)
{
	/* Who has more than one independent system counter? */
	if (arch_timer_rate)
		return;

	if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate))
		arch_timer_rate = rate;

	/* Check the timer frequency. */
	if (arch_timer_rate == 0)
		pr_warn("frequency not available\n");
}

static void arch_timer_banner(unsigned type)
{
	pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
		type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "",
		type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ?
			" and " : "",
		type & ARCH_TIMER_TYPE_MEM ? "mmio" : "",
		(unsigned long)arch_timer_rate / 1000000,
		(unsigned long)(arch_timer_rate / 10000) % 100,
		type & ARCH_TIMER_TYPE_CP15 ?
			(arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys" :
			"",
		type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? "/" : "",
		type & ARCH_TIMER_TYPE_MEM ?
			arch_timer_mem_use_virtual ? "virt" : "phys" :
			"");
}

u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}

bool arch_timer_evtstrm_available(void)
{
	/*
	 * We might get called from a preemptible context. This is fine
	 * because the availability of the event stream should always be
	 * the same for a preemptible context and the context where we
	 * might resume a task.
	 */
	return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available);
}

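/*
 * The MMIO counter is two 32-bit halves; read hi/lo/hi and retry
 * until both high-word reads agree, so a low-word rollover in
 * between cannot produce a torn 64-bit value.
 */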
static u64 arch_counter_get_cntvct_mem(void)
{
	u32 vct_lo, vct_hi, tmp_hi;

	do {
		vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
		vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
		tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
	} while (vct_hi != tmp_hi);

	return ((u64) vct_hi << 32) | vct_lo;
}

static struct arch_timer_kvm_info arch_timer_kvm_info;

struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
{
	return &arch_timer_kvm_info;
}

static void __init arch_counter_register(unsigned type)
{
	u64 start_count;

	/* Register the CP15 based counter if we have one */
	if (type & ARCH_TIMER_TYPE_CP15) {
		if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
		    arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI)
			arch_timer_read_counter = arch_counter_get_cntvct;
		else
			arch_timer_read_counter = arch_counter_get_cntpct;

		clocksource_counter.archdata.vdso_direct = vdso_default;
	} else {
		arch_timer_read_counter = arch_counter_get_cntvct_mem;
	}

	if (!arch_counter_suspend_stop)
		clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
	start_count = arch_timer_read_counter();
	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&arch_timer_kvm_info.timecounter,
			 &cyclecounter, start_count);

	/* 56 bits minimum, so we assume worst case rollover */
	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}

static void arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("disable IRQ%d cpu #%d\n", clk->irq, smp_processor_id());

	disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
	if (arch_timer_has_nonsecure_ppi())
		disable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);

	clk->set_state_shutdown(clk);
}

static int arch_timer_dying_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);

	cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);

	arch_timer_stop(clk);
	return 0;
}

#ifdef CONFIG_CPU_PM
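/*
 * CNTKCTL can be lost when a CPU powers down in deep idle or
 * suspend, so stash it on CPU_PM_ENTER and restore it on exit; the
 * event-stream availability mask tracks the same transitions, since
 * the stream dies with the lost configuration.
 */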
static DEFINE_PER_CPU(unsigned long, saved_cntkctl);
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	if (action == CPU_PM_ENTER) {
		__this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl());

		cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
	} else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) {
		arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));

		if (elf_hwcap & HWCAP_EVTSTRM)
			cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
	}
	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_pm_notifier = {
	.notifier_call = arch_timer_cpu_pm_notify,
};

static int __init arch_timer_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}

static void __init arch_timer_cpu_pm_deinit(void)
{
	WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
}

#else
static int __init arch_timer_cpu_pm_init(void)
{
	return 0;
}

static void __init arch_timer_cpu_pm_deinit(void)
{
}
#endif

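/*
 * Bring-up for the per-cpu timer: allocate the clockevent devices,
 * request the PPI(s) matching the flavour chosen earlier (both
 * physical PPIs when a non-secure one is present), then register
 * the CPU PM notifier and the hotplug callbacks that perform the
 * actual per-cpu setup and teardown.
 */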
static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	ppi = arch_timer_ppi[arch_timer_uses_ppi];
	switch (arch_timer_uses_ppi) {
	case ARCH_TIMER_VIRT_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
		break;
	case ARCH_TIMER_PHYS_SECURE_PPI:
	case ARCH_TIMER_PHYS_NONSECURE_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_has_nonsecure_ppi()) {
			ppi = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_SECURE_PPI],
						arch_timer_evt);
		}
		break;
	case ARCH_TIMER_HYP_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		break;
	default:
		BUG();
	}

	if (err) {
		pr_err("can't register interrupt %d (%d)\n", ppi, err);
		goto out_free;
	}

	err = arch_timer_cpu_pm_init();
	if (err)
		goto out_unreg_notify;

	/* Register and immediately configure the timer on the boot CPU */
	err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
				"clockevents/arm/arch_timer:starting",
				arch_timer_starting_cpu, arch_timer_dying_cpu);
	if (err)
		goto out_unreg_cpupm;
	return 0;

out_unreg_cpupm:
	arch_timer_cpu_pm_deinit();

out_unreg_notify:
	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
	if (arch_timer_has_nonsecure_ppi())
		free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
				arch_timer_evt);

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}

static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
{
	int ret;
	irq_handler_t func;
	struct arch_timer *t;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	t->base = base;
	t->evt.irq = irq;
	__arch_timer_setup(ARCH_TIMER_TYPE_MEM, &t->evt);

	if (arch_timer_mem_use_virtual)
		func = arch_timer_handler_virt_mem;
	else
		func = arch_timer_handler_phys_mem;

	ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
	if (ret) {
		pr_err("Failed to request mem timer irq\n");
		kfree(t);
	}

	return ret;
}

static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible   = "arm,armv7-timer",    },
	{ .compatible   = "arm,armv8-timer",    },
	{},
};

static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
	{ .compatible   = "arm,armv7-timer-mem", },
	{},
};

static bool __init arch_timer_needs_of_probing(void)
{
	struct device_node *dn;
	bool needs_probing = false;
	unsigned int mask = ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM;

	/* We have two timers, and both device-tree nodes are probed. */
	if ((arch_timers_present & mask) == mask)
		return false;

	/*
	 * Only one type of timer is probed; check whether we have another
	 * type of timer node in the device tree.
	 */
	if (arch_timers_present & ARCH_TIMER_TYPE_CP15)
		dn = of_find_matching_node(NULL, arch_timer_mem_of_match);
	else
		dn = of_find_matching_node(NULL, arch_timer_of_match);

	if (dn && of_device_is_available(dn))
		needs_probing = true;

	of_node_put(dn);

	return needs_probing;
}

static int __init arch_timer_common_init(void)
{
	arch_timer_banner(arch_timers_present);
	arch_counter_register(arch_timers_present);
	return arch_timer_arch_init();
}

/**
 * arch_timer_select_ppi() - Select suitable PPI for the current system.
 *
 * If HYP mode is available, we know that the physical timer
 * has been configured to be accessible from PL1. Use it, so
 * that a guest can use the virtual timer instead.
 *
 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
 * accesses to CNTP_*_EL1 registers are silently redirected to
 * their CNTHP_*_EL2 counterparts, and use a different PPI
 * number.
 *
 * If no interrupt is provided for the virtual timer, we'll have to
 * stick to the physical timer. It'd better be accessible...
 * For arm64 we never use the secure interrupt.
 *
 * Return: a suitable PPI type for the current system.
 */
static enum arch_timer_ppi_nr __init arch_timer_select_ppi(void)
{
	if (is_kernel_in_hyp_mode())
		return ARCH_TIMER_HYP_PPI;

	if (!is_hyp_mode_available() && arch_timer_ppi[ARCH_TIMER_VIRT_PPI])
		return ARCH_TIMER_VIRT_PPI;

	if (IS_ENABLED(CONFIG_ARM64))
		return ARCH_TIMER_PHYS_NONSECURE_PPI;

	return ARCH_TIMER_PHYS_SECURE_PPI;
}

static void __init arch_timer_populate_kvm_info(void)
{
	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
	if (is_kernel_in_hyp_mode())
		arch_timer_kvm_info.physical_irq = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
}

static int __init arch_timer_of_init(struct device_node *np)
{
	int i, ret;
	u32 rate;

	if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
		pr_warn("multiple nodes in dt, skipping\n");
		return 0;
	}

	arch_timers_present |= ARCH_TIMER_TYPE_CP15;
	for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

	arch_timer_populate_kvm_info();

	rate = arch_timer_get_cntfrq();
	arch_timer_of_configure_rate(rate, np);

	arch_timer_c3stop = !of_property_read_bool(np, "always-on");

	/* Check for globally applicable workarounds */
	arch_timer_check_ool_workaround(ate_match_dt, np);

	/*
	 * If we cannot rely on firmware initializing the timer registers then
	 * we should use the physical timers instead.
	 */
	if (IS_ENABLED(CONFIG_ARM) &&
	    of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
		arch_timer_uses_ppi = ARCH_TIMER_PHYS_SECURE_PPI;
	else
		arch_timer_uses_ppi = arch_timer_select_ppi();

	if (!arch_timer_ppi[arch_timer_uses_ppi]) {
		pr_err("No interrupt available, giving up\n");
		return -EINVAL;
	}

	/* On some systems, the counter stops ticking when in suspend. */
	arch_counter_suspend_stop = of_property_read_bool(np,
							 "arm,no-tick-in-suspend");

	ret = arch_timer_register();
	if (ret)
		return ret;

	if (arch_timer_needs_of_probing())
		return 0;

	return arch_timer_common_init();
}
TIMER_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
TIMER_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);

static u32 __init
arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
{
	void __iomem *base;
	u32 rate;

	base = ioremap(frame->cntbase, frame->size);
	if (!base) {
		pr_err("Unable to map frame @ %pa\n", &frame->cntbase);
		return 0;
	}

	rate = readl_relaxed(base + CNTFRQ);

	iounmap(base);

	return rate;
}

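/*
 * Probe each implemented frame's access rights by writing CNTACR and
 * reading it back (only permitted bits stick). The first frame that
 * is virtual-capable per CNTTIDR and grants virtual timer+counter
 * access wins; otherwise the last physical-capable frame is used.
 */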
static struct arch_timer_mem_frame * __init
arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem)
{
	struct arch_timer_mem_frame *frame, *best_frame = NULL;
	void __iomem *cntctlbase;
	u32 cnttidr;
	int i;

	cntctlbase = ioremap(timer_mem->cntctlbase, timer_mem->size);
	if (!cntctlbase) {
		pr_err("Can't map CNTCTLBase @ %pa\n",
			&timer_mem->cntctlbase);
		return NULL;
	}

	cnttidr = readl_relaxed(cntctlbase + CNTTIDR);

	/*
	 * Try to find a virtual capable frame. Otherwise fall back to a
	 * physical capable frame.
	 */
	for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
		u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
			     CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;

		frame = &timer_mem->frame[i];
		if (!frame->valid)
			continue;

		/* Try enabling everything, and see what sticks */
		writel_relaxed(cntacr, cntctlbase + CNTACR(i));
		cntacr = readl_relaxed(cntctlbase + CNTACR(i));

		if ((cnttidr & CNTTIDR_VIRT(i)) &&
		    !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
			best_frame = frame;
			arch_timer_mem_use_virtual = true;
			break;
		}

		if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
			continue;

		best_frame = frame;
	}

	iounmap(cntctlbase);

	return best_frame;
}

static int __init
arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)
{
	void __iomem *base;
	int ret, irq = 0;

	if (arch_timer_mem_use_virtual)
		irq = frame->virt_irq;
	else
		irq = frame->phys_irq;

	if (!irq) {
		pr_err("Frame missing %s irq.\n",
		       arch_timer_mem_use_virtual ? "virt" : "phys");
		return -EINVAL;
	}

	if (!request_mem_region(frame->cntbase, frame->size,
				"arch_mem_timer"))
		return -EBUSY;

	base = ioremap(frame->cntbase, frame->size);
	if (!base) {
		pr_err("Can't map frame's registers\n");
		return -ENXIO;
	}

	ret = arch_timer_mem_register(base, irq);
	if (ret) {
		iounmap(base);
		return ret;
	}

	arch_counter_base = base;
	arch_timers_present |= ARCH_TIMER_TYPE_MEM;

	return 0;
}

static int __init arch_timer_mem_of_init(struct device_node *np)
{
	struct arch_timer_mem *timer_mem;
	struct arch_timer_mem_frame *frame;
	struct device_node *frame_node;
	struct resource res;
	int ret = -EINVAL;
	u32 rate;

	timer_mem = kzalloc(sizeof(*timer_mem), GFP_KERNEL);
	if (!timer_mem)
		return -ENOMEM;

	if (of_address_to_resource(np, 0, &res))
		goto out;
	timer_mem->cntctlbase = res.start;
	timer_mem->size = resource_size(&res);

	for_each_available_child_of_node(np, frame_node) {
		u32 n;
		struct arch_timer_mem_frame *frame;

		if (of_property_read_u32(frame_node, "frame-number", &n)) {
			pr_err(FW_BUG "Missing frame-number.\n");
			of_node_put(frame_node);
			goto out;
		}
		if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
			pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n",
			       ARCH_TIMER_MEM_MAX_FRAMES - 1);
			of_node_put(frame_node);
			goto out;
		}
		frame = &timer_mem->frame[n];

		if (frame->valid) {
			pr_err(FW_BUG "Duplicated frame-number.\n");
			of_node_put(frame_node);
			goto out;
		}

		if (of_address_to_resource(frame_node, 0, &res)) {
			of_node_put(frame_node);
			goto out;
		}
		frame->cntbase = res.start;
		frame->size = resource_size(&res);

		frame->virt_irq = irq_of_parse_and_map(frame_node,
						       ARCH_TIMER_VIRT_SPI);
		frame->phys_irq = irq_of_parse_and_map(frame_node,
						       ARCH_TIMER_PHYS_SPI);

		frame->valid = true;
	}

	frame = arch_timer_mem_find_best_frame(timer_mem);
	if (!frame) {
		pr_err("Unable to find a suitable frame in timer @ %pa\n",
			&timer_mem->cntctlbase);
		ret = -EINVAL;
		goto out;
	}

	rate = arch_timer_mem_frame_get_cntfrq(frame);
	arch_timer_of_configure_rate(rate, np);

	ret = arch_timer_mem_frame_register(frame);
	if (!ret && !arch_timer_needs_of_probing())
		ret = arch_timer_common_init();
out:
	kfree(timer_mem);
	return ret;
}
TIMER_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
		       arch_timer_mem_of_init);

#ifdef CONFIG_ACPI_GTDT
static int __init
arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem)
{
	struct arch_timer_mem_frame *frame;
	u32 rate;
	int i;

	for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
		frame = &timer_mem->frame[i];

		if (!frame->valid)
			continue;

		rate = arch_timer_mem_frame_get_cntfrq(frame);
		if (rate == arch_timer_rate)
			continue;

		pr_err(FW_BUG "CNTFRQ mismatch: frame @ %pa: (0x%08lx), CPU: (0x%08lx)\n",
			&frame->cntbase,
			(unsigned long)rate, (unsigned long)arch_timer_rate);

		return -EINVAL;
	}

	return 0;
}

static int __init arch_timer_mem_acpi_init(int platform_timer_count)
{
	struct arch_timer_mem *timers, *timer;
	struct arch_timer_mem_frame *frame, *best_frame = NULL;
	int timer_count, i, ret = 0;

	timers = kcalloc(platform_timer_count, sizeof(*timers),
			    GFP_KERNEL);
	if (!timers)
		return -ENOMEM;

	ret = acpi_arch_timer_mem_init(timers, &timer_count);
	if (ret || !timer_count)
		goto out;

	/*
	 * While unlikely, it's theoretically possible that none of the
	 * frames in a timer expose the combination of features we want.
	 */
	for (i = 0; i < timer_count; i++) {
		timer = &timers[i];

		frame = arch_timer_mem_find_best_frame(timer);
		if (!best_frame)
			best_frame = frame;

		ret = arch_timer_mem_verify_cntfrq(timer);
		if (ret) {
			pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n");
			goto out;
		}

		if (!best_frame) /* implies !frame */
			/*
			 * Only complain about missing suitable frames if we
			 * haven't already found one in a previous iteration.
			 */
			pr_err("Unable to find a suitable frame in timer @ %pa\n",
				&timer->cntctlbase);
	}

	if (best_frame)
		ret = arch_timer_mem_frame_register(best_frame);
out:
	kfree(timers);
	return ret;
}

/* Initialize the per-processor generic timer and the memory-mapped timer (if present) */
static int __init arch_timer_acpi_init(struct acpi_table_header *table)
{
	int ret, platform_timer_count;

	if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
		pr_warn("already initialized, skipping\n");
		return -EINVAL;
	}

	arch_timers_present |= ARCH_TIMER_TYPE_CP15;

	ret = acpi_gtdt_init(table, &platform_timer_count);
	if (ret) {
		pr_err("Failed to init GTDT table.\n");
		return ret;
	}

	arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_PHYS_NONSECURE_PPI);

	arch_timer_ppi[ARCH_TIMER_VIRT_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_VIRT_PPI);

	arch_timer_ppi[ARCH_TIMER_HYP_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI);

	arch_timer_populate_kvm_info();

	/*
	 * When probing via ACPI, we have no mechanism to override the sysreg
	 * CNTFRQ value. This *must* be correct.
	 */
	arch_timer_rate = arch_timer_get_cntfrq();
	if (!arch_timer_rate) {
		pr_err(FW_BUG "frequency not available.\n");
		return -EINVAL;
	}

	arch_timer_uses_ppi = arch_timer_select_ppi();
	if (!arch_timer_ppi[arch_timer_uses_ppi]) {
		pr_err("No interrupt available, giving up\n");
		return -EINVAL;
	}

	/* Always-on capability */
	arch_timer_c3stop = acpi_gtdt_c3stop(arch_timer_uses_ppi);

	/* Check for globally applicable workarounds */
	arch_timer_check_ool_workaround(ate_match_acpi_oem_info, table);

	ret = arch_timer_register();
	if (ret)
		return ret;

	if (platform_timer_count &&
	    arch_timer_mem_acpi_init(platform_timer_count))
		pr_err("Failed to initialize memory-mapped timer.\n");

	return arch_timer_common_init();
}
TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
#endif