1 /*
2  *  linux/drivers/clocksource/arm_arch_timer.c
3  *
4  *  Copyright (C) 2011 ARM Ltd.
5  *  All Rights Reserved
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11 
12 #define pr_fmt(fmt)	"arm_arch_timer: " fmt
13 
14 #include <linux/init.h>
15 #include <linux/kernel.h>
16 #include <linux/device.h>
17 #include <linux/smp.h>
18 #include <linux/cpu.h>
19 #include <linux/cpu_pm.h>
20 #include <linux/clockchips.h>
21 #include <linux/clocksource.h>
22 #include <linux/interrupt.h>
23 #include <linux/of_irq.h>
24 #include <linux/of_address.h>
25 #include <linux/io.h>
26 #include <linux/slab.h>
27 #include <linux/sched/clock.h>
28 #include <linux/sched_clock.h>
29 #include <linux/acpi.h>
30 
31 #include <asm/arch_timer.h>
32 #include <asm/virt.h>
33 
34 #include <clocksource/arm_arch_timer.h>
35 
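/* Switch to a shorter message prefix for everything below. */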
36 #undef pr_fmt
37 #define pr_fmt(fmt) "arch_timer: " fmt
38 
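/*
 * CNTTIDR and the per-frame CNTACR registers live in the CNTCTLBase
 * frame of a memory-mapped timer; bit 1 of each 4-bit CNTTIDR field
 * flags the corresponding frame as virtual-timer capable.
 */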
39 #define CNTTIDR		0x08
40 #define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4))
41 
42 #define CNTACR(n)	(0x40 + ((n) * 4))
43 #define CNTACR_RPCT	BIT(0)
44 #define CNTACR_RVCT	BIT(1)
45 #define CNTACR_RFRQ	BIT(2)
46 #define CNTACR_RVOFF	BIT(3)
47 #define CNTACR_RWVT	BIT(4)
48 #define CNTACR_RWPT	BIT(5)
49 
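/* Register offsets within an individual CNTBase timer frame. */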
50 #define CNTVCT_LO	0x08
51 #define CNTVCT_HI	0x0c
52 #define CNTFRQ		0x10
53 #define CNTP_TVAL	0x28
54 #define CNTP_CTL	0x2c
55 #define CNTV_TVAL	0x38
56 #define CNTV_CTL	0x3c
57 
58 static unsigned arch_timers_present __initdata;
59 
60 static void __iomem *arch_counter_base;
61 
62 struct arch_timer {
63 	void __iomem *base;
64 	struct clock_event_device evt;
65 };
66 
67 #define to_arch_timer(e) container_of(e, struct arch_timer, evt)
68 
69 static u32 arch_timer_rate;
70 static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI];
71 
72 static struct clock_event_device __percpu *arch_timer_evt;
73 
74 static enum arch_timer_ppi_nr arch_timer_uses_ppi = ARCH_TIMER_VIRT_PPI;
75 static bool arch_timer_c3stop;
76 static bool arch_timer_mem_use_virtual;
77 static bool arch_counter_suspend_stop;
78 static bool vdso_default = true;
79 
80 static cpumask_t evtstrm_available = CPU_MASK_NONE;
81 static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
82 
83 static int __init early_evtstrm_cfg(char *buf)
84 {
85 	return strtobool(buf, &evtstrm_enable);
86 }
87 early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
88 
89 /*
90  * Architected system timer support.
91  */
92 
93 static __always_inline
94 void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
95 			  struct clock_event_device *clk)
96 {
97 	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
98 		struct arch_timer *timer = to_arch_timer(clk);
99 		switch (reg) {
100 		case ARCH_TIMER_REG_CTRL:
101 			writel_relaxed(val, timer->base + CNTP_CTL);
102 			break;
103 		case ARCH_TIMER_REG_TVAL:
104 			writel_relaxed(val, timer->base + CNTP_TVAL);
105 			break;
106 		}
107 	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
108 		struct arch_timer *timer = to_arch_timer(clk);
109 		switch (reg) {
110 		case ARCH_TIMER_REG_CTRL:
111 			writel_relaxed(val, timer->base + CNTV_CTL);
112 			break;
113 		case ARCH_TIMER_REG_TVAL:
114 			writel_relaxed(val, timer->base + CNTV_TVAL);
115 			break;
116 		}
117 	} else {
118 		arch_timer_reg_write_cp15(access, reg, val);
119 	}
120 }
121 
122 static __always_inline
123 u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
124 			struct clock_event_device *clk)
125 {
126 	u32 val;
127 
128 	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
129 		struct arch_timer *timer = to_arch_timer(clk);
130 		switch (reg) {
131 		case ARCH_TIMER_REG_CTRL:
132 			val = readl_relaxed(timer->base + CNTP_CTL);
133 			break;
134 		case ARCH_TIMER_REG_TVAL:
135 			val = readl_relaxed(timer->base + CNTP_TVAL);
136 			break;
137 		}
138 	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
139 		struct arch_timer *timer = to_arch_timer(clk);
140 		switch (reg) {
141 		case ARCH_TIMER_REG_CTRL:
142 			val = readl_relaxed(timer->base + CNTV_CTL);
143 			break;
144 		case ARCH_TIMER_REG_TVAL:
145 			val = readl_relaxed(timer->base + CNTV_TVAL);
146 			break;
147 		}
148 	} else {
149 		val = arch_timer_reg_read_cp15(access, reg);
150 	}
151 
152 	return val;
153 }
154 
155 /*
156  * Default to cp15 based access because arm64 uses this function for
157  * sched_clock() before DT is probed and the cp15 method is guaranteed
158  * to exist on arm64. arm doesn't use this before DT is probed so even
159  * if we don't have the cp15 accessors we won't have a problem.
160  */
161 u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
162 EXPORT_SYMBOL_GPL(arch_timer_read_counter);
163 
164 static u64 arch_counter_read(struct clocksource *cs)
165 {
166 	return arch_timer_read_counter();
167 }
168 
169 static u64 arch_counter_read_cc(const struct cyclecounter *cc)
170 {
171 	return arch_timer_read_counter();
172 }
173 
174 static struct clocksource clocksource_counter = {
175 	.name	= "arch_sys_counter",
176 	.rating	= 400,
177 	.read	= arch_counter_read,
178 	.mask	= CLOCKSOURCE_MASK(56),
179 	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
180 };
181 
182 static struct cyclecounter cyclecounter __ro_after_init = {
183 	.read	= arch_counter_read_cc,
184 	.mask	= CLOCKSOURCE_MASK(56),
185 };
186 
187 struct ate_acpi_oem_info {
188 	char oem_id[ACPI_OEM_ID_SIZE + 1];
189 	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
190 	u32 oem_revision;
191 };
192 
193 #ifdef CONFIG_FSL_ERRATUM_A008585
194 /*
195  * The number of retries is an arbitrary value well beyond the highest number
196  * of iterations the loop has been observed to take.
197  */
198 #define __fsl_a008585_read_reg(reg) ({			\
199 	u64 _old, _new;					\
200 	int _retries = 200;				\
201 							\
202 	do {						\
203 		_old = read_sysreg(reg);		\
204 		_new = read_sysreg(reg);		\
205 		_retries--;				\
206 	} while (unlikely(_old != _new) && _retries);	\
207 							\
208 	WARN_ON_ONCE(!_retries);			\
209 	_new;						\
210 })
211 
212 static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
213 {
214 	return __fsl_a008585_read_reg(cntp_tval_el0);
215 }
216 
217 static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
218 {
219 	return __fsl_a008585_read_reg(cntv_tval_el0);
220 }
221 
222 static u64 notrace fsl_a008585_read_cntpct_el0(void)
223 {
224 	return __fsl_a008585_read_reg(cntpct_el0);
225 }
226 
227 static u64 notrace fsl_a008585_read_cntvct_el0(void)
228 {
229 	return __fsl_a008585_read_reg(cntvct_el0);
230 }
231 #endif
232 
233 #ifdef CONFIG_HISILICON_ERRATUM_161010101
234 /*
235  * Verify whether the value of the second read is larger than the first by
236  * less than 32 is the only way to confirm the value is correct, so clear the
237  * lower 5 bits to check whether the difference is greater than 32 or not.
238  * Theoretically the erratum should not occur more than twice in succession
239  * when reading the system counter, but it is possible that some interrupts
240  * may lead to more than twice read errors, triggering the warning, so setting
241  * the number of retries far beyond the number of iterations the loop has been
242  * observed to take.
243  */
244 #define __hisi_161010101_read_reg(reg) ({				\
245 	u64 _old, _new;						\
246 	int _retries = 50;					\
247 								\
248 	do {							\
249 		_old = read_sysreg(reg);			\
250 		_new = read_sysreg(reg);			\
251 		_retries--;					\
252 	} while (unlikely((_new - _old) >> 5) && _retries);	\
253 								\
254 	WARN_ON_ONCE(!_retries);				\
255 	_new;							\
256 })
257 
258 static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
259 {
260 	return __hisi_161010101_read_reg(cntp_tval_el0);
261 }
262 
263 static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
264 {
265 	return __hisi_161010101_read_reg(cntv_tval_el0);
266 }
267 
268 static u64 notrace hisi_161010101_read_cntpct_el0(void)
269 {
270 	return __hisi_161010101_read_reg(cntpct_el0);
271 }
272 
273 static u64 notrace hisi_161010101_read_cntvct_el0(void)
274 {
275 	return __hisi_161010101_read_reg(cntvct_el0);
276 }
277 
278 static struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
279 	/*
280 	 * Note that trailing spaces are required to properly match
281 	 * the OEM table information.
282 	 */
283 	{
284 		.oem_id		= "HISI  ",
285 		.oem_table_id	= "HIP05   ",
286 		.oem_revision	= 0,
287 	},
288 	{
289 		.oem_id		= "HISI  ",
290 		.oem_table_id	= "HIP06   ",
291 		.oem_revision	= 0,
292 	},
293 	{
294 		.oem_id		= "HISI  ",
295 		.oem_table_id	= "HIP07   ",
296 		.oem_revision	= 0,
297 	},
298 	{ /* Sentinel indicating the end of the OEM array */ },
299 };
300 #endif
301 
302 #ifdef CONFIG_ARM64_ERRATUM_858921
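/*
 * Per erratum 858921, a counter read may return a wrong value while the
 * counter crosses a 32-bit boundary. Read twice back-to-back: if bit 32
 * differs between the two reads, a transition was in flight and the
 * first value is returned; otherwise the second is.
 */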
303 static u64 notrace arm64_858921_read_cntpct_el0(void)
304 {
305 	u64 old, new;
306 
307 	old = read_sysreg(cntpct_el0);
308 	new = read_sysreg(cntpct_el0);
309 	return (((old ^ new) >> 32) & 1) ? old : new;
310 }
311 
312 static u64 notrace arm64_858921_read_cntvct_el0(void)
313 {
314 	u64 old, new;
315 
316 	old = read_sysreg(cntvct_el0);
317 	new = read_sysreg(cntvct_el0);
318 	return (((old ^ new) >> 32) & 1) ? old : new;
319 }
320 #endif
321 
322 #ifdef CONFIG_ARM64_ERRATUM_1188873
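/*
 * Erratum 1188873 affects AArch32 counter reads; reads from the AArch64
 * kernel are fine, so this accessor is a plain read. Registering it is
 * what matters: it disables the vdso fastpath and keeps userspace
 * CNTVCT access trapped (see arch_timer_enable_workaround() and
 * arch_counter_set_user_access()).
 */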
323 static u64 notrace arm64_1188873_read_cntvct_el0(void)
324 {
325 	return read_sysreg(cntvct_el0);
326 }
327 #endif
328 
329 #ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
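/*
 * Per-CPU pointer to the workaround in effect, plus a static key so
 * that the counter and timer fast paths cost nothing on systems where
 * no erratum applies.
 */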
330 DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
331 EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
332 
333 DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
334 EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);
335 
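/*
 * A TVAL write is turned by the hardware into CVAL = counter + TVAL,
 * and that implicit counter read is subject to the errata. Program the
 * comparator (CVAL) directly from an explicit, workaround-safe counter
 * read instead.
 */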
336 static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
337 						struct clock_event_device *clk)
338 {
339 	unsigned long ctrl;
340 	u64 cval;
341 
342 	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
343 	ctrl |= ARCH_TIMER_CTRL_ENABLE;
344 	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
345 
346 	if (access == ARCH_TIMER_PHYS_ACCESS) {
347 		cval = evt + arch_counter_get_cntpct();
348 		write_sysreg(cval, cntp_cval_el0);
349 	} else {
350 		cval = evt + arch_counter_get_cntvct();
351 		write_sysreg(cval, cntv_cval_el0);
352 	}
353 
354 	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
355 }
356 
357 static __maybe_unused int erratum_set_next_event_tval_virt(unsigned long evt,
358 					    struct clock_event_device *clk)
359 {
360 	erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
361 	return 0;
362 }
363 
364 static __maybe_unused int erratum_set_next_event_tval_phys(unsigned long evt,
365 					    struct clock_event_device *clk)
366 {
367 	erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
368 	return 0;
369 }
370 
371 static const struct arch_timer_erratum_workaround ool_workarounds[] = {
372 #ifdef CONFIG_FSL_ERRATUM_A008585
373 	{
374 		.match_type = ate_match_dt,
375 		.id = "fsl,erratum-a008585",
376 		.desc = "Freescale erratum a005858",
377 		.read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
378 		.read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
379 		.read_cntpct_el0 = fsl_a008585_read_cntpct_el0,
380 		.read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
381 		.set_next_event_phys = erratum_set_next_event_tval_phys,
382 		.set_next_event_virt = erratum_set_next_event_tval_virt,
383 	},
384 #endif
385 #ifdef CONFIG_HISILICON_ERRATUM_161010101
386 	{
387 		.match_type = ate_match_dt,
388 		.id = "hisilicon,erratum-161010101",
389 		.desc = "HiSilicon erratum 161010101",
390 		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
391 		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
392 		.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
393 		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
394 		.set_next_event_phys = erratum_set_next_event_tval_phys,
395 		.set_next_event_virt = erratum_set_next_event_tval_virt,
396 	},
397 	{
398 		.match_type = ate_match_acpi_oem_info,
399 		.id = hisi_161010101_oem_info,
400 		.desc = "HiSilicon erratum 161010101",
401 		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
402 		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
403 		.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
404 		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
405 		.set_next_event_phys = erratum_set_next_event_tval_phys,
406 		.set_next_event_virt = erratum_set_next_event_tval_virt,
407 	},
408 #endif
409 #ifdef CONFIG_ARM64_ERRATUM_858921
410 	{
411 		.match_type = ate_match_local_cap_id,
412 		.id = (void *)ARM64_WORKAROUND_858921,
413 		.desc = "ARM erratum 858921",
414 		.read_cntpct_el0 = arm64_858921_read_cntpct_el0,
415 		.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
416 	},
417 #endif
418 #ifdef CONFIG_ARM64_ERRATUM_1188873
419 	{
420 		.match_type = ate_match_local_cap_id,
421 		.id = (void *)ARM64_WORKAROUND_1188873,
422 		.desc = "ARM erratum 1188873",
423 		.read_cntvct_el0 = arm64_1188873_read_cntvct_el0,
424 	},
425 #endif
426 };
427 
428 typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
429 			       const void *);
430 
431 static
432 bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa,
433 				 const void *arg)
434 {
435 	const struct device_node *np = arg;
436 
437 	return of_property_read_bool(np, wa->id);
438 }
439 
440 static
441 bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa,
442 					const void *arg)
443 {
444 	return this_cpu_has_cap((uintptr_t)wa->id);
445 }
446 
447 
448 static
449 bool arch_timer_check_acpi_oem_erratum(const struct arch_timer_erratum_workaround *wa,
450 				       const void *arg)
451 {
452 	static const struct ate_acpi_oem_info empty_oem_info = {};
453 	const struct ate_acpi_oem_info *info = wa->id;
454 	const struct acpi_table_header *table = arg;
455 
456 	/* Iterate over the ACPI OEM info array, looking for a match */
457 	while (memcmp(info, &empty_oem_info, sizeof(*info))) {
458 		if (!memcmp(info->oem_id, table->oem_id, ACPI_OEM_ID_SIZE) &&
459 		    !memcmp(info->oem_table_id, table->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
460 		    info->oem_revision == table->oem_revision)
461 			return true;
462 
463 		info++;
464 	}
465 
466 	return false;
467 }
468 
469 static const struct arch_timer_erratum_workaround *
470 arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
471 			  ate_match_fn_t match_fn,
472 			  void *arg)
473 {
474 	int i;
475 
476 	for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
477 		if (ool_workarounds[i].match_type != type)
478 			continue;
479 
480 		if (match_fn(&ool_workarounds[i], arg))
481 			return &ool_workarounds[i];
482 	}
483 
484 	return NULL;
485 }
486 
487 static
488 void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa,
489 				  bool local)
490 {
491 	int i;
492 
493 	if (local) {
494 		__this_cpu_write(timer_unstable_counter_workaround, wa);
495 	} else {
496 		for_each_possible_cpu(i)
497 			per_cpu(timer_unstable_counter_workaround, i) = wa;
498 	}
499 
500 	/*
501 	 * Use the locked version, as we're called from the CPU
502 	 * hotplug framework. Otherwise, we end-up in deadlock-land.
503 	 */
504 	static_branch_enable_cpuslocked(&arch_timer_read_ool_enabled);
505 
506 	/*
507 	 * Don't use the vdso fastpath if errata require using the
508 	 * out-of-line counter accessor. We may change our mind pretty
509 	 * late in the game (with a per-CPU erratum, for example), so
510 	 * change both the default value and the vdso itself.
511 	 */
512 	if (wa->read_cntvct_el0) {
513 		clocksource_counter.archdata.vdso_direct = false;
514 		vdso_default = false;
515 	}
516 }
517 
518 static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
519 					    void *arg)
520 {
521 	const struct arch_timer_erratum_workaround *wa;
522 	ate_match_fn_t match_fn = NULL;
523 	bool local = false;
524 
525 	switch (type) {
526 	case ate_match_dt:
527 		match_fn = arch_timer_check_dt_erratum;
528 		break;
529 	case ate_match_local_cap_id:
530 		match_fn = arch_timer_check_local_cap_erratum;
531 		local = true;
532 		break;
533 	case ate_match_acpi_oem_info:
534 		match_fn = arch_timer_check_acpi_oem_erratum;
535 		break;
536 	default:
537 		WARN_ON(1);
538 		return;
539 	}
540 
541 	wa = arch_timer_iterate_errata(type, match_fn, arg);
542 	if (!wa)
543 		return;
544 
545 	if (needs_unstable_timer_counter_workaround()) {
546 		const struct arch_timer_erratum_workaround *__wa;
547 		__wa = __this_cpu_read(timer_unstable_counter_workaround);
548 		if (__wa && wa != __wa)
549 			pr_warn("Can't enable workaround for %s (clashes with %s\n)",
550 				wa->desc, __wa->desc);
551 
552 		if (__wa)
553 			return;
554 	}
555 
556 	arch_timer_enable_workaround(wa, local);
557 	pr_info("Enabling %s workaround for %s\n",
558 		local ? "local" : "global", wa->desc);
559 }
560 
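/*
 * Dispatch to a per-CPU workaround's 'fn' hook if one is installed,
 * storing its return value in 'r' and evaluating to true; evaluates to
 * false when no workaround (or no hook) applies. Typical use:
 *
 *	if (erratum_handler(set_next_event_virt, ret, evt, clk))
 *		return ret;
 */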
561 #define erratum_handler(fn, r, ...)					\
562 ({									\
563 	bool __val;							\
564 	if (needs_unstable_timer_counter_workaround()) {		\
565 		const struct arch_timer_erratum_workaround *__wa;	\
566 		__wa = __this_cpu_read(timer_unstable_counter_workaround); \
567 		if (__wa && __wa->fn) {					\
568 			r = __wa->fn(__VA_ARGS__);			\
569 			__val = true;					\
570 		} else {						\
571 			__val = false;					\
572 		}							\
573 	} else {							\
574 		__val = false;						\
575 	}								\
576 	__val;								\
577 })
578 
579 static bool arch_timer_this_cpu_has_cntvct_wa(void)
580 {
581 	const struct arch_timer_erratum_workaround *wa;
582 
583 	wa = __this_cpu_read(timer_unstable_counter_workaround);
584 	return wa && wa->read_cntvct_el0;
585 }
586 #else
587 #define arch_timer_check_ool_workaround(t,a)		do { } while(0)
588 #define erratum_set_next_event_tval_virt(...)		({BUG(); 0;})
589 #define erratum_set_next_event_tval_phys(...)		({BUG(); 0;})
590 #define erratum_handler(fn, r, ...)			({false;})
591 #define arch_timer_this_cpu_has_cntvct_wa()		({false;})
592 #endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
593 
594 static __always_inline irqreturn_t timer_handler(const int access,
595 					struct clock_event_device *evt)
596 {
597 	unsigned long ctrl;
598 
599 	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
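	/*
	 * The timer has fired (ISTATUS is set): mask the interrupt so
	 * it deasserts, then let the event handler program the next
	 * event or shut the timer down.
	 */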
600 	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
601 		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
602 		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
603 		evt->event_handler(evt);
604 		return IRQ_HANDLED;
605 	}
606 
607 	return IRQ_NONE;
608 }
609 
610 static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
611 {
612 	struct clock_event_device *evt = dev_id;
613 
614 	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
615 }
616 
617 static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
618 {
619 	struct clock_event_device *evt = dev_id;
620 
621 	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
622 }
623 
624 static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
625 {
626 	struct clock_event_device *evt = dev_id;
627 
628 	return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
629 }
630 
631 static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
632 {
633 	struct clock_event_device *evt = dev_id;
634 
635 	return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
636 }
637 
638 static __always_inline int timer_shutdown(const int access,
639 					  struct clock_event_device *clk)
640 {
641 	unsigned long ctrl;
642 
643 	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
644 	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
645 	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
646 
647 	return 0;
648 }
649 
650 static int arch_timer_shutdown_virt(struct clock_event_device *clk)
651 {
652 	return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
653 }
654 
655 static int arch_timer_shutdown_phys(struct clock_event_device *clk)
656 {
657 	return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
658 }
659 
660 static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
661 {
662 	return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
663 }
664 
665 static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
666 {
667 	return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
668 }
669 
670 static __always_inline void set_next_event(const int access, unsigned long evt,
671 					   struct clock_event_device *clk)
672 {
673 	unsigned long ctrl;
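	/*
	 * Build a CTRL value with the timer enabled and its interrupt
	 * unmasked, program the downcounter (TVAL) with the delta, then
	 * write CTRL to start the timer.
	 */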
674 	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
675 	ctrl |= ARCH_TIMER_CTRL_ENABLE;
676 	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
677 	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
678 	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
679 }
680 
681 static int arch_timer_set_next_event_virt(unsigned long evt,
682 					  struct clock_event_device *clk)
683 {
684 	int ret;
685 
686 	if (erratum_handler(set_next_event_virt, ret, evt, clk))
687 		return ret;
688 
689 	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
690 	return 0;
691 }
692 
693 static int arch_timer_set_next_event_phys(unsigned long evt,
694 					  struct clock_event_device *clk)
695 {
696 	int ret;
697 
698 	if (erratum_handler(set_next_event_phys, ret, evt, clk))
699 		return ret;
700 
701 	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
702 	return 0;
703 }
704 
705 static int arch_timer_set_next_event_virt_mem(unsigned long evt,
706 					      struct clock_event_device *clk)
707 {
708 	set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
709 	return 0;
710 }
711 
712 static int arch_timer_set_next_event_phys_mem(unsigned long evt,
713 					      struct clock_event_device *clk)
714 {
715 	set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
716 	return 0;
717 }
718 
719 static void __arch_timer_setup(unsigned type,
720 			       struct clock_event_device *clk)
721 {
722 	clk->features = CLOCK_EVT_FEAT_ONESHOT;
723 
724 	if (type == ARCH_TIMER_TYPE_CP15) {
725 		if (arch_timer_c3stop)
726 			clk->features |= CLOCK_EVT_FEAT_C3STOP;
727 		clk->name = "arch_sys_timer";
728 		clk->rating = 450;
729 		clk->cpumask = cpumask_of(smp_processor_id());
730 		clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
731 		switch (arch_timer_uses_ppi) {
732 		case ARCH_TIMER_VIRT_PPI:
733 			clk->set_state_shutdown = arch_timer_shutdown_virt;
734 			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
735 			clk->set_next_event = arch_timer_set_next_event_virt;
736 			break;
737 		case ARCH_TIMER_PHYS_SECURE_PPI:
738 		case ARCH_TIMER_PHYS_NONSECURE_PPI:
739 		case ARCH_TIMER_HYP_PPI:
740 			clk->set_state_shutdown = arch_timer_shutdown_phys;
741 			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
742 			clk->set_next_event = arch_timer_set_next_event_phys;
743 			break;
744 		default:
745 			BUG();
746 		}
747 
748 		arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
749 	} else {
750 		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
751 		clk->name = "arch_mem_timer";
752 		clk->rating = 400;
753 		clk->cpumask = cpu_possible_mask;
754 		if (arch_timer_mem_use_virtual) {
755 			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
756 			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
757 			clk->set_next_event =
758 				arch_timer_set_next_event_virt_mem;
759 		} else {
760 			clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
761 			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
762 			clk->set_next_event =
763 				arch_timer_set_next_event_phys_mem;
764 		}
765 	}
766 
767 	clk->set_state_shutdown(clk);
768 
769 	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
770 }
771 
772 static void arch_timer_evtstrm_enable(int divider)
773 {
774 	u32 cntkctl = arch_timer_get_cntkctl();
775 
776 	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
777 	/* Set the divider and enable virtual event stream */
778 	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
779 			| ARCH_TIMER_VIRT_EVT_EN;
780 	arch_timer_set_cntkctl(cntkctl);
781 	elf_hwcap |= HWCAP_EVTSTRM;
782 #ifdef CONFIG_COMPAT
783 	compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
784 #endif
785 	cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
786 }
787 
788 static void arch_timer_configure_evtstream(void)
789 {
790 	int evt_stream_div, pos;
791 
792 	/* Find the closest power of two to the divisor */
793 	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
794 	pos = fls(evt_stream_div);
795 	if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
796 		pos--;
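	/* The divider field in CNTKCTL is 4 bits wide, hence the cap at 15. */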
797 	/* enable event stream */
798 	arch_timer_evtstrm_enable(min(pos, 15));
799 }
800 
801 static void arch_counter_set_user_access(void)
802 {
803 	u32 cntkctl = arch_timer_get_cntkctl();
804 
805 	/* Disable user access to the timers and both counters */
806 	/* Also disable virtual event stream */
807 	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
808 			| ARCH_TIMER_USR_VT_ACCESS_EN
809 		        | ARCH_TIMER_USR_VCT_ACCESS_EN
810 			| ARCH_TIMER_VIRT_EVT_EN
811 			| ARCH_TIMER_USR_PCT_ACCESS_EN);
812 
813 	/*
814 	 * Enable user access to the virtual counter if it doesn't
815 	 * need to be workaround. The vdso may have been already
816 	 * disabled though.
817 	 */
818 	if (arch_timer_this_cpu_has_cntvct_wa())
819 		pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id());
820 	else
821 		cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
822 
823 	arch_timer_set_cntkctl(cntkctl);
824 }
825 
826 static bool arch_timer_has_nonsecure_ppi(void)
827 {
828 	return (arch_timer_uses_ppi == ARCH_TIMER_PHYS_SECURE_PPI &&
829 		arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
830 }
831 
832 static u32 check_ppi_trigger(int irq)
833 {
834 	u32 flags = irq_get_trigger_type(irq);
835 
836 	if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
837 		pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
838 		pr_warn("WARNING: Please fix your firmware\n");
839 		flags = IRQF_TRIGGER_LOW;
840 	}
841 
842 	return flags;
843 }
844 
845 static int arch_timer_starting_cpu(unsigned int cpu)
846 {
847 	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
848 	u32 flags;
849 
850 	__arch_timer_setup(ARCH_TIMER_TYPE_CP15, clk);
851 
852 	flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
853 	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);
854 
855 	if (arch_timer_has_nonsecure_ppi()) {
856 		flags = check_ppi_trigger(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
857 		enable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
858 				  flags);
859 	}
860 
861 	arch_counter_set_user_access();
862 	if (evtstrm_enable)
863 		arch_timer_configure_evtstream();
864 
865 	return 0;
866 }
867 
868 /*
869  * For historical reasons, when probing with DT we use whichever (non-zero)
870  * rate was probed first, and don't verify that others match. If the first node
871  * probed has a clock-frequency property, this overrides the HW register.
872  */
873 static void arch_timer_of_configure_rate(u32 rate, struct device_node *np)
874 {
875 	/* Who has more than one independent system counter? */
876 	if (arch_timer_rate)
877 		return;
878 
879 	if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate))
880 		arch_timer_rate = rate;
881 
882 	/* Check the timer frequency. */
883 	if (arch_timer_rate == 0)
884 		pr_warn("frequency not available\n");
885 }
886 
887 static void arch_timer_banner(unsigned type)
888 {
889 	pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
890 		type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "",
891 		type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ?
892 			" and " : "",
893 		type & ARCH_TIMER_TYPE_MEM ? "mmio" : "",
894 		(unsigned long)arch_timer_rate / 1000000,
895 		(unsigned long)(arch_timer_rate / 10000) % 100,
896 		type & ARCH_TIMER_TYPE_CP15 ?
897 			(arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys" :
898 			"",
899 		type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? "/" : "",
900 		type & ARCH_TIMER_TYPE_MEM ?
901 			arch_timer_mem_use_virtual ? "virt" : "phys" :
902 			"");
903 }
904 
905 u32 arch_timer_get_rate(void)
906 {
907 	return arch_timer_rate;
908 }
909 
910 bool arch_timer_evtstrm_available(void)
911 {
912 	/*
913 	 * We might get called from a preemptible context. This is fine
914 	 * because availability of the event stream should be always the same
915 	 * for a preemptible context and context where we might resume a task.
916 	 */
917 	return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available);
918 }
919 
920 static u64 arch_counter_get_cntvct_mem(void)
921 {
922 	u32 vct_lo, vct_hi, tmp_hi;
923 
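	/*
	 * Assemble the 64-bit counter from two 32-bit reads: read the
	 * high word, the low word, then the high word again, and retry
	 * until the two high-word reads match.
	 */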
924 	do {
925 		vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
926 		vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
927 		tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
928 	} while (vct_hi != tmp_hi);
929 
930 	return ((u64) vct_hi << 32) | vct_lo;
931 }
932 
933 static struct arch_timer_kvm_info arch_timer_kvm_info;
934 
935 struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
936 {
937 	return &arch_timer_kvm_info;
938 }
939 
940 static void __init arch_counter_register(unsigned type)
941 {
942 	u64 start_count;
943 
944 	/* Register the CP15 based counter if we have one */
945 	if (type & ARCH_TIMER_TYPE_CP15) {
946 		if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
947 		    arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI)
948 			arch_timer_read_counter = arch_counter_get_cntvct;
949 		else
950 			arch_timer_read_counter = arch_counter_get_cntpct;
951 
952 		clocksource_counter.archdata.vdso_direct = vdso_default;
953 	} else {
954 		arch_timer_read_counter = arch_counter_get_cntvct_mem;
955 	}
956 
957 	if (!arch_counter_suspend_stop)
958 		clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
959 	start_count = arch_timer_read_counter();
960 	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
961 	cyclecounter.mult = clocksource_counter.mult;
962 	cyclecounter.shift = clocksource_counter.shift;
963 	timecounter_init(&arch_timer_kvm_info.timecounter,
964 			 &cyclecounter, start_count);
965 
966 	/* 56 bits minimum, so we assume worst case rollover */
967 	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
968 }
969 
970 static void arch_timer_stop(struct clock_event_device *clk)
971 {
972 	pr_debug("disable IRQ%d cpu #%d\n", clk->irq, smp_processor_id());
973 
974 	disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
975 	if (arch_timer_has_nonsecure_ppi())
976 		disable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
977 
978 	clk->set_state_shutdown(clk);
979 }
980 
981 static int arch_timer_dying_cpu(unsigned int cpu)
982 {
983 	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
984 
985 	cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
986 
987 	arch_timer_stop(clk);
988 	return 0;
989 }
990 
991 #ifdef CONFIG_CPU_PM
992 static DEFINE_PER_CPU(unsigned long, saved_cntkctl);
993 static int arch_timer_cpu_pm_notify(struct notifier_block *self,
994 				    unsigned long action, void *hcpu)
995 {
996 	if (action == CPU_PM_ENTER) {
997 		__this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl());
998 
999 		cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
1000 	} else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) {
1001 		arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));
1002 
1003 		if (elf_hwcap & HWCAP_EVTSTRM)
1004 			cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
1005 	}
1006 	return NOTIFY_OK;
1007 }
1008 
1009 static struct notifier_block arch_timer_cpu_pm_notifier = {
1010 	.notifier_call = arch_timer_cpu_pm_notify,
1011 };
1012 
1013 static int __init arch_timer_cpu_pm_init(void)
1014 {
1015 	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
1016 }
1017 
1018 static void __init arch_timer_cpu_pm_deinit(void)
1019 {
1020 	WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
1021 }
1022 
1023 #else
1024 static int __init arch_timer_cpu_pm_init(void)
1025 {
1026 	return 0;
1027 }
1028 
1029 static void __init arch_timer_cpu_pm_deinit(void)
1030 {
1031 }
1032 #endif
1033 
1034 static int __init arch_timer_register(void)
1035 {
1036 	int err;
1037 	int ppi;
1038 
1039 	arch_timer_evt = alloc_percpu(struct clock_event_device);
1040 	if (!arch_timer_evt) {
1041 		err = -ENOMEM;
1042 		goto out;
1043 	}
1044 
1045 	ppi = arch_timer_ppi[arch_timer_uses_ppi];
1046 	switch (arch_timer_uses_ppi) {
1047 	case ARCH_TIMER_VIRT_PPI:
1048 		err = request_percpu_irq(ppi, arch_timer_handler_virt,
1049 					 "arch_timer", arch_timer_evt);
1050 		break;
1051 	case ARCH_TIMER_PHYS_SECURE_PPI:
1052 	case ARCH_TIMER_PHYS_NONSECURE_PPI:
1053 		err = request_percpu_irq(ppi, arch_timer_handler_phys,
1054 					 "arch_timer", arch_timer_evt);
1055 		if (!err && arch_timer_has_nonsecure_ppi()) {
1056 			ppi = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
1057 			err = request_percpu_irq(ppi, arch_timer_handler_phys,
1058 						 "arch_timer", arch_timer_evt);
1059 			if (err)
1060 				free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_SECURE_PPI],
1061 						arch_timer_evt);
1062 		}
1063 		break;
1064 	case ARCH_TIMER_HYP_PPI:
1065 		err = request_percpu_irq(ppi, arch_timer_handler_phys,
1066 					 "arch_timer", arch_timer_evt);
1067 		break;
1068 	default:
1069 		BUG();
1070 	}
1071 
1072 	if (err) {
1073 		pr_err("can't register interrupt %d (%d)\n", ppi, err);
1074 		goto out_free;
1075 	}
1076 
1077 	err = arch_timer_cpu_pm_init();
1078 	if (err)
1079 		goto out_unreg_notify;
1080 
1081 	/* Register and immediately configure the timer on the boot CPU */
1082 	err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
1083 				"clockevents/arm/arch_timer:starting",
1084 				arch_timer_starting_cpu, arch_timer_dying_cpu);
1085 	if (err)
1086 		goto out_unreg_cpupm;
1087 	return 0;
1088 
1089 out_unreg_cpupm:
1090 	arch_timer_cpu_pm_deinit();
1091 
1092 out_unreg_notify:
1093 	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
1094 	if (arch_timer_has_nonsecure_ppi())
1095 		free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
1096 				arch_timer_evt);
1097 
1098 out_free:
1099 	free_percpu(arch_timer_evt);
1100 out:
1101 	return err;
1102 }
1103 
1104 static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
1105 {
1106 	int ret;
1107 	irq_handler_t func;
1108 	struct arch_timer *t;
1109 
1110 	t = kzalloc(sizeof(*t), GFP_KERNEL);
1111 	if (!t)
1112 		return -ENOMEM;
1113 
1114 	t->base = base;
1115 	t->evt.irq = irq;
1116 	__arch_timer_setup(ARCH_TIMER_TYPE_MEM, &t->evt);
1117 
1118 	if (arch_timer_mem_use_virtual)
1119 		func = arch_timer_handler_virt_mem;
1120 	else
1121 		func = arch_timer_handler_phys_mem;
1122 
1123 	ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
1124 	if (ret) {
1125 		pr_err("Failed to request mem timer irq\n");
1126 		kfree(t);
1127 	}
1128 
1129 	return ret;
1130 }
1131 
1132 static const struct of_device_id arch_timer_of_match[] __initconst = {
1133 	{ .compatible   = "arm,armv7-timer",    },
1134 	{ .compatible   = "arm,armv8-timer",    },
1135 	{},
1136 };
1137 
1138 static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
1139 	{ .compatible   = "arm,armv7-timer-mem", },
1140 	{},
1141 };
1142 
1143 static bool __init arch_timer_needs_of_probing(void)
1144 {
1145 	struct device_node *dn;
1146 	bool needs_probing = false;
1147 	unsigned int mask = ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM;
1148 
1149 	/* We have two timers, and both device-tree nodes are probed. */
1150 	if ((arch_timers_present & mask) == mask)
1151 		return false;
1152 
1153 	/*
1154 	 * Only one type of timer is probed,
1155 	 * check if we have another type of timer node in device-tree.
1156 	 */
1157 	if (arch_timers_present & ARCH_TIMER_TYPE_CP15)
1158 		dn = of_find_matching_node(NULL, arch_timer_mem_of_match);
1159 	else
1160 		dn = of_find_matching_node(NULL, arch_timer_of_match);
1161 
1162 	if (dn && of_device_is_available(dn))
1163 		needs_probing = true;
1164 
1165 	of_node_put(dn);
1166 
1167 	return needs_probing;
1168 }
1169 
1170 static int __init arch_timer_common_init(void)
1171 {
1172 	arch_timer_banner(arch_timers_present);
1173 	arch_counter_register(arch_timers_present);
1174 	return arch_timer_arch_init();
1175 }
1176 
1177 /**
1178  * arch_timer_select_ppi() - Select suitable PPI for the current system.
1179  *
1180  * If HYP mode is available, we know that the physical timer
1181  * has been configured to be accessible from PL1. Use it, so
1182  * that a guest can use the virtual timer instead.
1183  *
1184  * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
1185  * accesses to CNTP_*_EL1 registers are silently redirected to
1186  * their CNTHP_*_EL2 counterparts, and use a different PPI
1187  * number.
1188  *
 * If no interrupt is provided for the virtual timer, we'll have to
 * stick to the physical timer. It'd better be accessible...
 * For arm64 we never use the secure interrupt.
1192  *
1193  * Return: a suitable PPI type for the current system.
1194  */
1195 static enum arch_timer_ppi_nr __init arch_timer_select_ppi(void)
1196 {
1197 	if (is_kernel_in_hyp_mode())
1198 		return ARCH_TIMER_HYP_PPI;
1199 
1200 	if (!is_hyp_mode_available() && arch_timer_ppi[ARCH_TIMER_VIRT_PPI])
1201 		return ARCH_TIMER_VIRT_PPI;
1202 
1203 	if (IS_ENABLED(CONFIG_ARM64))
1204 		return ARCH_TIMER_PHYS_NONSECURE_PPI;
1205 
1206 	return ARCH_TIMER_PHYS_SECURE_PPI;
1207 }
1208 
1209 static int __init arch_timer_of_init(struct device_node *np)
1210 {
1211 	int i, ret;
1212 	u32 rate;
1213 
1214 	if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
1215 		pr_warn("multiple nodes in dt, skipping\n");
1216 		return 0;
1217 	}
1218 
1219 	arch_timers_present |= ARCH_TIMER_TYPE_CP15;
1220 	for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++)
1221 		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
1222 
1223 	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
1224 
1225 	rate = arch_timer_get_cntfrq();
1226 	arch_timer_of_configure_rate(rate, np);
1227 
1228 	arch_timer_c3stop = !of_property_read_bool(np, "always-on");
1229 
1230 	/* Check for globally applicable workarounds */
1231 	arch_timer_check_ool_workaround(ate_match_dt, np);
1232 
1233 	/*
1234 	 * If we cannot rely on firmware initializing the timer registers then
1235 	 * we should use the physical timers instead.
1236 	 */
1237 	if (IS_ENABLED(CONFIG_ARM) &&
1238 	    of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
1239 		arch_timer_uses_ppi = ARCH_TIMER_PHYS_SECURE_PPI;
1240 	else
1241 		arch_timer_uses_ppi = arch_timer_select_ppi();
1242 
1243 	if (!arch_timer_ppi[arch_timer_uses_ppi]) {
1244 		pr_err("No interrupt available, giving up\n");
1245 		return -EINVAL;
1246 	}
1247 
1248 	/* On some systems, the counter stops ticking when in suspend. */
1249 	arch_counter_suspend_stop = of_property_read_bool(np,
1250 							 "arm,no-tick-in-suspend");
1251 
1252 	ret = arch_timer_register();
1253 	if (ret)
1254 		return ret;
1255 
1256 	if (arch_timer_needs_of_probing())
1257 		return 0;
1258 
1259 	return arch_timer_common_init();
1260 }
1261 TIMER_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
1262 TIMER_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
1263 
1264 static u32 __init
1265 arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
1266 {
1267 	void __iomem *base;
1268 	u32 rate;
1269 
1270 	base = ioremap(frame->cntbase, frame->size);
1271 	if (!base) {
1272 		pr_err("Unable to map frame @ %pa\n", &frame->cntbase);
1273 		return 0;
1274 	}
1275 
1276 	rate = readl_relaxed(base + CNTFRQ);
1277 
1278 	iounmap(base);
1279 
1280 	return rate;
1281 }
1282 
1283 static struct arch_timer_mem_frame * __init
1284 arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem)
1285 {
1286 	struct arch_timer_mem_frame *frame, *best_frame = NULL;
1287 	void __iomem *cntctlbase;
1288 	u32 cnttidr;
1289 	int i;
1290 
1291 	cntctlbase = ioremap(timer_mem->cntctlbase, timer_mem->size);
1292 	if (!cntctlbase) {
1293 		pr_err("Can't map CNTCTLBase @ %pa\n",
1294 			&timer_mem->cntctlbase);
1295 		return NULL;
1296 	}
1297 
1298 	cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
1299 
1300 	/*
1301 	 * Try to find a virtual capable frame. Otherwise fall back to a
1302 	 * physical capable frame.
1303 	 */
1304 	for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
1305 		u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
1306 			     CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
1307 
1308 		frame = &timer_mem->frame[i];
1309 		if (!frame->valid)
1310 			continue;
1311 
1312 		/* Try enabling everything, and see what sticks */
1313 		writel_relaxed(cntacr, cntctlbase + CNTACR(i));
1314 		cntacr = readl_relaxed(cntctlbase + CNTACR(i));
1315 
1316 		if ((cnttidr & CNTTIDR_VIRT(i)) &&
1317 		    !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
1318 			best_frame = frame;
1319 			arch_timer_mem_use_virtual = true;
1320 			break;
1321 		}
1322 
1323 		if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
1324 			continue;
1325 
1326 		best_frame = frame;
1327 	}
1328 
1329 	iounmap(cntctlbase);
1330 
1331 	return best_frame;
1332 }
1333 
1334 static int __init
1335 arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)
1336 {
1337 	void __iomem *base;
1338 	int ret, irq = 0;
1339 
1340 	if (arch_timer_mem_use_virtual)
1341 		irq = frame->virt_irq;
1342 	else
1343 		irq = frame->phys_irq;
1344 
1345 	if (!irq) {
1346 		pr_err("Frame missing %s irq.\n",
1347 		       arch_timer_mem_use_virtual ? "virt" : "phys");
1348 		return -EINVAL;
1349 	}
1350 
1351 	if (!request_mem_region(frame->cntbase, frame->size,
1352 				"arch_mem_timer"))
1353 		return -EBUSY;
1354 
1355 	base = ioremap(frame->cntbase, frame->size);
1356 	if (!base) {
1357 		pr_err("Can't map frame's registers\n");
1358 		return -ENXIO;
1359 	}
1360 
1361 	ret = arch_timer_mem_register(base, irq);
1362 	if (ret) {
1363 		iounmap(base);
1364 		return ret;
1365 	}
1366 
1367 	arch_counter_base = base;
1368 	arch_timers_present |= ARCH_TIMER_TYPE_MEM;
1369 
1370 	return 0;
1371 }
1372 
1373 static int __init arch_timer_mem_of_init(struct device_node *np)
1374 {
1375 	struct arch_timer_mem *timer_mem;
1376 	struct arch_timer_mem_frame *frame;
1377 	struct device_node *frame_node;
1378 	struct resource res;
1379 	int ret = -EINVAL;
1380 	u32 rate;
1381 
1382 	timer_mem = kzalloc(sizeof(*timer_mem), GFP_KERNEL);
1383 	if (!timer_mem)
1384 		return -ENOMEM;
1385 
1386 	if (of_address_to_resource(np, 0, &res))
1387 		goto out;
1388 	timer_mem->cntctlbase = res.start;
1389 	timer_mem->size = resource_size(&res);
1390 
1391 	for_each_available_child_of_node(np, frame_node) {
1392 		u32 n;
1393 		struct arch_timer_mem_frame *frame;
1394 
1395 		if (of_property_read_u32(frame_node, "frame-number", &n)) {
1396 			pr_err(FW_BUG "Missing frame-number.\n");
1397 			of_node_put(frame_node);
1398 			goto out;
1399 		}
1400 		if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
1401 			pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n",
1402 			       ARCH_TIMER_MEM_MAX_FRAMES - 1);
1403 			of_node_put(frame_node);
1404 			goto out;
1405 		}
1406 		frame = &timer_mem->frame[n];
1407 
1408 		if (frame->valid) {
1409 			pr_err(FW_BUG "Duplicated frame-number.\n");
1410 			of_node_put(frame_node);
1411 			goto out;
1412 		}
1413 
1414 		if (of_address_to_resource(frame_node, 0, &res)) {
1415 			of_node_put(frame_node);
1416 			goto out;
1417 		}
1418 		frame->cntbase = res.start;
1419 		frame->size = resource_size(&res);
1420 
1421 		frame->virt_irq = irq_of_parse_and_map(frame_node,
1422 						       ARCH_TIMER_VIRT_SPI);
1423 		frame->phys_irq = irq_of_parse_and_map(frame_node,
1424 						       ARCH_TIMER_PHYS_SPI);
1425 
1426 		frame->valid = true;
1427 	}
1428 
1429 	frame = arch_timer_mem_find_best_frame(timer_mem);
1430 	if (!frame) {
1431 		pr_err("Unable to find a suitable frame in timer @ %pa\n",
1432 			&timer_mem->cntctlbase);
1433 		ret = -EINVAL;
1434 		goto out;
1435 	}
1436 
1437 	rate = arch_timer_mem_frame_get_cntfrq(frame);
1438 	arch_timer_of_configure_rate(rate, np);
1439 
1440 	ret = arch_timer_mem_frame_register(frame);
1441 	if (!ret && !arch_timer_needs_of_probing())
1442 		ret = arch_timer_common_init();
1443 out:
1444 	kfree(timer_mem);
1445 	return ret;
1446 }
1447 TIMER_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
1448 		       arch_timer_mem_of_init);
1449 
1450 #ifdef CONFIG_ACPI_GTDT
1451 static int __init
1452 arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem)
1453 {
1454 	struct arch_timer_mem_frame *frame;
1455 	u32 rate;
1456 	int i;
1457 
1458 	for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
1459 		frame = &timer_mem->frame[i];
1460 
1461 		if (!frame->valid)
1462 			continue;
1463 
1464 		rate = arch_timer_mem_frame_get_cntfrq(frame);
1465 		if (rate == arch_timer_rate)
1466 			continue;
1467 
1468 		pr_err(FW_BUG "CNTFRQ mismatch: frame @ %pa: (0x%08lx), CPU: (0x%08lx)\n",
1469 			&frame->cntbase,
1470 			(unsigned long)rate, (unsigned long)arch_timer_rate);
1471 
1472 		return -EINVAL;
1473 	}
1474 
1475 	return 0;
1476 }
1477 
1478 static int __init arch_timer_mem_acpi_init(int platform_timer_count)
1479 {
1480 	struct arch_timer_mem *timers, *timer;
1481 	struct arch_timer_mem_frame *frame, *best_frame = NULL;
1482 	int timer_count, i, ret = 0;
1483 
1484 	timers = kcalloc(platform_timer_count, sizeof(*timers),
1485 			    GFP_KERNEL);
1486 	if (!timers)
1487 		return -ENOMEM;
1488 
1489 	ret = acpi_arch_timer_mem_init(timers, &timer_count);
1490 	if (ret || !timer_count)
1491 		goto out;
1492 
1493 	/*
1494 	 * While unlikely, it's theoretically possible that none of the frames
1495 	 * in a timer expose the combination of feature we want.
1496 	 */
1497 	for (i = 0; i < timer_count; i++) {
1498 		timer = &timers[i];
1499 
1500 		frame = arch_timer_mem_find_best_frame(timer);
1501 		if (!best_frame)
1502 			best_frame = frame;
1503 
1504 		ret = arch_timer_mem_verify_cntfrq(timer);
1505 		if (ret) {
1506 			pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n");
1507 			goto out;
1508 		}
1509 
1510 		if (!best_frame) /* implies !frame */
1511 			/*
1512 			 * Only complain about missing suitable frames if we
1513 			 * haven't already found one in a previous iteration.
1514 			 */
1515 			pr_err("Unable to find a suitable frame in timer @ %pa\n",
1516 				&timer->cntctlbase);
1517 	}
1518 
1519 	if (best_frame)
1520 		ret = arch_timer_mem_frame_register(best_frame);
1521 out:
1522 	kfree(timers);
1523 	return ret;
1524 }
1525 
/* Initialize per-processor generic timer and memory-mapped timer (if present) */
1527 static int __init arch_timer_acpi_init(struct acpi_table_header *table)
1528 {
1529 	int ret, platform_timer_count;
1530 
1531 	if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
1532 		pr_warn("already initialized, skipping\n");
1533 		return -EINVAL;
1534 	}
1535 
1536 	arch_timers_present |= ARCH_TIMER_TYPE_CP15;
1537 
1538 	ret = acpi_gtdt_init(table, &platform_timer_count);
1539 	if (ret) {
1540 		pr_err("Failed to init GTDT table.\n");
1541 		return ret;
1542 	}
1543 
1544 	arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI] =
1545 		acpi_gtdt_map_ppi(ARCH_TIMER_PHYS_NONSECURE_PPI);
1546 
1547 	arch_timer_ppi[ARCH_TIMER_VIRT_PPI] =
1548 		acpi_gtdt_map_ppi(ARCH_TIMER_VIRT_PPI);
1549 
1550 	arch_timer_ppi[ARCH_TIMER_HYP_PPI] =
1551 		acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI);
1552 
1553 	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
1554 
1555 	/*
1556 	 * When probing via ACPI, we have no mechanism to override the sysreg
1557 	 * CNTFRQ value. This *must* be correct.
1558 	 */
1559 	arch_timer_rate = arch_timer_get_cntfrq();
1560 	if (!arch_timer_rate) {
1561 		pr_err(FW_BUG "frequency not available.\n");
1562 		return -EINVAL;
1563 	}
1564 
1565 	arch_timer_uses_ppi = arch_timer_select_ppi();
1566 	if (!arch_timer_ppi[arch_timer_uses_ppi]) {
1567 		pr_err("No interrupt available, giving up\n");
1568 		return -EINVAL;
1569 	}
1570 
1571 	/* Always-on capability */
1572 	arch_timer_c3stop = acpi_gtdt_c3stop(arch_timer_uses_ppi);
1573 
1574 	/* Check for globally applicable workarounds */
1575 	arch_timer_check_ool_workaround(ate_match_acpi_oem_info, table);
1576 
1577 	ret = arch_timer_register();
1578 	if (ret)
1579 		return ret;
1580 
1581 	if (platform_timer_count &&
1582 	    arch_timer_mem_acpi_init(platform_timer_count))
1583 		pr_err("Failed to initialize memory-mapped timer.\n");
1584 
1585 	return arch_timer_common_init();
1586 }
1587 TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
1588 #endif
1589