// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Western Digital Corporation or its affiliates.
 *
 * Most M-mode (i.e. NoMMU) RISC-V systems have a CLINT MMIO timer
 * device.
 */

#define pr_fmt(fmt) "clint: " fmt
#include <linux/bitops.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/sched_clock.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/smp.h>
#include <linux/timex.h>

#ifndef CONFIG_RISCV_M_MODE
#include <asm/clint.h>
#endif

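/*
 * CLINT register layout used here: a 32-bit MSIP (software interrupt)
 * register per hart at offset 0x0, a 64-bit MTIMECMP register per hart
 * at offset 0x4000, and a single shared 64-bit MTIME counter at offset
 * 0xbff8.
 */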
#define CLINT_IPI_OFF		0
#define CLINT_TIMER_CMP_OFF	0x4000
#define CLINT_TIMER_VAL_OFF	0xbff8

/* CLINT manages IPI and Timer for RISC-V M-mode */
static u32 __iomem *clint_ipi_base;
static u64 __iomem *clint_timer_cmp;
static u64 __iomem *clint_timer_val;
static unsigned long clint_timer_freq;
static unsigned int clint_timer_irq;

#ifdef CONFIG_RISCV_M_MODE
u64 __iomem *clint_time_val;
EXPORT_SYMBOL(clint_time_val);
#endif

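/*
 * An IPI is raised by writing 1 to the target hart's MSIP register and
 * acknowledged by the receiving hart writing 0 back to its own MSIP
 * register.
 */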
static void clint_send_ipi(const struct cpumask *target)
{
	unsigned int cpu;

	for_each_cpu(cpu, target)
		writel(1, clint_ipi_base + cpuid_to_hartid_map(cpu));
}

static void clint_clear_ipi(void)
{
	writel(0, clint_ipi_base + cpuid_to_hartid_map(smp_processor_id()));
}

static struct riscv_ipi_ops clint_ipi_ops = {
	.ipi_inject = clint_send_ipi,
	.ipi_clear = clint_clear_ipi,
};

#ifdef CONFIG_64BIT
#define clint_get_cycles()	readq_relaxed(clint_timer_val)
#else
#define clint_get_cycles()	readl_relaxed(clint_timer_val)
#define clint_get_cycles_hi()	readl_relaxed(((u32 *)clint_timer_val) + 1)
#endif

#ifdef CONFIG_64BIT
static u64 notrace clint_get_cycles64(void)
{
	return clint_get_cycles();
}
#else /* CONFIG_64BIT */
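/*
 * On 32-bit the 64-bit MTIME counter cannot be read atomically, so read
 * hi/lo/hi and retry if the high word changed in between (i.e. the low
 * word wrapped during the read).
 */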
static u64 notrace clint_get_cycles64(void)
{
	u32 hi, lo;

	do {
		hi = clint_get_cycles_hi();
		lo = clint_get_cycles();
	} while (hi != clint_get_cycles_hi());

	return ((u64)hi << 32) | lo;
}
#endif /* CONFIG_64BIT */

static u64 clint_rdtime(struct clocksource *cs)
{
	return clint_get_cycles64();
}

static struct clocksource clint_clocksource = {
	.name		= "clint_clocksource",
	.rating		= 300,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.read		= clint_rdtime,
};

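/*
 * Program the next event by writing "current MTIME + delta" into this
 * hart's MTIMECMP register and re-enabling the timer interrupt that the
 * interrupt handler masked.
 */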
static int clint_clock_next_event(unsigned long delta,
				  struct clock_event_device *ce)
{
	void __iomem *r = clint_timer_cmp +
			  cpuid_to_hartid_map(smp_processor_id());

	csr_set(CSR_IE, IE_TIE);
	writeq_relaxed(clint_get_cycles64() + delta, r);
	return 0;
}

static DEFINE_PER_CPU(struct clock_event_device, clint_clock_event) = {
	.name		= "clint_clockevent",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 100,
	.set_next_event	= clint_clock_next_event,
};

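/*
 * CPU hotplug callbacks: register the per-cpu clock_event_device and
 * enable the per-cpu timer interrupt when a hart comes online, and
 * disable it again when the hart goes offline.
 */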
static int clint_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *ce = per_cpu_ptr(&clint_clock_event, cpu);

	ce->cpumask = cpumask_of(cpu);
	clockevents_config_and_register(ce, clint_timer_freq, 100, 0x7fffffff);

	enable_percpu_irq(clint_timer_irq,
			  irq_get_trigger_type(clint_timer_irq));
	return 0;
}

static int clint_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(clint_timer_irq);
	return 0;
}

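/*
 * The timer interrupt stays pending as long as MTIME >= MTIMECMP, so
 * mask it (IE_TIE) here; clint_clock_next_event() unmasks it again once
 * a new compare value has been programmed.
 */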
static irqreturn_t clint_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evdev = this_cpu_ptr(&clint_clock_event);

	csr_clear(CSR_IE, IE_TIE);
	evdev->event_handler(evdev);

	return IRQ_HANDLED;
}

static int __init clint_timer_init_dt(struct device_node *np)
{
	int rc;
	u32 i, nr_irqs;
	void __iomem *base;
	struct of_phandle_args oirq;

	/*
	 * Ensure that CLINT device interrupts are either RV_IRQ_TIMER or
	 * RV_IRQ_SOFT. If anything else is found, ignore the device.
	 */
	nr_irqs = of_irq_count(np);
	for (i = 0; i < nr_irqs; i++) {
		if (of_irq_parse_one(np, i, &oirq)) {
			pr_err("%pOFP: failed to parse irq %d.\n", np, i);
			continue;
		}

		if ((oirq.args_count != 1) ||
		    (oirq.args[0] != RV_IRQ_TIMER &&
		     oirq.args[0] != RV_IRQ_SOFT)) {
			pr_err("%pOFP: invalid irq %d (hwirq %d)\n",
			       np, i, oirq.args[0]);
			return -ENODEV;
		}

		/* Find parent irq domain and map timer irq */
		if (!clint_timer_irq &&
		    oirq.args[0] == RV_IRQ_TIMER &&
		    irq_find_host(oirq.np))
			clint_timer_irq = irq_of_parse_and_map(np, i);
	}

	/* Fail if the CLINT timer irq was not found */
	if (!clint_timer_irq) {
		pr_err("%pOFP: timer irq not found\n", np);
		return -ENODEV;
	}

	base = of_iomap(np, 0);
	if (!base) {
		pr_err("%pOFP: could not map registers\n", np);
		return -ENODEV;
	}

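	/*
	 * MTIME ticks at the timebase frequency taken from the device
	 * tree (riscv_timebase); the clocksource, sched_clock and
	 * clockevents below are all registered at that rate.
	 */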
	clint_ipi_base = base + CLINT_IPI_OFF;
	clint_timer_cmp = base + CLINT_TIMER_CMP_OFF;
	clint_timer_val = base + CLINT_TIMER_VAL_OFF;
	clint_timer_freq = riscv_timebase;

#ifdef CONFIG_RISCV_M_MODE
	/*
	 * Yes, that's an odd naming scheme.  time_val is public, but hopefully
	 * will die in favor of something cleaner.
	 */
	clint_time_val = clint_timer_val;
#endif

	pr_info("%pOFP: timer running at %lu Hz\n", np, clint_timer_freq);

	rc = clocksource_register_hz(&clint_clocksource, clint_timer_freq);
	if (rc) {
		pr_err("%pOFP: clocksource register failed [%d]\n", np, rc);
		goto fail_iounmap;
	}

	sched_clock_register(clint_get_cycles64, 64, clint_timer_freq);

	rc = request_percpu_irq(clint_timer_irq, clint_timer_interrupt,
				"clint-timer", &clint_clock_event);
	if (rc) {
		pr_err("registering percpu irq failed [%d]\n", rc);
		goto fail_iounmap;
	}

	rc = cpuhp_setup_state(CPUHP_AP_CLINT_TIMER_STARTING,
			       "clockevents/clint/timer:starting",
			       clint_timer_starting_cpu,
			       clint_timer_dying_cpu);
	if (rc) {
		pr_err("%pOFP: cpuhp setup state failed [%d]\n", np, rc);
		goto fail_free_irq;
	}

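	/* Take over as the IPI backend and start with this hart's MSIP clear. */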
	riscv_set_ipi_ops(&clint_ipi_ops);
	clint_clear_ipi();

	return 0;

fail_free_irq:
	/* A percpu irq must be released with free_percpu_irq() */
	free_percpu_irq(clint_timer_irq, &clint_clock_event);
fail_iounmap:
	iounmap(base);
	return rc;
}

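/* Probe via either the generic or the SiFive compatible string. */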
TIMER_OF_DECLARE(clint_timer, "riscv,clint0", clint_timer_init_dt);
TIMER_OF_DECLARE(clint_timer1, "sifive,clint0", clint_timer_init_dt);