// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Western Digital Corporation or its affiliates.
 *
 * Most M-mode (i.e. NoMMU) RISC-V systems have a CLINT MMIO timer
 * device.
 */

#define pr_fmt(fmt) "clint: " fmt
#include <linux/bitops.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/sched_clock.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/smp.h>
#include <linux/timex.h>

#ifndef CONFIG_RISCV_M_MODE
#include <asm/clint.h>
#endif

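/*
 * SiFive CLINT register layout (offsets from the CLINT base):
 *   0x0000: MSIP (software-interrupt pending), one 32-bit word per hart
 *   0x4000: MTIMECMP, one 64-bit compare register per hart
 *   0xbff8: MTIME, a single 64-bit free-running counter shared by all harts
 */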
#define CLINT_IPI_OFF		0
#define CLINT_TIMER_CMP_OFF	0x4000
#define CLINT_TIMER_VAL_OFF	0xbff8

/* CLINT manages IPI and Timer for RISC-V M-mode */
static u32 __iomem *clint_ipi_base;
static u64 __iomem *clint_timer_cmp;
static u64 __iomem *clint_timer_val;
static unsigned long clint_timer_freq;
static unsigned int clint_timer_irq;

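/*
 * On M-mode (NoMMU) kernels the "time" CSR is not usable, so the
 * architecture's get_cycles() path (see asm/timex.h) reads the CLINT's
 * MTIME register through this pointer instead.
 */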
#ifdef CONFIG_RISCV_M_MODE
u64 __iomem *clint_time_val;
#endif

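/*
 * Each hart has a 32-bit MSIP word in the CLINT: writing 1 raises a machine
 * software interrupt on that hart, writing 0 clears it.  The IPI ops below
 * simply set and clear the MSIP word of the target hart(s).
 */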
static void clint_send_ipi(const struct cpumask *target)
{
	unsigned int cpu;

	for_each_cpu(cpu, target)
		writel(1, clint_ipi_base + cpuid_to_hartid_map(cpu));
}

static void clint_clear_ipi(void)
{
	writel(0, clint_ipi_base + cpuid_to_hartid_map(smp_processor_id()));
}

static struct riscv_ipi_ops clint_ipi_ops = {
	.ipi_inject = clint_send_ipi,
	.ipi_clear = clint_clear_ipi,
};

#ifdef CONFIG_64BIT
#define clint_get_cycles()	readq_relaxed(clint_timer_val)
#else
#define clint_get_cycles()	readl_relaxed(clint_timer_val)
#define clint_get_cycles_hi()	readl_relaxed(((u32 *)clint_timer_val) + 1)
#endif

#ifdef CONFIG_64BIT
static u64 notrace clint_get_cycles64(void)
{
	return clint_get_cycles();
}
#else /* CONFIG_64BIT */
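/*
 * On 32-bit the 64-bit MTIME register has to be read as two 32-bit halves.
 * Re-read the high half until it is stable so that a carry from the low to
 * the high word between the two reads cannot produce a torn value.
 */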
static u64 notrace clint_get_cycles64(void)
{
	u32 hi, lo;

	do {
		hi = clint_get_cycles_hi();
		lo = clint_get_cycles();
	} while (hi != clint_get_cycles_hi());

	return ((u64)hi << 32) | lo;
}
#endif /* CONFIG_64BIT */

static u64 clint_rdtime(struct clocksource *cs)
{
	return clint_get_cycles64();
}

static struct clocksource clint_clocksource = {
	.name		= "clint_clocksource",
	.rating		= 300,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.read		= clint_rdtime,
};

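/*
 * Program the calling CPU's MTIMECMP register to the current MTIME value
 * plus the requested delta.  The CLINT asserts the hart's timer interrupt
 * whenever MTIME >= MTIMECMP, so this also re-enables the timer interrupt
 * enable bit that the interrupt handler masked.
 */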
static int clint_clock_next_event(unsigned long delta,
				   struct clock_event_device *ce)
{
	void __iomem *r = clint_timer_cmp +
			  cpuid_to_hartid_map(smp_processor_id());

	csr_set(CSR_IE, IE_TIE);
	writeq_relaxed(clint_get_cycles64() + delta, r);
	return 0;
}

static DEFINE_PER_CPU(struct clock_event_device, clint_clock_event) = {
	.name		= "clint_clockevent",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 100,
	.set_next_event	= clint_clock_next_event,
};

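/*
 * CPU hotplug "starting" callback: register this CPU's clockevent device
 * and enable the per-CPU timer interrupt.  The "dying" callback disables
 * the interrupt again when the CPU goes offline.
 */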
static int clint_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *ce = per_cpu_ptr(&clint_clock_event, cpu);

	ce->cpumask = cpumask_of(cpu);
	clockevents_config_and_register(ce, clint_timer_freq, 100, 0x7fffffff);

	enable_percpu_irq(clint_timer_irq,
			  irq_get_trigger_type(clint_timer_irq));
	return 0;
}

static int clint_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(clint_timer_irq);
	return 0;
}

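/*
 * The CLINT timer interrupt stays pending until MTIMECMP is written again,
 * so mask the timer interrupt enable bit here and let the clockevent core
 * re-arm it via clint_clock_next_event() when the next event is programmed.
 */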
static irqreturn_t clint_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evdev = this_cpu_ptr(&clint_clock_event);

	csr_clear(CSR_IE, IE_TIE);
	evdev->event_handler(evdev);

	return IRQ_HANDLED;
}

static int __init clint_timer_init_dt(struct device_node *np)
{
	int rc;
	u32 i, nr_irqs;
	void __iomem *base;
	struct of_phandle_args oirq;

	/*
	 * Ensure that CLINT device interrupts are either RV_IRQ_TIMER or
	 * RV_IRQ_SOFT. If any interrupt is something else, ignore the device.
	 */
	nr_irqs = of_irq_count(np);
	for (i = 0; i < nr_irqs; i++) {
		if (of_irq_parse_one(np, i, &oirq)) {
			pr_err("%pOFP: failed to parse irq %d.\n", np, i);
			continue;
		}

		if ((oirq.args_count != 1) ||
		    (oirq.args[0] != RV_IRQ_TIMER &&
		     oirq.args[0] != RV_IRQ_SOFT)) {
			pr_err("%pOFP: invalid irq %d (hwirq %d)\n",
			       np, i, oirq.args[0]);
			return -ENODEV;
		}

		/* Find parent irq domain and map timer irq */
		if (!clint_timer_irq &&
		    oirq.args[0] == RV_IRQ_TIMER &&
		    irq_find_host(oirq.np))
			clint_timer_irq = irq_of_parse_and_map(np, i);
	}

	/* If CLINT timer irq not found then fail */
	if (!clint_timer_irq) {
		pr_err("%pOFP: timer irq not found\n", np);
		return -ENODEV;
	}

	base = of_iomap(np, 0);
	if (!base) {
		pr_err("%pOFP: could not map registers\n", np);
		return -ENODEV;
	}

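	/*
	 * Split the single register window into its IPI, compare and counter
	 * parts.  The MTIME counter ticks at the timebase frequency that the
	 * early arch code read from the device tree (riscv_timebase).
	 */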
	clint_ipi_base = base + CLINT_IPI_OFF;
	clint_timer_cmp = base + CLINT_TIMER_CMP_OFF;
	clint_timer_val = base + CLINT_TIMER_VAL_OFF;
	clint_timer_freq = riscv_timebase;

#ifdef CONFIG_RISCV_M_MODE
	/*
	 * Yes, that's an odd naming scheme.  time_val is public, but hopefully
	 * will die in favor of something cleaner.
	 */
	clint_time_val = clint_timer_val;
#endif

	pr_info("%pOFP: timer running at %ld Hz\n", np, clint_timer_freq);

	rc = clocksource_register_hz(&clint_clocksource, clint_timer_freq);
	if (rc) {
		pr_err("%pOFP: clocksource register failed [%d]\n", np, rc);
		goto fail_iounmap;
	}

	sched_clock_register(clint_get_cycles64, 64, clint_timer_freq);

	rc = request_percpu_irq(clint_timer_irq, clint_timer_interrupt,
				 "clint-timer", &clint_clock_event);
	if (rc) {
		pr_err("registering percpu irq failed [%d]\n", rc);
		goto fail_iounmap;
	}

	rc = cpuhp_setup_state(CPUHP_AP_CLINT_TIMER_STARTING,
				"clockevents/clint/timer:starting",
				clint_timer_starting_cpu,
				clint_timer_dying_cpu);
	if (rc) {
		pr_err("%pOFP: cpuhp setup state failed [%d]\n", np, rc);
		goto fail_free_irq;
	}

	riscv_set_ipi_ops(&clint_ipi_ops);
	clint_clear_ipi();

	return 0;

fail_free_irq:
	free_percpu_irq(clint_timer_irq, &clint_clock_event);
fail_iounmap:
	iounmap(base);
	return rc;
}

TIMER_OF_DECLARE(clint_timer, "riscv,clint0", clint_timer_init_dt);
TIMER_OF_DECLARE(clint_timer1, "sifive,clint0", clint_timer_init_dt);