// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Western Digital Corporation or its affiliates.
 *
 * Most M-mode (i.e. NoMMU) RISC-V systems have a CLINT MMIO timer device.
 */

#define pr_fmt(fmt) "clint: " fmt
#include <linux/bitops.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/sched_clock.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/smp.h>
#include <linux/timex.h>

#ifndef CONFIG_RISCV_M_MODE
#include <asm/clint.h>
#endif

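/*
 * CLINT register layout: per-hart MSIP (software interrupt) registers start
 * at offset 0x0, per-hart MTIMECMP registers at 0x4000, and the free-running
 * MTIME counter lives at 0xbff8.
 */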
#define CLINT_IPI_OFF		0
#define CLINT_TIMER_CMP_OFF	0x4000
#define CLINT_TIMER_VAL_OFF	0xbff8

/* CLINT manages IPI and Timer for RISC-V M-mode */
static u32 __iomem *clint_ipi_base;
static unsigned int clint_ipi_irq;
static u64 __iomem *clint_timer_cmp;
static u64 __iomem *clint_timer_val;
static unsigned long clint_timer_freq;
static unsigned int clint_timer_irq;

#ifdef CONFIG_RISCV_M_MODE
u64 __iomem *clint_time_val;
EXPORT_SYMBOL(clint_time_val);
#endif

#ifdef CONFIG_SMP
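/*
 * Writing 1 to a hart's MSIP register raises a software interrupt on that
 * hart; writing 0 clears it again.
 */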
static void clint_send_ipi(unsigned int cpu)
{
	writel(1, clint_ipi_base + cpuid_to_hartid_map(cpu));
}

static void clint_clear_ipi(void)
{
	writel(0, clint_ipi_base + cpuid_to_hartid_map(smp_processor_id()));
}

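/*
 * Chained handler for the CLINT software interrupt: acknowledge the IPI by
 * clearing this hart's MSIP bit, then let the generic IPI mux dispatch any
 * pending virtual IPIs.
 */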
static void clint_ipi_interrupt(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);

	clint_clear_ipi();
	ipi_mux_process();

	chained_irq_exit(chip, desc);
}
#endif

#ifdef CONFIG_64BIT
#define clint_get_cycles()	readq_relaxed(clint_timer_val)
#else
#define clint_get_cycles()	readl_relaxed(clint_timer_val)
#define clint_get_cycles_hi()	readl_relaxed(((u32 *)clint_timer_val) + 1)
#endif

#ifdef CONFIG_64BIT
static u64 notrace clint_get_cycles64(void)
{
	return clint_get_cycles();
}
#else /* CONFIG_64BIT */
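/*
 * On 32-bit the 64-bit MTIME register has to be read as two 32-bit halves.
 * Re-read the high word afterwards to detect a carry between the two reads
 * and retry, so a consistent 64-bit value is returned.
 */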
static u64 notrace clint_get_cycles64(void)
{
	u32 hi, lo;

	do {
		hi = clint_get_cycles_hi();
		lo = clint_get_cycles();
	} while (hi != clint_get_cycles_hi());

	return ((u64)hi << 32) | lo;
}
#endif /* CONFIG_64BIT */

static u64 clint_rdtime(struct clocksource *cs)
{
	return clint_get_cycles64();
}

static struct clocksource clint_clocksource = {
	.name		= "clint_clocksource",
	.rating		= 300,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.read		= clint_rdtime,
};

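/*
 * Program the next event by writing an absolute deadline into this hart's
 * MTIMECMP register and unmasking the timer interrupt.
 */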
static int clint_clock_next_event(unsigned long delta,
				  struct clock_event_device *ce)
{
	void __iomem *r = clint_timer_cmp +
			  cpuid_to_hartid_map(smp_processor_id());

	csr_set(CSR_IE, IE_TIE);
	writeq_relaxed(clint_get_cycles64() + delta, r);
	return 0;
}

static DEFINE_PER_CPU(struct clock_event_device, clint_clock_event) = {
	.name		= "clint_clockevent",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 100,
	.set_next_event	= clint_clock_next_event,
};

static int clint_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *ce = per_cpu_ptr(&clint_clock_event, cpu);

	ce->cpumask = cpumask_of(cpu);
	clockevents_config_and_register(ce, clint_timer_freq, 100, 0x7fffffff);

	enable_percpu_irq(clint_timer_irq,
			  irq_get_trigger_type(clint_timer_irq));
	enable_percpu_irq(clint_ipi_irq,
			  irq_get_trigger_type(clint_ipi_irq));
	return 0;
}

static int clint_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(clint_timer_irq);
	/*
	 * Don't disable the IPI here when the CPU goes offline because
	 * masking/unmasking of virtual IPIs is handled by the generic
	 * IPI mux.
	 */
	return 0;
}

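/*
 * The CLINT timer interrupt stays pending as long as MTIME >= MTIMECMP, so
 * mask it here; it is unmasked again when the next event is programmed.
 */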
static irqreturn_t clint_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evdev = this_cpu_ptr(&clint_clock_event);

	csr_clear(CSR_IE, IE_TIE);
	evdev->event_handler(evdev);

	return IRQ_HANDLED;
}

static int __init clint_timer_init_dt(struct device_node *np)
{
	int rc;
	u32 i, nr_irqs;
	void __iomem *base;
	struct of_phandle_args oirq;

	/*
	 * Ensure that CLINT device interrupts are either RV_IRQ_TIMER or
	 * RV_IRQ_SOFT. If any of them is something else, ignore the device.
	 */
	nr_irqs = of_irq_count(np);
	for (i = 0; i < nr_irqs; i++) {
		if (of_irq_parse_one(np, i, &oirq)) {
			pr_err("%pOFP: failed to parse irq %d.\n", np, i);
			continue;
		}

		if ((oirq.args_count != 1) ||
		    (oirq.args[0] != RV_IRQ_TIMER &&
		     oirq.args[0] != RV_IRQ_SOFT)) {
			pr_err("%pOFP: invalid irq %d (hwirq %d)\n",
			       np, i, oirq.args[0]);
			return -ENODEV;
		}

		/* Find parent irq domain and map IPI irq */
		if (!clint_ipi_irq &&
		    oirq.args[0] == RV_IRQ_SOFT &&
		    irq_find_host(oirq.np))
			clint_ipi_irq = irq_of_parse_and_map(np, i);

		/* Find parent irq domain and map timer irq */
		if (!clint_timer_irq &&
		    oirq.args[0] == RV_IRQ_TIMER &&
		    irq_find_host(oirq.np))
			clint_timer_irq = irq_of_parse_and_map(np, i);
	}

	/* If the CLINT IPI or timer irq was not found then fail */
	if (!clint_ipi_irq || !clint_timer_irq) {
		pr_err("%pOFP: ipi/timer irq not found\n", np);
		return -ENODEV;
	}

	base = of_iomap(np, 0);
	if (!base) {
		pr_err("%pOFP: could not map registers\n", np);
		return -ENODEV;
	}

	clint_ipi_base = base + CLINT_IPI_OFF;
	clint_timer_cmp = base + CLINT_TIMER_CMP_OFF;
	clint_timer_val = base + CLINT_TIMER_VAL_OFF;
	clint_timer_freq = riscv_timebase;

#ifdef CONFIG_RISCV_M_MODE
	/*
	 * Yes, that's an odd naming scheme. time_val is public, but hopefully
	 * will die in favor of something cleaner.
	 */
	clint_time_val = clint_timer_val;
#endif

	pr_info("%pOFP: timer running at %ld Hz\n", np, clint_timer_freq);

	rc = clocksource_register_hz(&clint_clocksource, clint_timer_freq);
	if (rc) {
		pr_err("%pOFP: clocksource register failed [%d]\n", np, rc);
		goto fail_iounmap;
	}

	sched_clock_register(clint_get_cycles64, 64, clint_timer_freq);

	rc = request_percpu_irq(clint_timer_irq, clint_timer_interrupt,
				"clint-timer", &clint_clock_event);
	if (rc) {
		pr_err("registering percpu irq failed [%d]\n", rc);
		goto fail_iounmap;
	}

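	/*
	 * Create BITS_PER_BYTE virtual IPIs multiplexed on top of the single
	 * CLINT software interrupt and tell the RISC-V IPI core about the
	 * resulting Linux interrupt range.
	 */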
#ifdef CONFIG_SMP
	rc = ipi_mux_create(BITS_PER_BYTE, clint_send_ipi);
	if (rc <= 0) {
		pr_err("unable to create muxed IPIs\n");
		rc = (rc < 0) ? rc : -ENODEV;
		goto fail_free_irq;
	}

	irq_set_chained_handler(clint_ipi_irq, clint_ipi_interrupt);
	riscv_ipi_set_virq_range(rc, BITS_PER_BYTE, true);
	clint_clear_ipi();
#endif

	rc = cpuhp_setup_state(CPUHP_AP_CLINT_TIMER_STARTING,
			       "clockevents/clint/timer:starting",
			       clint_timer_starting_cpu,
			       clint_timer_dying_cpu);
	if (rc) {
		pr_err("%pOFP: cpuhp setup state failed [%d]\n", np, rc);
		goto fail_free_irq;
	}

	return 0;

fail_free_irq:
	free_percpu_irq(clint_timer_irq, &clint_clock_event);
fail_iounmap:
	iounmap(base);
	return rc;
}

TIMER_OF_DECLARE(clint_timer, "riscv,clint0", clint_timer_init_dt);
TIMER_OF_DECLARE(clint_timer1, "sifive,clint0", clint_timer_init_dt);