1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2012 Regents of the University of California
4  * Copyright (C) 2017-2018 SiFive
5  * Copyright (C) 2020 Western Digital Corporation or its affiliates.
6  */
7 
8 #define pr_fmt(fmt) "riscv-intc: " fmt
9 #include <linux/acpi.h>
10 #include <linux/atomic.h>
11 #include <linux/bits.h>
12 #include <linux/cpu.h>
13 #include <linux/irq.h>
14 #include <linux/irqchip.h>
15 #include <linux/irqdomain.h>
16 #include <linux/interrupt.h>
17 #include <linux/module.h>
18 #include <linux/of.h>
19 #include <linux/smp.h>
20 #include <linux/soc/andes/irq.h>
21 
/* The single per-system INTC IRQ domain, created on the boot hart. */
static struct irq_domain *intc_domain;
/* Number of standard local interrupts (one enable bit each in the IE CSR). */
static unsigned int riscv_intc_nr_irqs __ro_after_init = BITS_PER_LONG;
/* First hwirq of the vendor-specific (custom) local interrupt range. */
static unsigned int riscv_intc_custom_base __ro_after_init = BITS_PER_LONG;
/* Number of custom local interrupts; zero unless a vendor INTC is detected. */
static unsigned int riscv_intc_custom_nr_irqs __ro_after_init;
26 
27 static asmlinkage void riscv_intc_irq(struct pt_regs *regs)
28 {
29 	unsigned long cause = regs->cause & ~CAUSE_IRQ_FLAG;
30 
31 	if (generic_handle_domain_irq(intc_domain, cause))
32 		pr_warn_ratelimited("Failed to handle interrupt (cause: %ld)\n", cause);
33 }
34 
35 /*
36  * On RISC-V systems local interrupts are masked or unmasked by writing
37  * the SIE (Supervisor Interrupt Enable) CSR.  As CSRs can only be written
38  * on the local hart, these functions can only be called on the hart that
39  * corresponds to the IRQ chip.
40  */
41 
/* Disable a standard local interrupt by clearing its bit in the IE CSR. */
static void riscv_intc_irq_mask(struct irq_data *d)
{
	csr_clear(CSR_IE, BIT(d->hwirq));
}
46 
/* Enable a standard local interrupt by setting its bit in the IE CSR. */
static void riscv_intc_irq_unmask(struct irq_data *d)
{
	csr_set(CSR_IE, BIT(d->hwirq));
}
51 
52 static void andes_intc_irq_mask(struct irq_data *d)
53 {
54 	/*
55 	 * Andes specific S-mode local interrupt causes (hwirq)
56 	 * are defined as (256 + n) and controlled by n-th bit
57 	 * of SLIE.
58 	 */
59 	unsigned int mask = BIT(d->hwirq % BITS_PER_LONG);
60 
61 	if (d->hwirq < ANDES_SLI_CAUSE_BASE)
62 		csr_clear(CSR_IE, mask);
63 	else
64 		csr_clear(ANDES_CSR_SLIE, mask);
65 }
66 
67 static void andes_intc_irq_unmask(struct irq_data *d)
68 {
69 	unsigned int mask = BIT(d->hwirq % BITS_PER_LONG);
70 
71 	if (d->hwirq < ANDES_SLI_CAUSE_BASE)
72 		csr_set(CSR_IE, mask);
73 	else
74 		csr_set(ANDES_CSR_SLIE, mask);
75 }
76 
/* Intentionally empty EOI callback; see the comment below for why. */
static void riscv_intc_irq_eoi(struct irq_data *d)
{
	/*
	 * The RISC-V INTC driver uses handle_percpu_devid_irq() flow
	 * for the per-HART local interrupts and child irqchip drivers
	 * (such as PLIC, SBI IPI, CLINT, APLIC, IMSIC, etc) implement
	 * chained handlers for the per-HART local interrupts.
	 *
	 * In the absence of irq_eoi(), the chained_irq_enter() and
	 * chained_irq_exit() functions (used by child irqchip drivers)
	 * will do unnecessary mask/unmask of per-HART local interrupts
	 * at the time of handling interrupts. To avoid this, we provide
	 * an empty irq_eoi() callback for RISC-V INTC irqchip.
	 */
}
92 
93 static struct irq_chip riscv_intc_chip = {
94 	.name = "RISC-V INTC",
95 	.irq_mask = riscv_intc_irq_mask,
96 	.irq_unmask = riscv_intc_irq_unmask,
97 	.irq_eoi = riscv_intc_irq_eoi,
98 };
99 
/*
 * irqchip for Andes cores: same standard causes plus the custom SLIE-gated
 * causes. The name string is kept identical to riscv_intc_chip.
 */
static struct irq_chip andes_intc_chip = {
	.name		= "RISC-V INTC",
	.irq_mask	= andes_intc_irq_mask,
	.irq_unmask	= andes_intc_irq_unmask,
	.irq_eoi	= riscv_intc_irq_eoi,
};
106 
107 static int riscv_intc_domain_map(struct irq_domain *d, unsigned int irq,
108 				 irq_hw_number_t hwirq)
109 {
110 	struct irq_chip *chip = d->host_data;
111 
112 	irq_set_percpu_devid(irq);
113 	irq_domain_set_info(d, irq, hwirq, chip, NULL, handle_percpu_devid_irq,
114 			    NULL, NULL);
115 
116 	return 0;
117 }
118 
119 static int riscv_intc_domain_alloc(struct irq_domain *domain,
120 				   unsigned int virq, unsigned int nr_irqs,
121 				   void *arg)
122 {
123 	int i, ret;
124 	irq_hw_number_t hwirq;
125 	unsigned int type = IRQ_TYPE_NONE;
126 	struct irq_fwspec *fwspec = arg;
127 
128 	ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
129 	if (ret)
130 		return ret;
131 
132 	/*
133 	 * Only allow hwirq for which we have corresponding standard or
134 	 * custom interrupt enable register.
135 	 */
136 	if ((hwirq >= riscv_intc_nr_irqs && hwirq < riscv_intc_custom_base) ||
137 	    (hwirq >= riscv_intc_custom_base + riscv_intc_custom_nr_irqs))
138 		return -EINVAL;
139 
140 	for (i = 0; i < nr_irqs; i++) {
141 		ret = riscv_intc_domain_map(domain, virq + i, hwirq + i);
142 		if (ret)
143 			return ret;
144 	}
145 
146 	return 0;
147 }
148 
149 static const struct irq_domain_ops riscv_intc_domain_ops = {
150 	.map	= riscv_intc_domain_map,
151 	.xlate	= irq_domain_xlate_onecell,
152 	.alloc	= riscv_intc_domain_alloc
153 };
154 
/* Accessor handed to the arch code so child irqchips can find the INTC fwnode. */
static struct fwnode_handle *riscv_intc_hwnode(void)
{
	return intc_domain->fwnode;
}
159 
160 static int __init riscv_intc_init_common(struct fwnode_handle *fn,
161 					 struct irq_chip *chip)
162 {
163 	int rc;
164 
165 	intc_domain = irq_domain_create_tree(fn, &riscv_intc_domain_ops, chip);
166 	if (!intc_domain) {
167 		pr_err("unable to add IRQ domain\n");
168 		return -ENXIO;
169 	}
170 
171 	rc = set_handle_irq(&riscv_intc_irq);
172 	if (rc) {
173 		pr_err("failed to set irq handler\n");
174 		return rc;
175 	}
176 
177 	riscv_set_intc_hwnode_fn(riscv_intc_hwnode);
178 
179 	pr_info("%d local interrupts mapped\n", riscv_intc_nr_irqs);
180 	if (riscv_intc_custom_nr_irqs) {
181 		pr_info("%d custom local interrupts mapped\n",
182 			riscv_intc_custom_nr_irqs);
183 	}
184 
185 	return 0;
186 }
187 
/*
 * DT probe for a per-hart INTC node. Only the node that belongs to the
 * boot hart creates the IRQ domain; every other node is merely marked
 * initialized. Returns 0 (also on benign skips) or the error from
 * riscv_intc_init_common().
 */
static int __init riscv_intc_init(struct device_node *node,
				  struct device_node *parent)
{
	struct irq_chip *chip = &riscv_intc_chip;
	unsigned long hartid;
	int rc;

	/* An INTC node without a resolvable parent hart id is skipped, not fatal. */
	rc = riscv_of_parent_hartid(node, &hartid);
	if (rc < 0) {
		pr_warn("unable to find hart id for %pOF\n", node);
		return 0;
	}

	/*
	 * The DT will have one INTC DT node under each CPU (or HART)
	 * DT node so riscv_intc_init() function will be called once
	 * for each INTC DT node. We only need to do INTC initialization
	 * for the INTC DT node belonging to boot CPU (or boot HART).
	 */
	if (riscv_hartid_to_cpuid(hartid) != smp_processor_id()) {
		/*
		 * The INTC nodes of each CPU are suppliers for downstream
		 * interrupt controllers (such as PLIC, IMSIC and APLIC
		 * direct-mode) so we should mark an INTC node as initialized
		 * if we are not creating IRQ domain for it.
		 */
		fwnode_dev_initialized(of_fwnode_handle(node), true);
		return 0;
	}

	/* Andes cores add a custom SLIE-controlled cause range on top. */
	if (of_device_is_compatible(node, "andestech,cpu-intc")) {
		riscv_intc_custom_base = ANDES_SLI_CAUSE_BASE;
		riscv_intc_custom_nr_irqs = ANDES_RV_IRQ_LAST;
		chip = &andes_intc_chip;
	}

	return riscv_intc_init_common(of_node_to_fwnode(node), chip);
}
226 
/* Both the standard and the Andes DT bindings share one init routine. */
IRQCHIP_DECLARE(riscv, "riscv,cpu-intc", riscv_intc_init);
IRQCHIP_DECLARE(andes, "andestech,cpu-intc", riscv_intc_init);
229 
230 #ifdef CONFIG_ACPI
231 
/*
 * ACPI probe, called once per MADT RINTC entry. Mirrors riscv_intc_init():
 * only the entry matching the boot hart creates the domain (always with
 * the standard chip — no vendor detection on the ACPI path). Returns 0 on
 * success or for skipped entries, a negative errno on failure.
 */
static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header,
				       const unsigned long end)
{
	struct acpi_madt_rintc *rintc;
	struct fwnode_handle *fn;
	int rc;

	rintc = (struct acpi_madt_rintc *)header;

	/*
	 * The ACPI MADT will have one INTC for each CPU (or HART)
	 * so riscv_intc_acpi_init() function will be called once
	 * for each INTC. We only do INTC initialization
	 * for the INTC belonging to the boot CPU (or boot HART).
	 */
	if (riscv_hartid_to_cpuid(rintc->hart_id) != smp_processor_id())
		return 0;

	/* ACPI has no DT node, so synthesize a named fwnode for the domain. */
	fn = irq_domain_alloc_named_fwnode("RISCV-INTC");
	if (!fn) {
		pr_err("unable to allocate INTC FW node\n");
		return -ENOMEM;
	}

	rc = riscv_intc_init_common(fn, &riscv_intc_chip);
	if (rc)
		irq_domain_free_fwnode(fn);

	return rc;
}
262 
263 IRQCHIP_ACPI_DECLARE(riscv_intc, ACPI_MADT_TYPE_RINTC, NULL,
264 		     ACPI_MADT_RINTC_VERSION_V1, riscv_intc_acpi_init);
265 #endif
266