// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2018 Christoph Hellwig
 */
#define pr_fmt(fmt) "plic: " fmt
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <asm/smp.h>

/*
 * This driver implements a version of the RISC-V PLIC with the actual layout
 * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
 *
 *     https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
 *
 * The largest number of interrupts supported by devices marked as
 * 'sifive,plic-1.0.0' is 1024, of which device 0 is defined as non-existent
 * by the RISC-V Privileged Spec.
 */

#define MAX_DEVICES			1024
#define MAX_CONTEXTS			15872

/*
 * Each interrupt source has a priority register associated with it.
 * We always hardwire it to one in Linux.
 */
#define PRIORITY_BASE			0
#define PRIORITY_PER_ID			4

/*
 * Each hart context has a vector of interrupt enable bits associated with it.
 * There's one bit for each interrupt source.
 */
#define ENABLE_BASE			0x2000
#define ENABLE_PER_HART			0x80

/*
 * Each hart context has a set of control registers associated with it.  Right
 * now there are only two: a source priority threshold over which the hart
 * will take an interrupt, and a register to claim interrupts.
 */
#define CONTEXT_BASE			0x200000
#define CONTEXT_PER_HART		0x1000
#define CONTEXT_THRESHOLD		0x00
#define CONTEXT_CLAIM			0x04

static void __iomem *plic_regs;

struct plic_handler {
	bool			present;
	void __iomem		*hart_base;
	/*
	 * Protect mask operations on the registers given that we can't
	 * assume atomic memory operations work on them.
	 */
	raw_spinlock_t		enable_lock;
	void __iomem		*enable_base;
};
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
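/*
 * The enable bits for a context are packed 32 sources to a 32-bit word:
 * source N is bit (N % 32) of the word at byte offset (N / 32) * 4 from
 * enable_base, so e.g. hwirq 40 is bit 8 of the second word.  Updating a
 * single bit takes a read-modify-write of the whole word, which is why
 * plic_toggle() below holds enable_lock around the readl()/writel() pair.
 */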
static inline void plic_toggle(struct plic_handler *handler,
				int hwirq, int enable)
{
	u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32);
	u32 hwirq_mask = 1 << (hwirq % 32);

	raw_spin_lock(&handler->enable_lock);
	if (enable)
		writel(readl(reg) | hwirq_mask, reg);
	else
		writel(readl(reg) & ~hwirq_mask, reg);
	raw_spin_unlock(&handler->enable_lock);
}

static inline void plic_irq_toggle(const struct cpumask *mask,
				   int hwirq, int enable)
{
	int cpu;

	writel(enable, plic_regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
	for_each_cpu(cpu, mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (handler->present)
			plic_toggle(handler, hwirq, enable);
	}
}

static void plic_irq_unmask(struct irq_data *d)
{
	unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
					   cpu_online_mask);
	if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
		return;
	plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
}

static void plic_irq_mask(struct irq_data *d)
{
	plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
}

#ifdef CONFIG_SMP
static int plic_set_affinity(struct irq_data *d,
			     const struct cpumask *mask_val, bool force)
{
	unsigned int cpu;

	if (force)
		cpu = cpumask_first(mask_val);
	else
		cpu = cpumask_any_and(mask_val, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
	plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}
#endif
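/*
 * Note the delivery model the two toggles above imply: a source is only
 * ever enabled for one hart context at a time.  plic_set_affinity() first
 * disables the source across all possible CPUs and then re-enables it on
 * a single online CPU, so the effective affinity always names exactly one
 * CPU even when the requested mask contains several.
 */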
static void plic_irq_eoi(struct irq_data *d)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
}

static struct irq_chip plic_chip = {
	.name		= "SiFive PLIC",
	.irq_mask	= plic_irq_mask,
	.irq_unmask	= plic_irq_unmask,
	.irq_eoi	= plic_irq_eoi,
#ifdef CONFIG_SMP
	.irq_set_affinity = plic_set_affinity,
#endif
};

static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data,
			    handle_fasteoi_irq, NULL, NULL);
	irq_set_noprobe(irq);
	return 0;
}

static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type;
	struct irq_fwspec *fwspec = arg;

	ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = plic_irqdomain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct irq_domain_ops plic_irqdomain_ops = {
	.translate	= irq_domain_translate_onecell,
	.alloc		= plic_irq_domain_alloc,
	.free		= irq_domain_free_irqs_top,
};

static struct irq_domain *plic_irqdomain;

/*
 * Handling an interrupt is a two-step process: first you claim the interrupt
 * by reading the claim register, then you complete the interrupt by writing
 * that source ID back to the same claim register.  This automatically enables
 * and disables the interrupt, so there's nothing else to do.
 */
static void plic_handle_irq(struct pt_regs *regs)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
	irq_hw_number_t hwirq;

	WARN_ON_ONCE(!handler->present);

	csr_clear(CSR_IE, IE_EIE);
	while ((hwirq = readl(claim))) {
		int irq = irq_find_mapping(plic_irqdomain, hwirq);

		if (unlikely(irq <= 0))
			pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
					hwirq);
		else
			generic_handle_irq(irq);
	}
	csr_set(CSR_IE, IE_EIE);
}

/*
 * Walk up the DT tree until we find an active RISC-V core (HART) node and
 * extract the hart ID from it.
 */
static int plic_find_hart_id(struct device_node *node)
{
	for (; node; node = node->parent) {
		if (of_device_is_compatible(node, "riscv"))
			return riscv_of_processor_hartid(node);
	}

	return -1;
}
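/*
 * For reference, a PLIC node describes its contexts via an
 * interrupts-extended property holding one (intc phandle, cause) pair per
 * context.  An illustrative sketch (addresses and counts are made up):
 *
 *	plic: interrupt-controller@c000000 {
 *		compatible = "sifive,plic-1.0.0";
 *		riscv,ndev = <53>;
 *		interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>;
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *	};
 *
 * of_irq_count() below counts those pairs, and parent.args[0] carries the
 * cause number; RV_IRQ_EXT selects the external-interrupt contexts for the
 * privilege level the kernel runs at (9 for S-mode, 11 for M-mode).
 */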
static int __init plic_init(struct device_node *node,
		struct device_node *parent)
{
	int error = 0, nr_contexts, nr_handlers = 0, i;
	u32 nr_irqs = 0;

	if (plic_regs) {
		pr_warn("PLIC already present.\n");
		return -ENXIO;
	}

	plic_regs = of_iomap(node, 0);
	if (WARN_ON(!plic_regs))
		return -EIO;

	error = -EINVAL;
	of_property_read_u32(node, "riscv,ndev", &nr_irqs);
	if (WARN_ON(!nr_irqs))
		goto out_iounmap;

	nr_contexts = of_irq_count(node);
	if (WARN_ON(!nr_contexts))
		goto out_iounmap;
	if (WARN_ON(nr_contexts < num_possible_cpus()))
		goto out_iounmap;

	error = -ENOMEM;
	plic_irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
			&plic_irqdomain_ops, NULL);
	if (WARN_ON(!plic_irqdomain))
		goto out_iounmap;

	for (i = 0; i < nr_contexts; i++) {
		struct of_phandle_args parent;
		struct plic_handler *handler;
		irq_hw_number_t hwirq;
		int cpu, hartid;
		u32 threshold = 0;

		if (of_irq_parse_one(node, i, &parent)) {
			pr_err("failed to parse parent for context %d.\n", i);
			continue;
		}

		/*
		 * Skip contexts other than external interrupts for our
		 * privilege level.
		 */
		if (parent.args[0] != RV_IRQ_EXT)
			continue;

		hartid = plic_find_hart_id(parent.np);
		if (hartid < 0) {
			pr_warn("failed to parse hart ID for context %d.\n", i);
			continue;
		}

		cpu = riscv_hartid_to_cpuid(hartid);
		if (cpu < 0) {
			pr_warn("Invalid cpuid for context %d\n", i);
			continue;
		}

		/*
		 * When running in M-mode we need to ignore the S-mode handler.
		 * Here we assume it always comes later, but that might be a
		 * little fragile.
		 */
		handler = per_cpu_ptr(&plic_handlers, cpu);
		if (handler->present) {
			pr_warn("handler already present for context %d.\n", i);
			threshold = 0xffffffff;
			goto done;
		}

		handler->present = true;
		handler->hart_base =
			plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
		raw_spin_lock_init(&handler->enable_lock);
		handler->enable_base =
			plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;

done:
		/* priority must be > threshold to trigger an interrupt */
		writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
		for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
			plic_toggle(handler, hwirq, 0);
		nr_handlers++;
	}

	pr_info("mapped %d interrupts with %d handlers for %d contexts.\n",
		nr_irqs, nr_handlers, nr_contexts);
	set_handle_irq(plic_handle_irq);
	return 0;

out_iounmap:
	iounmap(plic_regs);
	return error;
}

IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */
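/*
 * Consumers reference the PLIC with a single interrupt specifier cell,
 * matching irq_domain_translate_onecell above.  A hypothetical device
 * wired to source 10 would look like:
 *
 *	serial@10010000 {
 *		...
 *		interrupt-parent = <&plic>;
 *		interrupts = <10>;
 *	};
 */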