// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2018 Christoph Hellwig
 */
#define pr_fmt(fmt) "plic: " fmt
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <asm/smp.h>

/*
 * This driver implements a version of the RISC-V PLIC with the actual layout
 * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
 *
 *     https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
 *
 * The largest number supported by devices marked as 'sifive,plic-1.0.0' is
 * 1024, of which device 0 is defined as non-existent by the RISC-V Privileged
 * Spec.
 */

#define MAX_DEVICES			1024
#define MAX_CONTEXTS			15872

/*
 * Each interrupt source has a priority register associated with it.
 * We always hardwire it to one in Linux.
 */
#define PRIORITY_BASE			0
#define PRIORITY_PER_ID			4

/*
 * Each hart context has a vector of interrupt enable bits associated with it.
 * There's one bit for each interrupt source.
 */
#define ENABLE_BASE			0x2000
#define ENABLE_PER_HART			0x80

/*
 * Each hart context has a set of control registers associated with it. Right
 * now there are only two: a source priority threshold over which the hart
 * will take an interrupt, and a register to claim interrupts.
 */
#define CONTEXT_BASE			0x200000
#define CONTEXT_PER_HART		0x1000
#define CONTEXT_THRESHOLD		0x00
#define CONTEXT_CLAIM			0x04

#define PLIC_DISABLE_THRESHOLD		0x7
#define PLIC_ENABLE_THRESHOLD		0

struct plic_priv {
	struct cpumask lmask;
	struct irq_domain *irqdomain;
	void __iomem *regs;
};

struct plic_handler {
	bool			present;
	void __iomem		*hart_base;
	/*
	 * Protect mask operations on the registers given that we can't
	 * assume atomic memory operations work on them.
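	 * (Likely rationale: the enable words are plain MMIO, and RISC-V
	 * atomic memory operations are not guaranteed to be supported on
	 * I/O regions, hence the read-modify-write under enable_lock in
	 * plic_toggle() below.)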
	 */
	raw_spinlock_t		enable_lock;
	void __iomem		*enable_base;
	struct plic_priv	*priv;
};
static bool plic_cpuhp_setup_done;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);

static inline void plic_toggle(struct plic_handler *handler,
			       int hwirq, int enable)
{
	u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32);
	u32 hwirq_mask = 1 << (hwirq % 32);

	raw_spin_lock(&handler->enable_lock);
	if (enable)
		writel(readl(reg) | hwirq_mask, reg);
	else
		writel(readl(reg) & ~hwirq_mask, reg);
	raw_spin_unlock(&handler->enable_lock);
}

static inline void plic_irq_toggle(const struct cpumask *mask,
				   struct irq_data *d, int enable)
{
	int cpu;
	struct plic_priv *priv = irq_get_chip_data(d->irq);

	writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
	for_each_cpu(cpu, mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (handler->present &&
		    cpumask_test_cpu(cpu, &handler->priv->lmask))
			plic_toggle(handler, d->hwirq, enable);
	}
}

static void plic_irq_unmask(struct irq_data *d)
{
	struct cpumask amask;
	unsigned int cpu;
	struct plic_priv *priv = irq_get_chip_data(d->irq);

	cpumask_and(&amask, &priv->lmask, cpu_online_mask);
	cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
			      &amask);
	if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
		return;
	plic_irq_toggle(cpumask_of(cpu), d, 1);
}

static void plic_irq_mask(struct irq_data *d)
{
	struct plic_priv *priv = irq_get_chip_data(d->irq);

	plic_irq_toggle(&priv->lmask, d, 0);
}

#ifdef CONFIG_SMP
static int plic_set_affinity(struct irq_data *d,
			     const struct cpumask *mask_val, bool force)
{
	unsigned int cpu;
	struct cpumask amask;
	struct plic_priv *priv = irq_get_chip_data(d->irq);

	cpumask_and(&amask, &priv->lmask, mask_val);

	if (force)
		cpu = cpumask_first(&amask);
	else
		cpu = cpumask_any_and(&amask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	plic_irq_toggle(&priv->lmask, d, 0);
	plic_irq_toggle(cpumask_of(cpu), d, 1);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}
#endif

static void plic_irq_eoi(struct irq_data *d)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
}

static struct irq_chip plic_chip = {
	.name		= "SiFive PLIC",
	.irq_mask	= plic_irq_mask,
	.irq_unmask	= plic_irq_unmask,
	.irq_eoi	= plic_irq_eoi,
#ifdef CONFIG_SMP
	.irq_set_affinity = plic_set_affinity,
#endif
};

static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	struct plic_priv *priv = d->host_data;

	irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data,
			    handle_fasteoi_irq, NULL, NULL);
	irq_set_noprobe(irq);
	irq_set_affinity(irq, &priv->lmask);
	return 0;
}

static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type;
	struct irq_fwspec *fwspec = arg;

	ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = plic_irqdomain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct irq_domain_ops plic_irqdomain_ops = {
	.translate	= irq_domain_translate_onecell,
	.alloc		= plic_irq_domain_alloc,
	.free		= irq_domain_free_irqs_top,
};

/*
 * Handling an interrupt is a two-step process: first you claim the interrupt
 * by reading the claim register, then you complete the interrupt by writing
 * that source ID back to the same claim register.  This automatically enables
 * and disables the interrupt, so there's nothing else to do.
 */
static void plic_handle_irq(struct pt_regs *regs)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
	irq_hw_number_t hwirq;

	WARN_ON_ONCE(!handler->present);

	csr_clear(CSR_IE, IE_EIE);
	while ((hwirq = readl(claim))) {
		int irq = irq_find_mapping(handler->priv->irqdomain, hwirq);

		if (unlikely(irq <= 0))
			pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
					    hwirq);
		else
			generic_handle_irq(irq);
	}
	csr_set(CSR_IE, IE_EIE);
}

/*
 * Walk up the DT tree until we find an active RISC-V core (HART) node and
 * extract the cpuid from it.
 */
static int plic_find_hart_id(struct device_node *node)
{
	for (; node; node = node->parent) {
		if (of_device_is_compatible(node, "riscv"))
			return riscv_of_processor_hartid(node);
	}

	return -1;
}

static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
{
	/* priority must be > threshold to trigger an interrupt */
	writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
}

static int plic_dying_cpu(unsigned int cpu)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	csr_clear(CSR_IE, IE_EIE);
	plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);

	return 0;
}

static int plic_starting_cpu(unsigned int cpu)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	csr_set(CSR_IE, IE_EIE);
	plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);

	return 0;
}

static int __init plic_init(struct device_node *node,
			    struct device_node *parent)
{
	int error = 0, nr_contexts, nr_handlers = 0, i;
	u32 nr_irqs;
	struct plic_priv *priv;
	struct plic_handler *handler;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->regs = of_iomap(node, 0);
	if (WARN_ON(!priv->regs)) {
		error = -EIO;
		goto out_free_priv;
	}

	error = -EINVAL;
	of_property_read_u32(node, "riscv,ndev", &nr_irqs);
	if (WARN_ON(!nr_irqs))
		goto out_iounmap;

	nr_contexts = of_irq_count(node);
	if (WARN_ON(!nr_contexts))
		goto out_iounmap;

	error = -ENOMEM;
	priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
						&plic_irqdomain_ops, priv);
	if (WARN_ON(!priv->irqdomain))
		goto out_iounmap;

	for (i = 0; i < nr_contexts; i++) {
		struct of_phandle_args parent;
		irq_hw_number_t hwirq;
		int cpu, hartid;

		if (of_irq_parse_one(node, i, &parent)) {
			pr_err("failed to parse parent for context %d.\n", i);
			continue;
		}

		/*
		 * Skip contexts other than external interrupts for our
		 * privilege level.
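		 * (RV_IRQ_EXT resolves to the M-mode or S-mode external
		 * interrupt number, depending on the privilege level the
		 * kernel is built to run at.)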
		 */
		if (parent.args[0] != RV_IRQ_EXT)
			continue;

		hartid = plic_find_hart_id(parent.np);
		if (hartid < 0) {
			pr_warn("failed to parse hart ID for context %d.\n", i);
			continue;
		}

		cpu = riscv_hartid_to_cpuid(hartid);
		if (cpu < 0) {
			pr_warn("Invalid cpuid for context %d\n", i);
			continue;
		}

		/*
		 * When running in M-mode we need to ignore the S-mode handler.
		 * Here we assume it always comes later, but that might be a
		 * little fragile.
		 */
		handler = per_cpu_ptr(&plic_handlers, cpu);
		if (handler->present) {
			pr_warn("handler already present for context %d.\n", i);
			plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
			goto done;
		}

		cpumask_set_cpu(cpu, &priv->lmask);
		handler->present = true;
		handler->hart_base =
			priv->regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
		raw_spin_lock_init(&handler->enable_lock);
		handler->enable_base =
			priv->regs + ENABLE_BASE + i * ENABLE_PER_HART;
		handler->priv = priv;
done:
		for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
			plic_toggle(handler, hwirq, 0);
		nr_handlers++;
	}

	/*
	 * We can have multiple PLIC instances, so set up the cpuhp state
	 * only when the context handler for the current/boot CPU is present.
	 */
	handler = this_cpu_ptr(&plic_handlers);
	if (handler->present && !plic_cpuhp_setup_done) {
		cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
				  "irqchip/sifive/plic:starting",
				  plic_starting_cpu, plic_dying_cpu);
		plic_cpuhp_setup_done = true;
	}

	pr_info("%pOFP: mapped %d interrupts with %d handlers for"
		" %d contexts.\n", node, nr_irqs, nr_handlers, nr_contexts);
	set_handle_irq(plic_handle_irq);
	return 0;

out_iounmap:
	iounmap(priv->regs);
out_free_priv:
	kfree(priv);
	return error;
}

IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */
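
/*
 * For reference, a minimal sketch of a devicetree node this driver binds to.
 * Node name, labels, addresses and interrupt counts below are illustrative,
 * not taken from a real board:
 *
 *	plic: interrupt-controller@c000000 {
 *		compatible = "sifive,plic-1.0.0";
 *		reg = <0xc000000 0x4000000>;
 *		#interrupt-cells = <1>;
 *		interrupt-controller;
 *		riscv,ndev = <53>;
 *		interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>,
 *				      <&cpu1_intc 11>, <&cpu1_intc 9>;
 *	};
 *
 * plic_init() reads "riscv,ndev" for the number of interrupt sources and
 * walks the parent interrupt specifiers (one per hart context), keeping only
 * those whose interrupt number matches RV_IRQ_EXT for the privilege level the
 * kernel runs at.
 */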