// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom BCM7038 style Level 1 interrupt controller driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 * Author: Kevin Cernekee
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/syscore_ops.h>
#ifdef CONFIG_ARM
#include <asm/smp_plat.h>
#endif

#define IRQS_PER_WORD		32
#define REG_BYTES_PER_IRQ_WORD	(sizeof(u32) * 4)
#define MAX_WORDS		8

struct bcm7038_l1_cpu;

struct bcm7038_l1_chip {
	raw_spinlock_t		lock;
	unsigned int		n_words;
	struct irq_domain	*domain;
	struct bcm7038_l1_cpu	*cpus[NR_CPUS];
#ifdef CONFIG_PM_SLEEP
	struct list_head	list;
	u32			wake_mask[MAX_WORDS];
#endif
	u32			irq_fwd_mask[MAX_WORDS];
	u8			affinity[MAX_WORDS * IRQS_PER_WORD];
};

struct bcm7038_l1_cpu {
	void __iomem		*map_base;
	u32			mask_cache[];
};

/*
 * STATUS/MASK_STATUS/MASK_SET/MASK_CLEAR are packed one right after another:
 *
 * 7038:
 *   0x1000_1400: W0_STATUS
 *   0x1000_1404: W1_STATUS
 *   0x1000_1408: W0_MASK_STATUS
 *   0x1000_140c: W1_MASK_STATUS
 *   0x1000_1410: W0_MASK_SET
 *   0x1000_1414: W1_MASK_SET
 *   0x1000_1418: W0_MASK_CLEAR
 *   0x1000_141c: W1_MASK_CLEAR
 *
 * 7445:
 *   0xf03e_1500: W0_STATUS
 *   0xf03e_1504: W1_STATUS
 *   0xf03e_1508: W2_STATUS
 *   0xf03e_150c: W3_STATUS
 *   0xf03e_1510: W4_STATUS
 *   0xf03e_1514: W0_MASK_STATUS
 *   0xf03e_1518: W1_MASK_STATUS
 *   [...]
 */
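/*
 * The reg_*() helpers below translate (register group, word) into a byte
 * offset from map_base: group 0 is STATUS, 1 is MASK_STATUS, 2 is MASK_SET,
 * 3 is MASK_CLEAR, each group spanning n_words consecutive 32-bit registers.
 *
 * Worked example against the 7038 layout above (n_words = 2, base
 * 0x1000_1400): reg_mask_set(intc, 1) = (2 * 2 + 1) * 4 = 0x14, which lands
 * on 0x1000_1414, i.e. W1_MASK_SET.
 */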
static inline unsigned int reg_status(struct bcm7038_l1_chip *intc,
				      unsigned int word)
{
	return (0 * intc->n_words + word) * sizeof(u32);
}

static inline unsigned int reg_mask_status(struct bcm7038_l1_chip *intc,
					   unsigned int word)
{
	return (1 * intc->n_words + word) * sizeof(u32);
}

static inline unsigned int reg_mask_set(struct bcm7038_l1_chip *intc,
					unsigned int word)
{
	return (2 * intc->n_words + word) * sizeof(u32);
}

static inline unsigned int reg_mask_clr(struct bcm7038_l1_chip *intc,
					unsigned int word)
{
	return (3 * intc->n_words + word) * sizeof(u32);
}

static inline u32 l1_readl(void __iomem *reg)
{
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		return ioread32be(reg);
	else
		return readl(reg);
}

static inline void l1_writel(u32 val, void __iomem *reg)
{
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		iowrite32be(val, reg);
	else
		writel(val, reg);
}

static void bcm7038_l1_irq_handle(struct irq_desc *desc)
{
	struct bcm7038_l1_chip *intc = irq_desc_get_handler_data(desc);
	struct bcm7038_l1_cpu *cpu;
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int idx;

#ifdef CONFIG_SMP
	cpu = intc->cpus[cpu_logical_map(smp_processor_id())];
#else
	cpu = intc->cpus[0];
#endif

	chained_irq_enter(chip, desc);

	for (idx = 0; idx < intc->n_words; idx++) {
		int base = idx * IRQS_PER_WORD;
		unsigned long pending, flags;
		int hwirq;

		raw_spin_lock_irqsave(&intc->lock, flags);
		pending = l1_readl(cpu->map_base + reg_status(intc, idx)) &
			  ~cpu->mask_cache[idx];
		raw_spin_unlock_irqrestore(&intc->lock, flags);

		for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
			generic_handle_irq(irq_find_mapping(intc->domain,
							    base + hwirq));
		}
	}

	chained_irq_exit(chip, desc);
}

static void __bcm7038_l1_unmask(struct irq_data *d, unsigned int cpu_idx)
{
	struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
	u32 word = d->hwirq / IRQS_PER_WORD;
	u32 mask = BIT(d->hwirq % IRQS_PER_WORD);

	intc->cpus[cpu_idx]->mask_cache[word] &= ~mask;
	l1_writel(mask, intc->cpus[cpu_idx]->map_base +
			reg_mask_clr(intc, word));
}

static void __bcm7038_l1_mask(struct irq_data *d, unsigned int cpu_idx)
{
	struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
	u32 word = d->hwirq / IRQS_PER_WORD;
	u32 mask = BIT(d->hwirq % IRQS_PER_WORD);

	intc->cpus[cpu_idx]->mask_cache[word] |= mask;
	l1_writel(mask, intc->cpus[cpu_idx]->map_base +
			reg_mask_set(intc, word));
}

static void bcm7038_l1_unmask(struct irq_data *d)
{
	struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&intc->lock, flags);
	__bcm7038_l1_unmask(d, intc->affinity[d->hwirq]);
	raw_spin_unlock_irqrestore(&intc->lock, flags);
}

static void bcm7038_l1_mask(struct irq_data *d)
{
	struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&intc->lock, flags);
	__bcm7038_l1_mask(d, intc->affinity[d->hwirq]);
	raw_spin_unlock_irqrestore(&intc->lock, flags);
}
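/*
 * Each hwirq is routed to exactly one CPU at a time; intc->affinity[] records
 * which one, and the mask/unmask paths above use it to pick the right per-CPU
 * copy of the register block. Changing affinity below therefore means masking
 * the interrupt on the old CPU, recording the new target, and unmasking it on
 * the new CPU if it was enabled.
 */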
static int bcm7038_l1_set_affinity(struct irq_data *d,
				   const struct cpumask *dest,
				   bool force)
{
	struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
	unsigned long flags;
	irq_hw_number_t hw = d->hwirq;
	u32 word = hw / IRQS_PER_WORD;
	u32 mask = BIT(hw % IRQS_PER_WORD);
	unsigned int first_cpu = cpumask_any_and(dest, cpu_online_mask);
	bool was_disabled;

	raw_spin_lock_irqsave(&intc->lock, flags);

	was_disabled = !!(intc->cpus[intc->affinity[hw]]->mask_cache[word] &
			  mask);
	__bcm7038_l1_mask(d, intc->affinity[hw]);
	intc->affinity[hw] = first_cpu;
	if (!was_disabled)
		__bcm7038_l1_unmask(d, first_cpu);

	raw_spin_unlock_irqrestore(&intc->lock, flags);
	irq_data_update_effective_affinity(d, cpumask_of(first_cpu));

	return 0;
}

#ifdef CONFIG_SMP
static void bcm7038_l1_cpu_offline(struct irq_data *d)
{
	struct cpumask *mask = irq_data_get_affinity_mask(d);
	int cpu = smp_processor_id();
	cpumask_t new_affinity;

	/* This CPU was not on the affinity mask */
	if (!cpumask_test_cpu(cpu, mask))
		return;

	if (cpumask_weight(mask) > 1) {
		/*
		 * Multiple CPU affinity, remove this CPU from the affinity
		 * mask
		 */
		cpumask_copy(&new_affinity, mask);
		cpumask_clear_cpu(cpu, &new_affinity);
	} else {
		/* Only CPU, put on the lowest online CPU */
		cpumask_clear(&new_affinity);
		cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
	}
	irq_set_affinity_locked(d, &new_affinity, false);
}
#endif

static int __init bcm7038_l1_init_one(struct device_node *dn,
				      unsigned int idx,
				      struct bcm7038_l1_chip *intc)
{
	struct resource res;
	resource_size_t sz;
	struct bcm7038_l1_cpu *cpu;
	unsigned int i, n_words, parent_irq;
	int ret;

	if (of_address_to_resource(dn, idx, &res))
		return -EINVAL;
	sz = resource_size(&res);
	n_words = sz / REG_BYTES_PER_IRQ_WORD;

	if (n_words > MAX_WORDS)
		return -EINVAL;
	else if (!intc->n_words)
		intc->n_words = n_words;
	else if (intc->n_words != n_words)
		return -EINVAL;

	ret = of_property_read_u32_array(dn, "brcm,int-fwd-mask",
					 intc->irq_fwd_mask, n_words);
	if (ret != 0 && ret != -EINVAL) {
		/* property exists but has the wrong number of words */
		pr_err("invalid brcm,int-fwd-mask property\n");
		return -EINVAL;
	}

	cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
					GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;

	cpu->map_base = ioremap(res.start, sz);
	if (!cpu->map_base)
		return -ENOMEM;

	for (i = 0; i < n_words; i++) {
		l1_writel(~intc->irq_fwd_mask[i],
			  cpu->map_base + reg_mask_set(intc, i));
		l1_writel(intc->irq_fwd_mask[i],
			  cpu->map_base + reg_mask_clr(intc, i));
		cpu->mask_cache[i] = ~intc->irq_fwd_mask[i];
	}

	parent_irq = irq_of_parse_and_map(dn, idx);
	if (!parent_irq) {
		pr_err("failed to map parent interrupt %d\n", parent_irq);
		return -EINVAL;
	}

	if (of_property_read_bool(dn, "brcm,irq-can-wake"))
		enable_irq_wake(parent_irq);

	irq_set_chained_handler_and_data(parent_irq, bcm7038_l1_irq_handle,
					 intc);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
/*
 * We keep a list of bcm7038_l1_chip used for suspend/resume. This hack is
 * used because the struct chip_type suspend/resume hooks are not called
 * unless chip_type is hooked onto a generic_chip. Since this driver does
 * not use generic_chip, we need to manually hook our resume/suspend to
 * syscore_ops.
 */
static LIST_HEAD(bcm7038_l1_intcs_list);
static DEFINE_RAW_SPINLOCK(bcm7038_l1_intcs_lock);
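/*
 * On suspend, the boot CPU's mask words are reprogrammed so that only
 * interrupts flagged for wakeup (wake_mask) plus the hardware-forwarded bits
 * (irq_fwd_mask) stay unmasked; everything else is masked. Resume rewrites
 * the cached masks, restoring the pre-suspend state.
 */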
static int bcm7038_l1_suspend(void)
{
	struct bcm7038_l1_chip *intc;
	int boot_cpu, word;
	u32 val;

	/* Wakeup interrupt should only come from the boot cpu */
#ifdef CONFIG_SMP
	boot_cpu = cpu_logical_map(0);
#else
	boot_cpu = 0;
#endif

	list_for_each_entry(intc, &bcm7038_l1_intcs_list, list) {
		for (word = 0; word < intc->n_words; word++) {
			val = intc->wake_mask[word] | intc->irq_fwd_mask[word];
			l1_writel(~val,
				  intc->cpus[boot_cpu]->map_base + reg_mask_set(intc, word));
			l1_writel(val,
				  intc->cpus[boot_cpu]->map_base + reg_mask_clr(intc, word));
		}
	}

	return 0;
}

static void bcm7038_l1_resume(void)
{
	struct bcm7038_l1_chip *intc;
	int boot_cpu, word;

#ifdef CONFIG_SMP
	boot_cpu = cpu_logical_map(0);
#else
	boot_cpu = 0;
#endif

	list_for_each_entry(intc, &bcm7038_l1_intcs_list, list) {
		for (word = 0; word < intc->n_words; word++) {
			l1_writel(intc->cpus[boot_cpu]->mask_cache[word],
				  intc->cpus[boot_cpu]->map_base + reg_mask_set(intc, word));
			l1_writel(~intc->cpus[boot_cpu]->mask_cache[word],
				  intc->cpus[boot_cpu]->map_base + reg_mask_clr(intc, word));
		}
	}
}

static struct syscore_ops bcm7038_l1_syscore_ops = {
	.suspend	= bcm7038_l1_suspend,
	.resume		= bcm7038_l1_resume,
};

static int bcm7038_l1_set_wake(struct irq_data *d, unsigned int on)
{
	struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
	unsigned long flags;
	u32 word = d->hwirq / IRQS_PER_WORD;
	u32 mask = BIT(d->hwirq % IRQS_PER_WORD);

	raw_spin_lock_irqsave(&intc->lock, flags);
	if (on)
		intc->wake_mask[word] |= mask;
	else
		intc->wake_mask[word] &= ~mask;
	raw_spin_unlock_irqrestore(&intc->lock, flags);

	return 0;
}
#endif

static struct irq_chip bcm7038_l1_irq_chip = {
	.name			= "bcm7038-l1",
	.irq_mask		= bcm7038_l1_mask,
	.irq_unmask		= bcm7038_l1_unmask,
	.irq_set_affinity	= bcm7038_l1_set_affinity,
#ifdef CONFIG_SMP
	.irq_cpu_offline	= bcm7038_l1_cpu_offline,
#endif
#ifdef CONFIG_PM_SLEEP
	.irq_set_wake		= bcm7038_l1_set_wake,
#endif
};

static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
			  irq_hw_number_t hw_irq)
{
	struct bcm7038_l1_chip *intc = d->host_data;
	u32 mask = BIT(hw_irq % IRQS_PER_WORD);
	u32 word = hw_irq / IRQS_PER_WORD;

	if (intc->irq_fwd_mask[word] & mask)
		return -EPERM;

	irq_set_chip_and_handler(virq, &bcm7038_l1_irq_chip, handle_level_irq);
	irq_set_chip_data(virq, d->host_data);
	irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
	return 0;
}

static const struct irq_domain_ops bcm7038_l1_domain_ops = {
	.xlate			= irq_domain_xlate_onecell,
	.map			= bcm7038_l1_map,
};
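/*
 * Illustrative device tree sketch only (hypothetical addresses and interrupt
 * numbers, not taken from a real board file; see the upstream binding for the
 * authoritative format). It shows what the init path below expects: one "reg"
 * window and one parent interrupt per CPU, single-cell interrupt specifiers,
 * and the optional properties parsed in bcm7038_l1_init_one():
 *
 *	periph_intc: interrupt-controller@f03e1500 {
 *		compatible = "brcm,bcm7038-l1-intc";
 *		reg = <0xf03e1500 0x50>, <0xf03e1600 0x50>;
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *		interrupt-parent = <&cpu_intc>;
 *		interrupts = <2>, <3>;
 *		brcm,irq-can-wake;
 *	};
 *
 * A consumer then requests hwirq N with "interrupts = <N>;".
 */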
static int __init bcm7038_l1_of_init(struct device_node *dn,
				     struct device_node *parent)
{
	struct bcm7038_l1_chip *intc;
	int idx, ret;

	intc = kzalloc(sizeof(*intc), GFP_KERNEL);
	if (!intc)
		return -ENOMEM;

	raw_spin_lock_init(&intc->lock);
	for_each_possible_cpu(idx) {
		ret = bcm7038_l1_init_one(dn, idx, intc);
		if (ret < 0) {
			if (idx)
				break;
			pr_err("failed to remap intc L1 registers\n");
			goto out_free;
		}
	}

	intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words,
					     &bcm7038_l1_domain_ops,
					     intc);
	if (!intc->domain) {
		ret = -ENOMEM;
		goto out_unmap;
	}

#ifdef CONFIG_PM_SLEEP
	/* Add bcm7038_l1_chip into a list */
	raw_spin_lock(&bcm7038_l1_intcs_lock);
	list_add_tail(&intc->list, &bcm7038_l1_intcs_list);
	raw_spin_unlock(&bcm7038_l1_intcs_lock);

	if (list_is_singular(&bcm7038_l1_intcs_list))
		register_syscore_ops(&bcm7038_l1_syscore_ops);
#endif

	pr_info("registered BCM7038 L1 intc (%pOF, IRQs: %d)\n",
		dn, IRQS_PER_WORD * intc->n_words);

	return 0;

out_unmap:
	for_each_possible_cpu(idx) {
		struct bcm7038_l1_cpu *cpu = intc->cpus[idx];

		if (cpu) {
			if (cpu->map_base)
				iounmap(cpu->map_base);
			kfree(cpu);
		}
	}
out_free:
	kfree(intc);
	return ret;
}

IRQCHIP_DECLARE(bcm7038_l1, "brcm,bcm7038-l1-intc", bcm7038_l1_of_init);