/*
 * Marvell Armada 370 and Armada XP SoC IRQ handling
 *
 * Copyright (C) 2012 Marvell
 *
 * Lior Amsalem <alior@marvell.com>
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 * Ben Dooks <ben.dooks@codethink.co.uk>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/msi.h>
#include <asm/mach/arch.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/mach/irq.h>

/* Interrupt Controller Registers Map */
#define ARMADA_370_XP_INT_SET_MASK_OFFS		(0x48)
#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS	(0x4C)
#define ARMADA_370_XP_INT_FABRIC_MASK_OFFS	(0x54)
#define ARMADA_370_XP_INT_CAUSE_PERF(cpu)	(1 << cpu)

#define ARMADA_370_XP_INT_CONTROL		(0x00)
#define ARMADA_370_XP_INT_SET_ENABLE_OFFS	(0x30)
#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS	(0x34)
#define ARMADA_370_XP_INT_SOURCE_CTL(irq)	(0x100 + irq*4)
#define ARMADA_370_XP_INT_SOURCE_CPU_MASK	0xF
#define ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid)	((BIT(0) | BIT(8)) << cpuid)

#define ARMADA_370_XP_CPU_INTACK_OFFS		(0x44)
#define ARMADA_375_PPI_CAUSE			(0x10)

#define ARMADA_370_XP_SW_TRIG_INT_OFFS		(0x4)
#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS		(0xc)
#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS	(0x8)

#define ARMADA_370_XP_MAX_PER_CPU_IRQS		(28)

#define IPI_DOORBELL_START			(0)
#define IPI_DOORBELL_END			(8)
#define IPI_DOORBELL_MASK			0xFF
#define PCI_MSI_DOORBELL_START			(16)
#define PCI_MSI_DOORBELL_NR			(16)
#define PCI_MSI_DOORBELL_END			(32)
#define PCI_MSI_DOORBELL_MASK			0xFFFF0000

static void __iomem *per_cpu_int_base;
static void __iomem *main_int_base;
static struct irq_domain *armada_370_xp_mpic_domain;
static u32 doorbell_mask_reg;
static int parent_irq;
#ifdef CONFIG_PCI_MSI
static struct irq_domain *armada_370_xp_msi_domain;
static struct irq_domain *armada_370_xp_msi_inner_domain;
static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
static DEFINE_MUTEX(msi_used_lock);
static phys_addr_t msi_doorbell_addr;
#endif

static inline bool is_percpu_irq(irq_hw_number_t irq)
{
	if (irq <= ARMADA_370_XP_MAX_PER_CPU_IRQS)
		return true;

	return false;
}

/*
 * In SMP mode:
 * For shared global interrupts, mask/unmask global enable bit
 * For CPU interrupts, mask/unmask the calling CPU's bit
 */
static void armada_370_xp_irq_mask(struct irq_data *d)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	if (!is_percpu_irq(hwirq))
		writel(hwirq, main_int_base +
				ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
	else
		writel(hwirq, per_cpu_int_base +
				ARMADA_370_XP_INT_SET_MASK_OFFS);
}

static void armada_370_xp_irq_unmask(struct irq_data *d)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	if (!is_percpu_irq(hwirq))
		writel(hwirq, main_int_base +
				ARMADA_370_XP_INT_SET_ENABLE_OFFS);
	else
		writel(hwirq, per_cpu_int_base +
				ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}

#ifdef CONFIG_PCI_MSI

static struct irq_chip armada_370_xp_msi_irq_chip = {
	.name		= "MPIC MSI",
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
};

static struct msi_domain_info armada_370_xp_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &armada_370_xp_msi_irq_chip,
};

static void armada_370_xp_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	msg->address_lo = lower_32_bits(msi_doorbell_addr);
	msg->address_hi = upper_32_bits(msi_doorbell_addr);
	msg->data = 0xf00 | (data->hwirq + PCI_MSI_DOORBELL_START);
}

static int armada_370_xp_msi_set_affinity(struct irq_data *irq_data,
					  const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static struct irq_chip armada_370_xp_msi_bottom_irq_chip = {
	.name			= "MPIC MSI",
	.irq_compose_msi_msg	= armada_370_xp_compose_msi_msg,
	.irq_set_affinity	= armada_370_xp_msi_set_affinity,
};

static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
				   unsigned int nr_irqs, void *args)
{
	int hwirq, i;

	mutex_lock(&msi_used_lock);

	hwirq = bitmap_find_next_zero_area(msi_used, PCI_MSI_DOORBELL_NR,
					   0, nr_irqs, 0);
	if (hwirq >= PCI_MSI_DOORBELL_NR) {
		mutex_unlock(&msi_used_lock);
		return -ENOSPC;
	}

	bitmap_set(msi_used, hwirq, nr_irqs);
	mutex_unlock(&msi_used_lock);

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &armada_370_xp_msi_bottom_irq_chip,
				    domain->host_data, handle_simple_irq,
				    NULL, NULL);
	}

	return hwirq;
}

static void armada_370_xp_msi_free(struct irq_domain *domain,
				   unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);

	mutex_lock(&msi_used_lock);
	bitmap_clear(msi_used, d->hwirq, nr_irqs);
	mutex_unlock(&msi_used_lock);
}

static const struct irq_domain_ops armada_370_xp_msi_domain_ops = {
	.alloc	= armada_370_xp_msi_alloc,
	.free	= armada_370_xp_msi_free,
};

static int armada_370_xp_msi_init(struct device_node *node,
				  phys_addr_t main_int_phys_base)
{
	u32 reg;

	msi_doorbell_addr = main_int_phys_base +
		ARMADA_370_XP_SW_TRIG_INT_OFFS;

	armada_370_xp_msi_inner_domain =
		irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR,
				      &armada_370_xp_msi_domain_ops, NULL);
	if (!armada_370_xp_msi_inner_domain)
		return -ENOMEM;

	armada_370_xp_msi_domain =
		pci_msi_create_irq_domain(of_node_to_fwnode(node),
					  &armada_370_xp_msi_domain_info,
					  armada_370_xp_msi_inner_domain);
	if (!armada_370_xp_msi_domain) {
		irq_domain_remove(armada_370_xp_msi_inner_domain);
		return -ENOMEM;
	}

	/* Enable the MSI doorbell causes */
	reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS)
		| PCI_MSI_DOORBELL_MASK;

	writel(reg, per_cpu_int_base +
	       ARMADA_370_XP_IN_DRBEL_MSK_OFFS);

	/* Unmask the doorbell interrupt (per-CPU IRQ 1) that reports MSIs */
	writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);

	return 0;
}
#else
static inline int armada_370_xp_msi_init(struct device_node *node,
					 phys_addr_t main_int_phys_base)
{
	return 0;
}
#endif

#ifdef CONFIG_SMP
static DEFINE_RAW_SPINLOCK(irq_controller_lock);

static int armada_xp_set_affinity(struct irq_data *d,
				  const struct cpumask *mask_val, bool force)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned long reg, mask;
	int cpu;

	/* Select a single core from the affinity mask which is online */
	cpu = cpumask_any_and(mask_val, cpu_online_mask);
	mask = 1UL << cpu_logical_map(cpu);

	raw_spin_lock(&irq_controller_lock);
	reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
	reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask;
	writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
	raw_spin_unlock(&irq_controller_lock);

	return IRQ_SET_MASK_OK;
}
#endif

static struct irq_chip armada_370_xp_irq_chip = {
	.name		= "MPIC",
	.irq_mask	= armada_370_xp_irq_mask,
	.irq_mask_ack	= armada_370_xp_irq_mask,
	.irq_unmask	= armada_370_xp_irq_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity = armada_xp_set_affinity,
#endif
	.flags		= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
};

static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
				      unsigned int virq, irq_hw_number_t hw)
{
	armada_370_xp_irq_mask(irq_get_irq_data(virq));
	if (!is_percpu_irq(hw))
		writel(hw, per_cpu_int_base +
			ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
	else
		writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
	irq_set_status_flags(virq, IRQ_LEVEL);

	if (is_percpu_irq(hw)) {
		irq_set_percpu_devid(virq);
		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
					 handle_percpu_devid_irq);
	} else {
		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
					 handle_level_irq);
	}
	irq_set_probe(virq);
	irq_clear_status_flags(virq, IRQ_NOAUTOEN);

	return 0;
}

static void armada_xp_mpic_smp_cpu_init(void)
{
	u32 control;
	int nr_irqs, i;

	control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
	nr_irqs = (control >> 2) & 0x3ff;

	/* Mask all interrupts on the current CPU */
	for (i = 0; i < nr_irqs; i++)
		writel(i, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);

	/* Clear pending IPIs */
	writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

	/* Enable first 8 IPIs */
	writel(IPI_DOORBELL_MASK, per_cpu_int_base +
		ARMADA_370_XP_IN_DRBEL_MSK_OFFS);

	/* Unmask IPI interrupt */
	writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}

static void armada_xp_mpic_perf_init(void)
{
	unsigned long cpuid = cpu_logical_map(smp_processor_id());

	/* Enable Performance Counter Overflow interrupts */
	writel(ARMADA_370_XP_INT_CAUSE_PERF(cpuid),
	       per_cpu_int_base + ARMADA_370_XP_INT_FABRIC_MASK_OFFS);
}

#ifdef CONFIG_SMP
static void armada_mpic_send_doorbell(const struct cpumask *mask,
				      unsigned int irq)
{
	int cpu;
	unsigned long map = 0;

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= 1 << cpu_logical_map(cpu);

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb();

	/* submit softirq */
	writel((map << 8) | irq, main_int_base +
		ARMADA_370_XP_SW_TRIG_INT_OFFS);
}

static int armada_xp_mpic_secondary_init(struct notifier_block *nfb,
					 unsigned long action, void *hcpu)
{
	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) {
		armada_xp_mpic_perf_init();
		armada_xp_mpic_smp_cpu_init();
	}

	return NOTIFY_OK;
}

static struct notifier_block armada_370_xp_mpic_cpu_notifier = {
	.notifier_call = armada_xp_mpic_secondary_init,
	.priority = 100,
};

static int mpic_cascaded_secondary_init(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) {
		armada_xp_mpic_perf_init();
		enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
	}

	return NOTIFY_OK;
}

static struct notifier_block mpic_cascaded_cpu_notifier = {
	.notifier_call = mpic_cascaded_secondary_init,
	.priority = 100,
};
#endif /* CONFIG_SMP */

static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
	.map = armada_370_xp_mpic_irq_map,
	.xlate = irq_domain_xlate_onecell,
};

#ifdef CONFIG_PCI_MSI
static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
{
	u32 msimask, msinr;

	msimask = readl_relaxed(per_cpu_int_base +
				ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
		& PCI_MSI_DOORBELL_MASK;

	writel(~msimask, per_cpu_int_base +
	       ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

	for (msinr = PCI_MSI_DOORBELL_START;
	     msinr < PCI_MSI_DOORBELL_END; msinr++) {
		int irq;

		if (!(msimask & BIT(msinr)))
			continue;

		if (is_chained) {
			irq = irq_find_mapping(armada_370_xp_msi_inner_domain,
					       msinr - PCI_MSI_DOORBELL_START);
			generic_handle_irq(irq);
		} else {
			irq = msinr - PCI_MSI_DOORBELL_START;
			handle_domain_irq(armada_370_xp_msi_inner_domain,
					  irq, regs);
		}
	}
}
#else
static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {}
#endif

static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long irqmap, irqn, irqsrc, cpuid;
	unsigned int cascade_irq;

	chained_irq_enter(chip, desc);

	irqmap = readl_relaxed(per_cpu_int_base + ARMADA_375_PPI_CAUSE);
	cpuid = cpu_logical_map(smp_processor_id());

	for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) {
		irqsrc = readl_relaxed(main_int_base +
				       ARMADA_370_XP_INT_SOURCE_CTL(irqn));

		/* Check if the interrupt is not masked on current CPU.
		 * Test IRQ (0-1) and FIQ (8-9) mask bits.
		 */
		if (!(irqsrc & ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid)))
			continue;

		if (irqn == 1) {
			armada_370_xp_handle_msi_irq(NULL, true);
			continue;
		}

		cascade_irq = irq_find_mapping(armada_370_xp_mpic_domain, irqn);
		generic_handle_irq(cascade_irq);
	}

	chained_irq_exit(chip, desc);
}

static void __exception_irq_entry
armada_370_xp_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;

	do {
		irqstat = readl_relaxed(per_cpu_int_base +
					ARMADA_370_XP_CPU_INTACK_OFFS);
		irqnr = irqstat & 0x3FF;

		if (irqnr > 1022)
			break;

		if (irqnr > 1) {
			handle_domain_irq(armada_370_xp_mpic_domain,
					  irqnr, regs);
			continue;
		}

		/* MSI handling */
		if (irqnr == 1)
			armada_370_xp_handle_msi_irq(regs, false);

#ifdef CONFIG_SMP
		/* IPI Handling */
		if (irqnr == 0) {
			u32 ipimask, ipinr;

			ipimask = readl_relaxed(per_cpu_int_base +
						ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
				& IPI_DOORBELL_MASK;

			writel(~ipimask, per_cpu_int_base +
			       ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

			/* Handle all pending doorbells */
			for (ipinr = IPI_DOORBELL_START;
			     ipinr < IPI_DOORBELL_END; ipinr++) {
				if (ipimask & (0x1 << ipinr))
					handle_IPI(ipinr, regs);
			}
			continue;
		}
#endif

	} while (1);
}

static int armada_370_xp_mpic_suspend(void)
{
	doorbell_mask_reg = readl(per_cpu_int_base +
				  ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
	return 0;
}

static void armada_370_xp_mpic_resume(void)
{
	int nirqs;
	irq_hw_number_t irq;

	/* Re-enable interrupts */
	nirqs = (readl(main_int_base + ARMADA_370_XP_INT_CONTROL) >> 2) & 0x3ff;
	for (irq = 0; irq < nirqs; irq++) {
		struct irq_data *data;
		int virq;

		virq = irq_linear_revmap(armada_370_xp_mpic_domain, irq);
		if (virq == 0)
			continue;

		if (!is_percpu_irq(irq))
			writel(irq, per_cpu_int_base +
			       ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
		else
			writel(irq, main_int_base +
			       ARMADA_370_XP_INT_SET_ENABLE_OFFS);

		data = irq_get_irq_data(virq);
		if (!irqd_irq_disabled(data))
			armada_370_xp_irq_unmask(data);
	}

	/* Reconfigure doorbells for IPIs and MSIs */
	writel(doorbell_mask_reg,
	       per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
	if (doorbell_mask_reg & IPI_DOORBELL_MASK)
		writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
	if (doorbell_mask_reg & PCI_MSI_DOORBELL_MASK)
		writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}

struct syscore_ops armada_370_xp_mpic_syscore_ops = {
	.suspend	= armada_370_xp_mpic_suspend,
	.resume		= armada_370_xp_mpic_resume,
};

static int __init armada_370_xp_mpic_of_init(struct device_node *node,
					     struct device_node *parent)
{
	struct resource main_int_res, per_cpu_int_res;
	int nr_irqs, i;
	u32 control;

	BUG_ON(of_address_to_resource(node, 0, &main_int_res));
	BUG_ON(of_address_to_resource(node, 1, &per_cpu_int_res));

	BUG_ON(!request_mem_region(main_int_res.start,
				   resource_size(&main_int_res),
				   node->full_name));
	BUG_ON(!request_mem_region(per_cpu_int_res.start,
				   resource_size(&per_cpu_int_res),
				   node->full_name));

	main_int_base = ioremap(main_int_res.start,
				resource_size(&main_int_res));
	BUG_ON(!main_int_base);

	per_cpu_int_base = ioremap(per_cpu_int_res.start,
				   resource_size(&per_cpu_int_res));
	BUG_ON(!per_cpu_int_base);

	control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
	nr_irqs = (control >> 2) & 0x3ff;

	for (i = 0; i < nr_irqs; i++)
		writel(i, main_int_base + ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);

	armada_370_xp_mpic_domain =
		irq_domain_add_linear(node, nr_irqs,
				      &armada_370_xp_mpic_irq_ops, NULL);
	BUG_ON(!armada_370_xp_mpic_domain);
	armada_370_xp_mpic_domain->bus_token = DOMAIN_BUS_WIRED;

	/* Setup for the boot CPU */
	armada_xp_mpic_perf_init();
	armada_xp_mpic_smp_cpu_init();

	armada_370_xp_msi_init(node, main_int_res.start);

	parent_irq = irq_of_parse_and_map(node, 0);
	if (parent_irq <= 0) {
		irq_set_default_host(armada_370_xp_mpic_domain);
		set_handle_irq(armada_370_xp_handle_irq);
#ifdef CONFIG_SMP
		set_smp_cross_call(armada_mpic_send_doorbell);
		register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier);
#endif
	} else {
#ifdef CONFIG_SMP
		register_cpu_notifier(&mpic_cascaded_cpu_notifier);
#endif
		irq_set_chained_handler(parent_irq,
					armada_370_xp_mpic_handle_cascade_irq);
	}

	register_syscore_ops(&armada_370_xp_mpic_syscore_ops);

	return 0;
}

IRQCHIP_DECLARE(armada_370_xp_mpic, "marvell,mpic", armada_370_xp_mpic_of_init);