/*
 * Marvell Armada 370 and Armada XP SoC IRQ handling
 *
 * Copyright (C) 2012 Marvell
 *
 * Lior Amsalem <alior@marvell.com>
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 * Ben Dooks <ben.dooks@codethink.co.uk>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/msi.h>
#include <asm/mach/arch.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/mach/irq.h>

#include "irqchip.h"

/* Interrupt Controller Registers Map */
#define ARMADA_370_XP_INT_SET_MASK_OFFS		(0x48)
#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS	(0x4C)

#define ARMADA_370_XP_INT_CONTROL		(0x00)
#define ARMADA_370_XP_INT_SET_ENABLE_OFFS	(0x30)
#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS	(0x34)
#define ARMADA_370_XP_INT_SOURCE_CTL(irq)	(0x100 + irq*4)
#define ARMADA_370_XP_INT_SOURCE_CPU_MASK	0xF

#define ARMADA_370_XP_CPU_INTACK_OFFS		(0x44)
#define ARMADA_375_PPI_CAUSE			(0x10)

#define ARMADA_370_XP_SW_TRIG_INT_OFFS		(0x4)
#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS		(0xc)
#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS	(0x8)

#define ARMADA_370_XP_MAX_PER_CPU_IRQS		(28)

#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ	(5)

#define IPI_DOORBELL_START			(0)
#define IPI_DOORBELL_END			(8)
#define IPI_DOORBELL_MASK			0xFF
#define PCI_MSI_DOORBELL_START			(16)
#define PCI_MSI_DOORBELL_NR			(16)
#define PCI_MSI_DOORBELL_END			(32)
#define PCI_MSI_DOORBELL_MASK			0xFFFF0000

static void __iomem *per_cpu_int_base;
static void __iomem *main_int_base;
static struct irq_domain *armada_370_xp_mpic_domain;
#ifdef CONFIG_PCI_MSI
static struct irq_domain *armada_370_xp_msi_domain;
static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
static DEFINE_MUTEX(msi_used_lock);
static phys_addr_t msi_doorbell_addr;
#endif

/*
 * In SMP mode:
 * For shared global interrupts, mask/unmask global enable bit
 * For CPU interrupts, mask/unmask the calling CPU's bit
 */
static void armada_370_xp_irq_mask(struct irq_data *d)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
		writel(hwirq, main_int_base +
				ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
	else
		writel(hwirq, per_cpu_int_base +
				ARMADA_370_XP_INT_SET_MASK_OFFS);
}

static void armada_370_xp_irq_unmask(struct irq_data *d)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
		writel(hwirq, main_int_base +
				ARMADA_370_XP_INT_SET_ENABLE_OFFS);
	else
		writel(hwirq, per_cpu_int_base +
				ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}

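/*
 * The per-CPU inbound doorbells are shared between two users: doorbells
 * 0-7 carry IPIs between cores, while doorbells 16-31 are raised by PCI
 * devices writing an MSI message to the software triggered interrupt
 * register.
 */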
#ifdef CONFIG_PCI_MSI

static int armada_370_xp_alloc_msi(void)
{
	int hwirq;

	mutex_lock(&msi_used_lock);
	hwirq = find_first_zero_bit(&msi_used, PCI_MSI_DOORBELL_NR);
	if (hwirq >= PCI_MSI_DOORBELL_NR)
		hwirq = -ENOSPC;
	else
		set_bit(hwirq, msi_used);
	mutex_unlock(&msi_used_lock);

	return hwirq;
}

static void armada_370_xp_free_msi(int hwirq)
{
	mutex_lock(&msi_used_lock);
	if (!test_bit(hwirq, msi_used))
		pr_err("trying to free unused MSI#%d\n", hwirq);
	else
		clear_bit(hwirq, msi_used);
	mutex_unlock(&msi_used_lock);
}

static int armada_370_xp_setup_msi_irq(struct msi_chip *chip,
				       struct pci_dev *pdev,
				       struct msi_desc *desc)
{
	struct msi_msg msg;
	int virq, hwirq;

	hwirq = armada_370_xp_alloc_msi();
	if (hwirq < 0)
		return hwirq;

	virq = irq_create_mapping(armada_370_xp_msi_domain, hwirq);
	if (!virq) {
		armada_370_xp_free_msi(hwirq);
		return -EINVAL;
	}

	irq_set_msi_desc(virq, desc);

	msg.address_lo = msi_doorbell_addr;
	msg.address_hi = 0;
	msg.data = 0xf00 | (hwirq + 16);

	write_msi_msg(virq, &msg);
	return 0;
}

static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip,
					   unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	unsigned long hwirq = d->hwirq;

	irq_dispose_mapping(irq);
	armada_370_xp_free_msi(hwirq);
}

static int armada_370_xp_check_msi_device(struct msi_chip *chip, struct pci_dev *dev,
					  int nvec, int type)
{
	/* We support MSI, but not MSI-X */
	if (type == PCI_CAP_ID_MSI)
		return 0;
	return -EINVAL;
}

static struct irq_chip armada_370_xp_msi_irq_chip = {
	.name = "armada_370_xp_msi_irq",
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};

static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq,
				 irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &armada_370_xp_msi_irq_chip,
				 handle_simple_irq);
	set_irq_flags(virq, IRQF_VALID);

	return 0;
}

static const struct irq_domain_ops armada_370_xp_msi_irq_ops = {
	.map = armada_370_xp_msi_map,
};

static int armada_370_xp_msi_init(struct device_node *node,
				  phys_addr_t main_int_phys_base)
{
	struct msi_chip *msi_chip;
	u32 reg;
	int ret;

	msi_doorbell_addr = main_int_phys_base +
		ARMADA_370_XP_SW_TRIG_INT_OFFS;

	msi_chip = kzalloc(sizeof(*msi_chip), GFP_KERNEL);
	if (!msi_chip)
		return -ENOMEM;

	msi_chip->setup_irq = armada_370_xp_setup_msi_irq;
	msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq;
	msi_chip->check_device = armada_370_xp_check_msi_device;
	msi_chip->of_node = node;

	armada_370_xp_msi_domain =
		irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR,
				      &armada_370_xp_msi_irq_ops,
				      NULL);
	if (!armada_370_xp_msi_domain) {
		kfree(msi_chip);
		return -ENOMEM;
	}

	ret = of_pci_msi_chip_add(msi_chip);
	if (ret < 0) {
		irq_domain_remove(armada_370_xp_msi_domain);
		kfree(msi_chip);
		return ret;
	}

	reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS)
		| PCI_MSI_DOORBELL_MASK;

	writel(reg, per_cpu_int_base +
	       ARMADA_370_XP_IN_DRBEL_MSK_OFFS);

	/* Unmask the doorbell interrupt used for MSIs */
	writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);

	return 0;
}
#else
static inline int armada_370_xp_msi_init(struct device_node *node,
					 phys_addr_t main_int_phys_base)
{
	return 0;
}
#endif

#ifdef CONFIG_SMP
static DEFINE_RAW_SPINLOCK(irq_controller_lock);

static int armada_xp_set_affinity(struct irq_data *d,
				  const struct cpumask *mask_val, bool force)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned long reg, mask;
	int cpu;

	/* Select a single online core from the affinity mask */
	cpu = cpumask_any_and(mask_val, cpu_online_mask);
	mask = 1UL << cpu_logical_map(cpu);

	raw_spin_lock(&irq_controller_lock);
	reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
	reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask;
	writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
	raw_spin_unlock(&irq_controller_lock);

	return 0;
}
#endif

static struct irq_chip armada_370_xp_irq_chip = {
	.name = "armada_370_xp_irq",
	.irq_mask = armada_370_xp_irq_mask,
	.irq_mask_ack = armada_370_xp_irq_mask,
	.irq_unmask = armada_370_xp_irq_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity = armada_xp_set_affinity,
#endif
};

static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
				      unsigned int virq, irq_hw_number_t hw)
{
	armada_370_xp_irq_mask(irq_get_irq_data(virq));
	if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
		writel(hw, per_cpu_int_base +
			ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
	else
		writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
	irq_set_status_flags(virq, IRQ_LEVEL);

	if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {
		irq_set_percpu_devid(virq);
		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
					 handle_percpu_devid_irq);
	} else {
		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
					 handle_level_irq);
	}
	set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);

	return 0;
}

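/*
 * IPIs are sent through the same software triggered interrupt register
 * used for MSIs: the value written combines the physical CPU mask
 * (shifted left by 8) with the doorbell number, matching the MSI
 * message data built in armada_370_xp_setup_msi_irq().
 */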
#ifdef CONFIG_SMP
static void armada_mpic_send_doorbell(const struct cpumask *mask,
				      unsigned int irq)
{
	int cpu;
	unsigned long map = 0;

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= 1 << cpu_logical_map(cpu);

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb();

	/* Trigger the doorbell interrupt on the target CPUs */
	writel((map << 8) | irq, main_int_base +
		ARMADA_370_XP_SW_TRIG_INT_OFFS);
}

static void armada_xp_mpic_smp_cpu_init(void)
{
	/* Clear pending IPIs */
	writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

	/* Enable first 8 IPIs */
	writel(IPI_DOORBELL_MASK, per_cpu_int_base +
		ARMADA_370_XP_IN_DRBEL_MSK_OFFS);

	/* Unmask IPI interrupt */
	writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}

static int armada_xp_mpic_secondary_init(struct notifier_block *nfb,
					 unsigned long action, void *hcpu)
{
	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		armada_xp_mpic_smp_cpu_init();
	return NOTIFY_OK;
}

static struct notifier_block armada_370_xp_mpic_cpu_notifier = {
	.notifier_call = armada_xp_mpic_secondary_init,
	.priority = 100,
};

#endif /* CONFIG_SMP */

static struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
	.map = armada_370_xp_mpic_irq_map,
	.xlate = irq_domain_xlate_onecell,
};

#ifdef CONFIG_PCI_MSI
static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
{
	u32 msimask, msinr;

	msimask = readl_relaxed(per_cpu_int_base +
				ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
		& PCI_MSI_DOORBELL_MASK;

	writel(~msimask, per_cpu_int_base +
	       ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

	for (msinr = PCI_MSI_DOORBELL_START;
	     msinr < PCI_MSI_DOORBELL_END; msinr++) {
		int irq;

		if (!(msimask & BIT(msinr)))
			continue;

		irq = irq_find_mapping(armada_370_xp_msi_domain,
				       msinr - 16);

		if (is_chained)
			generic_handle_irq(irq);
		else
			handle_IRQ(irq, regs);
	}
}
#else
static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {}
#endif

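/*
 * Used when the MPIC is not the primary interrupt controller, i.e. when
 * it is chained behind a parent interrupt (see armada_370_xp_mpic_of_init()).
 * Pending interrupts are read from the ARMADA_375_PPI_CAUSE register of
 * the per-CPU bank; bit 0 reports the MSI doorbell.
 */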
static void armada_370_xp_mpic_handle_cascade_irq(unsigned int irq,
						  struct irq_desc *desc)
{
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned long irqmap, irqn;
	unsigned int cascade_irq;

	chained_irq_enter(chip, desc);

	irqmap = readl_relaxed(per_cpu_int_base + ARMADA_375_PPI_CAUSE);

	if (irqmap & BIT(0)) {
		armada_370_xp_handle_msi_irq(NULL, true);
		irqmap &= ~BIT(0);
	}

	for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) {
		cascade_irq = irq_find_mapping(armada_370_xp_mpic_domain, irqn);
		generic_handle_irq(cascade_irq);
	}

	chained_irq_exit(chip, desc);
}

static void __exception_irq_entry
armada_370_xp_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;

	do {
		irqstat = readl_relaxed(per_cpu_int_base +
					ARMADA_370_XP_CPU_INTACK_OFFS);
		irqnr = irqstat & 0x3FF;

		if (irqnr > 1022)
			break;

		if (irqnr > 1) {
			irqnr = irq_find_mapping(armada_370_xp_mpic_domain,
						 irqnr);
			handle_IRQ(irqnr, regs);
			continue;
		}

		/* MSI handling */
		if (irqnr == 1)
			armada_370_xp_handle_msi_irq(regs, false);

#ifdef CONFIG_SMP
		/* IPI Handling */
		if (irqnr == 0) {
			u32 ipimask, ipinr;

			ipimask = readl_relaxed(per_cpu_int_base +
						ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
				& IPI_DOORBELL_MASK;

			writel(~ipimask, per_cpu_int_base +
				ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

			/* Handle all pending doorbells */
			for (ipinr = IPI_DOORBELL_START;
			     ipinr < IPI_DOORBELL_END; ipinr++) {
				if (ipimask & (0x1 << ipinr))
					handle_IPI(ipinr, regs);
			}
			continue;
		}
#endif

	} while (1);
}

static int __init armada_370_xp_mpic_of_init(struct device_node *node,
					     struct device_node *parent)
{
	struct resource main_int_res, per_cpu_int_res;
	int parent_irq;
	u32 control;

	BUG_ON(of_address_to_resource(node, 0, &main_int_res));
	BUG_ON(of_address_to_resource(node, 1, &per_cpu_int_res));

	BUG_ON(!request_mem_region(main_int_res.start,
				   resource_size(&main_int_res),
				   node->full_name));
	BUG_ON(!request_mem_region(per_cpu_int_res.start,
				   resource_size(&per_cpu_int_res),
				   node->full_name));

	main_int_base = ioremap(main_int_res.start,
				resource_size(&main_int_res));
	BUG_ON(!main_int_base);

	per_cpu_int_base = ioremap(per_cpu_int_res.start,
				   resource_size(&per_cpu_int_res));
	BUG_ON(!per_cpu_int_base);

	control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);

	armada_370_xp_mpic_domain =
		irq_domain_add_linear(node, (control >> 2) & 0x3ff,
				      &armada_370_xp_mpic_irq_ops, NULL);

	BUG_ON(!armada_370_xp_mpic_domain);

#ifdef CONFIG_SMP
	armada_xp_mpic_smp_cpu_init();
#endif

	armada_370_xp_msi_init(node, main_int_res.start);

	parent_irq = irq_of_parse_and_map(node, 0);
	if (parent_irq <= 0) {
		irq_set_default_host(armada_370_xp_mpic_domain);
		set_handle_irq(armada_370_xp_handle_irq);
#ifdef CONFIG_SMP
		set_smp_cross_call(armada_mpic_send_doorbell);
		register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier);
#endif
	} else {
		irq_set_chained_handler(parent_irq,
					armada_370_xp_mpic_handle_cascade_irq);
	}

	return 0;
}

IRQCHIP_DECLARE(armada_370_xp_mpic, "marvell,mpic", armada_370_xp_mpic_of_init);