/*
 * Copyright (C) 2007-2011 Freescale Semiconductor, Inc.
 *
 * Author: Tony Li <tony.li@freescale.com>
 *	   Jason Jin <Jason.jin@freescale.com>
 *
 * The hwirq alloc and free code is reused from sysdev/mpic_msi.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2 of the
 * License.
 *
 */
#include <linux/irq.h>
#include <linux/bootmem.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/mpic.h>
#include <asm/fsl_hcalls.h>

#include "fsl_msi.h"
#include "fsl_pci.h"

#define MSIIR_OFFSET_MASK	0xfffff
#define MSIIR_IBS_SHIFT		0
#define MSIIR_SRS_SHIFT		5
#define MSIIR1_IBS_SHIFT	4
#define MSIIR1_SRS_SHIFT	0
#define MSI_SRS_MASK		0xf
#define MSI_IBS_MASK		0x1f

#define msi_hwirq(msi, msir_index, intr_index) \
		((msir_index) << (msi)->srs_shift | \
		 ((intr_index) << (msi)->ibs_shift))

static LIST_HEAD(msi_head);

struct fsl_msi_feature {
	u32 fsl_pic_ip;
	u32 msiir_offset; /* Offset of MSIIR, relative to start of MSIR bank */
};

struct fsl_msi_cascade_data {
	struct fsl_msi *msi_data;
	int index;
};

static inline u32 fsl_msi_read(u32 __iomem *base, unsigned int reg)
{
	return in_be32(base + (reg >> 2));
}

/*
 * Nothing to do here: the MSIR register has already been read in the
 * cascade handler, so this MSI interrupt has already been acked.
 */
static void fsl_msi_end_irq(struct irq_data *d)
{
}

static struct irq_chip fsl_msi_chip = {
	.irq_mask	= mask_msi_irq,
	.irq_unmask	= unmask_msi_irq,
	.irq_ack	= fsl_msi_end_irq,
	.name		= "FSL-MSI",
};

static int fsl_msi_host_map(struct irq_domain *h, unsigned int virq,
			    irq_hw_number_t hw)
{
	struct fsl_msi *msi_data = h->host_data;
	struct irq_chip *chip = &fsl_msi_chip;

	irq_set_status_flags(virq, IRQ_TYPE_EDGE_FALLING);

	irq_set_chip_data(virq, msi_data);
	irq_set_chip_and_handler(virq, chip, handle_edge_irq);

	return 0;
}

static const struct irq_domain_ops fsl_msi_host_ops = {
	.map = fsl_msi_host_map,
};

static int fsl_msi_init_allocator(struct fsl_msi *msi_data)
{
	int rc, hwirq;

	rc = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS_MAX,
			      msi_data->irqhost->of_node);
	if (rc)
		return rc;

	/*
	 * Reserve all the hwirqs.
	 * The available hwirqs will be released in fsl_msi_setup_hwirq().
	 */
	for (hwirq = 0; hwirq < NR_MSI_IRQS_MAX; hwirq++)
		msi_bitmap_reserve_hwirq(&msi_data->bitmap, hwirq);

	return 0;
}

static int fsl_msi_check_device(struct pci_dev *pdev, int nvec, int type)
{
	if (type == PCI_CAP_ID_MSIX)
		pr_debug("fslmsi: MSI-X untested, trying anyway.\n");

	return 0;
}

static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct msi_desc *entry;
	struct fsl_msi *msi_data;

	list_for_each_entry(entry, &pdev->msi_list, list) {
		if (entry->irq == NO_IRQ)
			continue;
		msi_data = irq_get_chip_data(entry->irq);
		irq_set_msi_desc(entry->irq, NULL);
		msi_bitmap_free_hwirqs(&msi_data->bitmap,
				       virq_to_hw(entry->irq), 1);
		irq_dispose_mapping(entry->irq);
	}

	return;
}

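/*
 * Note on the MSI message built below: it has two pieces.
 *
 *  - address: the physical address of the MSIIR (or MSIIR1) register,
 *    taken from the "msi-address-64" property when present, otherwise
 *    computed from fsl_pci_immrbar_base() plus msiir_offset;
 *  - data: the hwirq number, which packs the MSIR bank index (SRS) and
 *    the bit within that bank (IBS) according to msi_hwirq() above.
 *
 * For example, with the MSIIR layout (srs_shift = 5, ibs_shift = 0),
 * bank 1 / bit 3 encodes to (1 << 5) | 3 = 0x23, while the MSIIR1
 * layout (srs_shift = 0, ibs_shift = 4) encodes the same pair as
 * (1 << 0) | (3 << 4) = 0x31.
 */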
static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq,
				struct msi_msg *msg,
				struct fsl_msi *fsl_msi_data)
{
	struct fsl_msi *msi_data = fsl_msi_data;
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	u64 address;		/* Physical address of the MSIIR */
	int len;
	const __be64 *reg;

	/* If the msi-address-64 property exists, then use it */
	reg = of_get_property(hose->dn, "msi-address-64", &len);
	if (reg && (len == sizeof(u64)))
		address = be64_to_cpup(reg);
	else
		address = fsl_pci_immrbar_base(hose) + msi_data->msiir_offset;

	msg->address_lo = lower_32_bits(address);
	msg->address_hi = upper_32_bits(address);

	msg->data = hwirq;

	pr_debug("%s: allocated srs: %d, ibs: %d\n", __func__,
		 (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK,
		 (hwirq >> msi_data->ibs_shift) & MSI_IBS_MASK);
}

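/*
 * MSI setup walks every MSI descriptor of the device: it picks an MSI
 * bank (honouring an "fsl,msi" phandle in the PCI node if one exists),
 * allocates a hwirq from that bank's bitmap, maps it to a virq, and
 * finally composes and writes the MSI message into the device.
 */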
static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct device_node *np;
	phandle phandle = 0;
	int rc, hwirq = -ENOMEM;
	unsigned int virq;
	struct msi_desc *entry;
	struct msi_msg msg;
	struct fsl_msi *msi_data;

	/*
	 * If the PCI node has an fsl,msi property, then we need to use it
	 * to find the specific MSI.
	 */
	np = of_parse_phandle(hose->dn, "fsl,msi", 0);
	if (np) {
		if (of_device_is_compatible(np, "fsl,mpic-msi") ||
		    of_device_is_compatible(np, "fsl,vmpic-msi"))
			phandle = np->phandle;
		else {
			dev_err(&pdev->dev,
				"node %s has an invalid fsl,msi phandle %u\n",
				hose->dn->full_name, np->phandle);
			return -EINVAL;
		}
	}

	list_for_each_entry(entry, &pdev->msi_list, list) {
		/*
		 * Loop over all the MSI devices until we find one that has an
		 * available interrupt.
		 */
		list_for_each_entry(msi_data, &msi_head, list) {
			/*
			 * If the PCI node has an fsl,msi property, then we
			 * restrict our search to the corresponding MSI node.
			 * The simplest way is to skip over MSI nodes with the
			 * wrong phandle. Under the Freescale hypervisor, this
			 * has the additional benefit of skipping over MSI
			 * nodes that are not mapped in the PAMU.
			 */
			if (phandle && (phandle != msi_data->phandle))
				continue;

			hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
			if (hwirq >= 0)
				break;
		}

		if (hwirq < 0) {
			rc = hwirq;
			dev_err(&pdev->dev, "could not allocate MSI interrupt\n");
			goto out_free;
		}

		virq = irq_create_mapping(msi_data->irqhost, hwirq);

		if (virq == NO_IRQ) {
			dev_err(&pdev->dev, "failed to map hwirq %i\n", hwirq);
			msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
			rc = -ENOSPC;
			goto out_free;
		}
		/* chip_data is msi_data via host->hostdata in host->map() */
		irq_set_msi_desc(virq, entry);

		fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data);
		write_msi_msg(virq, &msg);
	}
	return 0;

out_free:
	/* The allocated IRQs are freed by the caller of this function */
	return rc;
}

static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irq_data *idata = irq_desc_get_irq_data(desc);
	unsigned int cascade_irq;
	struct fsl_msi *msi_data;
	int msir_index = -1;
	u32 msir_value = 0;
	u32 intr_index;
	u32 have_shift = 0;
	struct fsl_msi_cascade_data *cascade_data;

	cascade_data = irq_get_handler_data(irq);
	msi_data = cascade_data->msi_data;

	raw_spin_lock(&desc->lock);
	if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) {
		if (chip->irq_mask_ack)
			chip->irq_mask_ack(idata);
		else {
			chip->irq_mask(idata);
			chip->irq_ack(idata);
		}
	}

	if (unlikely(irqd_irq_inprogress(idata)))
		goto unlock;

	msir_index = cascade_data->index;

	if (msir_index >= NR_MSI_REG_MAX)
		cascade_irq = NO_IRQ;

	irqd_set_chained_irq_inprogress(idata);
	switch (msi_data->feature & FSL_PIC_IP_MASK) {
	case FSL_PIC_IP_MPIC:
		msir_value = fsl_msi_read(msi_data->msi_regs,
			msir_index * 0x10);
		break;
	case FSL_PIC_IP_IPIC:
		msir_value = fsl_msi_read(msi_data->msi_regs, msir_index * 0x4);
		break;
#ifdef CONFIG_EPAPR_PARAVIRT
	case FSL_PIC_IP_VMPIC: {
		unsigned int ret;
		ret = fh_vmpic_get_msir(virq_to_hw(irq), &msir_value);
		if (ret) {
			pr_err("fsl-msi: fh_vmpic_get_msir() failed for irq %u (ret=%u)\n",
			       irq, ret);
			msir_value = 0;
		}
		break;
	}
#endif
	}

	while (msir_value) {
		intr_index = ffs(msir_value) - 1;

		cascade_irq = irq_linear_revmap(msi_data->irqhost,
				msi_hwirq(msi_data, msir_index,
					  intr_index + have_shift));
		if (cascade_irq != NO_IRQ)
			generic_handle_irq(cascade_irq);
		have_shift += intr_index + 1;
		msir_value = msir_value >> (intr_index + 1);
	}
	irqd_clr_chained_irq_inprogress(idata);

	switch (msi_data->feature & FSL_PIC_IP_MASK) {
	case FSL_PIC_IP_MPIC:
	case FSL_PIC_IP_VMPIC:
		chip->irq_eoi(idata);
		break;
	case FSL_PIC_IP_IPIC:
		if (!irqd_irq_disabled(idata) && chip->irq_unmask)
			chip->irq_unmask(idata);
		break;
	}
unlock:
	raw_spin_unlock(&desc->lock);
}

static int fsl_of_msi_remove(struct platform_device *ofdev)
{
	struct fsl_msi *msi = platform_get_drvdata(ofdev);
	int virq, i;
	struct fsl_msi_cascade_data *cascade_data;

	if (msi->list.prev != NULL)
		list_del(&msi->list);
	for (i = 0; i < NR_MSI_REG_MAX; i++) {
		virq = msi->msi_virqs[i];
		if (virq != NO_IRQ) {
			cascade_data = irq_get_handler_data(virq);
			kfree(cascade_data);
			irq_dispose_mapping(virq);
		}
	}
	if (msi->bitmap.bitmap)
		msi_bitmap_free(&msi->bitmap);
	if ((msi->feature & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC)
		iounmap(msi->msi_regs);
	kfree(msi);

	return 0;
}

static struct lock_class_key fsl_msi_irq_class;

static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
			       int offset, int irq_index)
{
	struct fsl_msi_cascade_data *cascade_data = NULL;
	int virt_msir, i;

	virt_msir = irq_of_parse_and_map(dev->dev.of_node, irq_index);
	if (virt_msir == NO_IRQ) {
		dev_err(&dev->dev, "%s: Cannot translate IRQ index %d\n",
			__func__, irq_index);
		return 0;
	}

	cascade_data = kzalloc(sizeof(struct fsl_msi_cascade_data), GFP_KERNEL);
	if (!cascade_data) {
		dev_err(&dev->dev, "No memory for MSI cascade data\n");
		return -ENOMEM;
	}
	irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class);
	msi->msi_virqs[irq_index] = virt_msir;
	cascade_data->index = offset;
	cascade_data->msi_data = msi;
	irq_set_handler_data(virt_msir, cascade_data);
	irq_set_chained_handler(virt_msir, fsl_msi_cascade);

	/* Release the hwirqs corresponding to this MSI register */
	for (i = 0; i < IRQS_PER_MSI_REG; i++)
		msi_bitmap_free_hwirqs(&msi->bitmap,
				       msi_hwirq(msi, offset, i), 1);

	return 0;
}

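/*
 * hwirq bookkeeping: fsl_msi_init_allocator() reserves every hwirq up
 * front, and fsl_msi_setup_hwirq() then releases the IRQS_PER_MSI_REG
 * hwirqs behind each MSIR bank whose cascade interrupt could actually
 * be mapped.  Only hwirqs backed by a working cascade are therefore
 * ever handed out by fsl_setup_msi_irqs().
 */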
static const struct of_device_id fsl_of_msi_ids[];
static int fsl_of_msi_probe(struct platform_device *dev)
{
	const struct of_device_id *match;
	struct fsl_msi *msi;
	struct resource res, msiir;
	int err, i, j, irq_index, count;
	const u32 *p;
	const struct fsl_msi_feature *features;
	int len;
	u32 offset;

	match = of_match_device(fsl_of_msi_ids, &dev->dev);
	if (!match)
		return -EINVAL;
	features = match->data;

	printk(KERN_DEBUG "Setting up Freescale MSI support\n");

	msi = kzalloc(sizeof(struct fsl_msi), GFP_KERNEL);
	if (!msi) {
		dev_err(&dev->dev, "No memory for MSI structure\n");
		return -ENOMEM;
	}
	platform_set_drvdata(dev, msi);

	msi->irqhost = irq_domain_add_linear(dev->dev.of_node,
				NR_MSI_IRQS_MAX, &fsl_msi_host_ops, msi);

	if (msi->irqhost == NULL) {
		dev_err(&dev->dev, "No memory for MSI irqhost\n");
		err = -ENOMEM;
		goto error_out;
	}

	/*
	 * Under the Freescale hypervisor, the msi nodes don't have a 'reg'
	 * property. Instead, we use hypercalls to access the MSI.
	 */
	if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC) {
		err = of_address_to_resource(dev->dev.of_node, 0, &res);
		if (err) {
			dev_err(&dev->dev, "invalid resource for node %s\n",
				dev->dev.of_node->full_name);
			goto error_out;
		}

		msi->msi_regs = ioremap(res.start, resource_size(&res));
		if (!msi->msi_regs) {
			err = -ENOMEM;
			dev_err(&dev->dev, "could not map node %s\n",
				dev->dev.of_node->full_name);
			goto error_out;
		}
		msi->msiir_offset =
			features->msiir_offset + (res.start & 0xfffff);

		/*
		 * First read the MSIIR/MSIIR1 offset from the dts.
		 * On failure, use the hardcoded MSIIR offset.
		 */
		if (of_address_to_resource(dev->dev.of_node, 1, &msiir))
			msi->msiir_offset = features->msiir_offset +
					    (res.start & MSIIR_OFFSET_MASK);
		else
			msi->msiir_offset = msiir.start & MSIIR_OFFSET_MASK;
	}

	msi->feature = features->fsl_pic_ip;

	/*
	 * Remember the phandle, so that we can match with any PCI nodes
	 * that have an "fsl,msi" property.
	 */
	msi->phandle = dev->dev.of_node->phandle;

	err = fsl_msi_init_allocator(msi);
	if (err) {
		dev_err(&dev->dev, "Error allocating MSI bitmap\n");
		goto error_out;
	}

	p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len);

	if (of_device_is_compatible(dev->dev.of_node, "fsl,mpic-msi-v4.3")) {
		msi->srs_shift = MSIIR1_SRS_SHIFT;
		msi->ibs_shift = MSIIR1_IBS_SHIFT;
		if (p)
			dev_warn(&dev->dev, "%s: does not support the msi-available-ranges property\n",
				 __func__);

		for (irq_index = 0; irq_index < NR_MSI_REG_MSIIR1;
		     irq_index++) {
			err = fsl_msi_setup_hwirq(msi, dev,
						  irq_index, irq_index);
			if (err)
				goto error_out;
		}
	} else {
		static const u32 all_avail[] =
			{ 0, NR_MSI_REG_MSIIR * IRQS_PER_MSI_REG };

		msi->srs_shift = MSIIR_SRS_SHIFT;
		msi->ibs_shift = MSIIR_IBS_SHIFT;

		if (p && len % (2 * sizeof(u32)) != 0) {
			dev_err(&dev->dev, "%s: Malformed msi-available-ranges property\n",
				__func__);
			err = -EINVAL;
			goto error_out;
		}

		if (!p) {
			p = all_avail;
			len = sizeof(all_avail);
		}

		for (irq_index = 0, i = 0; i < len / (2 * sizeof(u32)); i++) {
			if (p[i * 2] % IRQS_PER_MSI_REG ||
			    p[i * 2 + 1] % IRQS_PER_MSI_REG) {
				pr_warn("%s: %s: msi available range of %u at %u is not IRQ-aligned\n",
					__func__, dev->dev.of_node->full_name,
					p[i * 2 + 1], p[i * 2]);
				err = -EINVAL;
				goto error_out;
			}

			offset = p[i * 2] / IRQS_PER_MSI_REG;
			count = p[i * 2 + 1] / IRQS_PER_MSI_REG;

			for (j = 0; j < count; j++, irq_index++) {
				err = fsl_msi_setup_hwirq(msi, dev, offset + j,
							  irq_index);
				if (err)
					goto error_out;
			}
		}
	}

	list_add_tail(&msi->list, &msi_head);

	/*
	 * Setting ppc_md.setup_msi_irqs more than once does no harm, as long
	 * as it always points at this driver.
	 */
	if (!ppc_md.setup_msi_irqs) {
		ppc_md.setup_msi_irqs = fsl_setup_msi_irqs;
		ppc_md.teardown_msi_irqs = fsl_teardown_msi_irqs;
		ppc_md.msi_check_device = fsl_msi_check_device;
	} else if (ppc_md.setup_msi_irqs != fsl_setup_msi_irqs) {
		dev_err(&dev->dev, "Different MSI driver already installed!\n");
		err = -ENODEV;
		goto error_out;
	}
	return 0;
error_out:
	fsl_of_msi_remove(dev);
	return err;
}

static const struct fsl_msi_feature mpic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_MPIC,
	.msiir_offset = 0x140,
};

static const struct fsl_msi_feature ipic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_IPIC,
	.msiir_offset = 0x38,
};

static const struct fsl_msi_feature vmpic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_VMPIC,
	.msiir_offset = 0,
};

static const struct of_device_id fsl_of_msi_ids[] = {
	{
		.compatible = "fsl,mpic-msi",
		.data = &mpic_msi_feature,
	},
	{
		.compatible = "fsl,mpic-msi-v4.3",
		.data = &mpic_msi_feature,
	},
	{
		.compatible = "fsl,ipic-msi",
		.data = &ipic_msi_feature,
	},
#ifdef CONFIG_EPAPR_PARAVIRT
	{
		.compatible = "fsl,vmpic-msi",
		.data = &vmpic_msi_feature,
	},
#endif
	{}
};

static struct platform_driver fsl_of_msi_driver = {
	.driver = {
		.name = "fsl-msi",
		.owner = THIS_MODULE,
		.of_match_table = fsl_of_msi_ids,
	},
	.probe = fsl_of_msi_probe,
	.remove = fsl_of_msi_remove,
};

static __init int fsl_of_msi_init(void)
{
	return platform_driver_register(&fsl_of_msi_driver);
}

subsys_initcall(fsl_of_msi_init);
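
/*
 * For illustration only (register offsets and interrupt specifiers are
 * board-specific, not part of this file): a typical "fsl,mpic-msi" node
 * describes the MSIR bank and one cascade interrupt per MSIR, and a PCI
 * controller node can point at it with an "fsl,msi" phandle, e.g.:
 *
 *	msi0: msi@41600 {
 *		compatible = "fsl,mpic-msi";
 *		reg = <0x41600 0x80>;
 *		msi-available-ranges = <0 0x100>;
 *		interrupts = <0xe0 0 0xe1 0 0xe2 0 0xe3 0
 *			      0xe4 0 0xe5 0 0xe6 0 0xe7 0>;
 *		interrupt-parent = <&mpic>;
 *	};
 *
 *	pci0: pcie@ffe08000 {
 *		...
 *		fsl,msi = <&msi0>;
 *	};
 */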