// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2007, Michael Ellerman, IBM Corporation.
 *
 * Axon MSIC (MSI controller) support.  The hardware DMAs incoming MSI
 * writes into a FIFO buffer in system memory; axon_msi_cascade() drains
 * that FIFO and re-dispatches each entry as a Linux interrupt.
 */


#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/of_irq.h>

#include <asm/dcr.h>
#include <asm/machdep.h>

#include "cell.h"

/*
 * MSIC registers, specified as offsets from dcr_base
 */
#define MSIC_CTRL_REG	0x0

/* Base Address registers specify FIFO location in BE memory */
#define MSIC_BASE_ADDR_HI_REG	0x3
#define MSIC_BASE_ADDR_LO_REG	0x4

/* Hold the read/write offsets into the FIFO */
#define MSIC_READ_OFFSET_REG	0x5
#define MSIC_WRITE_OFFSET_REG	0x6


/* MSIC control register flags */
#define MSIC_CTRL_ENABLE		0x0001
#define MSIC_CTRL_FIFO_FULL_ENABLE	0x0002
#define MSIC_CTRL_IRQ_ENABLE		0x0008
#define MSIC_CTRL_FULL_STOP_ENABLE	0x0010

/*
 * The MSIC can be configured to use a FIFO of 32KB, 64KB, 128KB or 256KB.
 * Currently we're using a 64KB FIFO size.
 */
#define MSIC_FIFO_SIZE_SHIFT	16
#define MSIC_FIFO_SIZE_BYTES	(1 << MSIC_FIFO_SIZE_SHIFT)

/*
 * To configure the FIFO size as (1 << n) bytes, we write (n - 15) into bits
 * 8-9 of the MSIC control reg.
 */
#define MSIC_CTRL_FIFO_SIZE	(((MSIC_FIFO_SIZE_SHIFT - 15) << 8) & 0x300)

/*
 * We need to mask the read/write offsets to make sure they stay within
 * the bounds of the FIFO. Also they should always be 16-byte aligned.
 */
#define MSIC_FIFO_SIZE_MASK	((MSIC_FIFO_SIZE_BYTES - 1) & ~0xFu)

/* Each entry in the FIFO is 16 bytes, the first 4 bytes hold the irq # */
#define MSIC_FIFO_ENTRY_SIZE	0x10


/* Per-MSIC state; one instance is allocated per probed device node. */
struct axon_msic {
	struct irq_domain *irq_domain;	/* nomap domain the MSI virqs live in */
	__le32 *fifo_virt;		/* kernel mapping of the DMA FIFO */
	dma_addr_t fifo_phys;		/* bus address written to BASE_ADDR regs */
	dcr_host_t dcr_host;		/* DCR window for the MSIC registers */
	u32 read_offset;		/* software copy of our FIFO read pointer */
#ifdef DEBUG
	u32 __iomem *trigger;		/* debugfs: MMIO address poked to fire an MSI */
#endif
};

#ifdef DEBUG
void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic);
#else
static inline void axon_msi_debug_setup(struct device_node *dn,
					struct axon_msic *msic) { }
#endif


/* Write @val to MSIC register @dcr_n, with a debug trace. */
static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
{
	/*
	 * NOTE(review): arguments are printed as (val, dcr_n) — easy to
	 * misread as (reg, value); the actual write below is reg = dcr_n.
	 */
	pr_devel("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n);

	dcr_write(msic->dcr_host, dcr_n, val);
}

/*
 * Chained handler for the MSIC's upstream interrupt.  Drains FIFO entries
 * between our cached read offset and the hardware write offset; the first
 * word of each entry is the Linux virq to dispatch.  Handled slots are
 * overwritten with 0xffffffff so stale DMA data can be detected.
 */
static void axon_msi_cascade(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct axon_msic *msic = irq_desc_get_handler_data(desc);
	u32 write_offset, msi;
	int idx;
	int retry = 0;

	write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG);
	pr_devel("axon_msi: original write_offset 0x%x\n", write_offset);

	/* write_offset doesn't wrap properly, so we have to mask it */
	write_offset &= MSIC_FIFO_SIZE_MASK;

	while (msic->read_offset != write_offset && retry < 100) {
		idx = msic->read_offset / sizeof(__le32);
		msi = le32_to_cpu(msic->fifo_virt[idx]);
		/* Only the low 16 bits of the entry carry the virq. */
		msi &= 0xFFFF;

		pr_devel("axon_msi: woff %x roff %x msi %x\n",
			 write_offset, msic->read_offset, msi);

		/* Sanity check: the value must be a virq we mapped for this MSIC. */
		if (msi < nr_irqs && irq_get_chip_data(msi) == msic) {
			generic_handle_irq(msi);
			msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
		} else {
			/*
			 * Reading the MSIC_WRITE_OFFSET_REG does not
			 * reliably flush the outstanding DMA to the
			 * FIFO buffer. Here we were reading stale
			 * data, so we need to retry.
			 */
			udelay(1);
			retry++;
			pr_devel("axon_msi: invalid irq 0x%x!\n", msi);
			continue;
		}

		if (retry) {
			pr_devel("axon_msi: late irq 0x%x, retry %d\n",
				 msi, retry);
			retry = 0;
		}

		/* Advance to the next 16-byte entry, wrapping at FIFO end. */
		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
	}

	if (retry) {
		/* Gave up after 100 retries: skip the stale entry entirely. */
		printk(KERN_WARNING "axon_msi: irq timed out\n");

		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
	}

	chip->irq_eoi(&desc->irq_data);
}

/*
 * Walk up the device tree from @dev's node looking for an "msi-translator"
 * phandle, and return the axon_msic stored as host_data on the irq_domain
 * of the node it references.  Returns NULL on any failure.
 */
static struct axon_msic *find_msi_translator(struct pci_dev *dev)
{
	struct irq_domain *irq_domain;
	struct device_node *dn, *tmp;
	const phandle *ph;
	struct axon_msic *msic = NULL;

	dn = of_node_get(pci_device_to_OF_node(dev));
	if (!dn) {
		dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
		return NULL;
	}

	/* of_get_next_parent() drops the ref on dn while taking the parent's. */
	for (; dn; dn = of_get_next_parent(dn)) {
		ph = of_get_property(dn, "msi-translator", NULL);
		if (ph)
			break;
	}

	if (!ph) {
		dev_dbg(&dev->dev,
			"axon_msi: no msi-translator property found\n");
		goto out_error;
	}

	tmp = dn;
	dn = of_find_node_by_phandle(*ph);
	of_node_put(tmp);
	if (!dn) {
		dev_dbg(&dev->dev,
			"axon_msi: msi-translator doesn't point to a node\n");
		goto out_error;
	}

	irq_domain = irq_find_host(dn);
	if (!irq_domain) {
		dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %pOF\n",
			dn);
		goto out_error;
	}

	msic = irq_domain->host_data;

out_error:
	of_node_put(dn);

	return msic;
}

/*
 * Fill in @msg's address from the first "msi-address-64" (only when the
 * device is 64-bit MSI capable) or "msi-address-32" property found while
 * walking up from @dev's node.  Returns 0 or a negative errno.
 */
static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
{
	struct device_node *dn;
	int len;
	const u32 *prop;

	dn = of_node_get(pci_device_to_OF_node(dev));
	if (!dn) {
		dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
		return -ENODEV;
	}

	for (; dn; dn = of_get_next_parent(dn)) {
		if (!dev->no_64bit_msi) {
			prop = of_get_property(dn, "msi-address-64", &len);
			if (prop)
				break;
		}

		prop = of_get_property(dn, "msi-address-32", &len);
		if (prop)
			break;
	}

	if (!prop) {
		/* Loop exhausted: dn is already NULL, no ref left to drop. */
		dev_dbg(&dev->dev,
			"axon_msi: no msi-address-(32|64) properties found\n");
		return -ENOENT;
	}

	switch (len) {
	case 8:
		/* Two cells: high word then low word. */
		msg->address_hi = prop[0];
		msg->address_lo = prop[1];
		break;
	case 4:
		msg->address_hi = 0;
		msg->address_lo = prop[0];
		break;
	default:
		dev_dbg(&dev->dev,
			"axon_msi: malformed msi-address-(32|64) property\n");
		of_node_put(dn);
		return -EINVAL;
	}

	of_node_put(dn);

	return 0;
}

/* pci_controller_ops hook: allocate a virq and write an MSI msg per desc. */
static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	unsigned int virq, rc;
	struct msi_desc *entry;
	struct msi_msg msg;
	struct axon_msic *msic;

	msic = find_msi_translator(dev);
	if (!msic)
		return -ENODEV;

	rc = setup_msi_msg_address(dev, &msg);
	if (rc)
		return rc;

	msi_for_each_desc(entry, &dev->dev, MSI_DESC_NOTASSOCIATED) {
		virq = irq_create_direct_mapping(msic->irq_domain);
		if (!virq) {
			dev_warn(&dev->dev,
				 "axon_msi: virq allocation failed!\n");
			return -1;
		}
		dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq);

		irq_set_msi_desc(virq, entry);
		/* MSI data is the virq itself; the cascade dispatches it directly. */
		msg.data = virq;
		pci_write_msi_msg(virq, &msg);
	}

	return 0;
}

/* pci_controller_ops hook: undo axon_msi_setup_msi_irqs() for @dev. */
static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;

	dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n");

	msi_for_each_desc(entry, &dev->dev, MSI_DESC_ASSOCIATED) {
		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
	}
}

/* irq_chip for the MSI virqs; mask/unmask go through the PCI MSI layer. */
static struct irq_chip msic_irq_chip = {
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
	.irq_shutdown	= pci_msi_mask_irq,
	.name		= "AXON-MSI",
};

/* irq_domain .map hook: attach our chip and MSIC pointer to a new virq. */
static int msic_host_map(struct irq_domain *h, unsigned int virq,
			 irq_hw_number_t hw)
{
	irq_set_chip_data(virq, h->host_data);
	irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq);

	return 0;
}

static const struct irq_domain_ops msic_host_ops = {
	.map	= msic_host_map,
};

/* Platform shutdown hook: stop the MSIC from raising further interrupts. */
static void axon_msi_shutdown(struct platform_device *device)
{
	struct axon_msic *msic = dev_get_drvdata(&device->dev);
	u32 tmp;

	pr_devel("axon_msi: disabling %pOF\n",
		 irq_domain_get_of_node(msic->irq_domain));
	tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
	/* Clear both the global enable and the irq enable bits. */
	tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
	msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
}

/*
 * Probe one MSIC device node: map its DCRs, allocate the DMA FIFO, create
 * the irq domain, install the cascade handler, and enable the hardware.
 * On success also installs the MSI setup/teardown ops into
 * cell_pci_controller_ops.  Returns 0 on success, -1 on any failure.
 */
static int axon_msi_probe(struct platform_device *device)
{
	struct device_node *dn = device->dev.of_node;
	struct axon_msic *msic;
	unsigned int virq;
	int dcr_base, dcr_len;

	pr_devel("axon_msi: setting up dn %pOF\n", dn);

	msic = kzalloc(sizeof(*msic), GFP_KERNEL);
	if (!msic) {
		printk(KERN_ERR "axon_msi: couldn't allocate msic for %pOF\n",
		       dn);
		goto out;
	}

	dcr_base = dcr_resource_start(dn, 0);
	dcr_len = dcr_resource_len(dn, 0);

	if (dcr_base == 0 || dcr_len == 0) {
		printk(KERN_ERR
		       "axon_msi: couldn't parse dcr properties on %pOF\n",
		       dn);
		goto out_free_msic;
	}

	msic->dcr_host = dcr_map(dn, dcr_base, dcr_len);
	if (!DCR_MAP_OK(msic->dcr_host)) {
		printk(KERN_ERR "axon_msi: dcr_map failed for %pOF\n",
		       dn);
		goto out_free_msic;
	}

	msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES,
					     &msic->fifo_phys, GFP_KERNEL);
	if (!msic->fifo_virt) {
		printk(KERN_ERR "axon_msi: couldn't allocate fifo for %pOF\n",
		       dn);
		goto out_free_msic;
	}

	virq = irq_of_parse_and_map(dn, 0);
	if (!virq) {
		printk(KERN_ERR "axon_msi: irq parse and map failed for %pOF\n",
		       dn);
		goto out_free_fifo;
	}
	/* Fill the FIFO with 0xff so the cascade can spot stale entries. */
	memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);

	/* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */
	msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic);
	if (!msic->irq_domain) {
		printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %pOF\n",
		       dn);
		goto out_free_fifo;
	}

	irq_set_handler_data(virq, msic);
	irq_set_chained_handler(virq, axon_msi_cascade);
	pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq);

	/* Enable the MSIC hardware */
	msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, msic->fifo_phys >> 32);
	msic_dcr_write(msic, MSIC_BASE_ADDR_LO_REG,
		       msic->fifo_phys & 0xFFFFFFFF);
	msic_dcr_write(msic, MSIC_CTRL_REG,
		       MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE |
		       MSIC_CTRL_FIFO_SIZE);

	/* Start draining from wherever the hardware write pointer is now. */
	msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG)
				& MSIC_FIFO_SIZE_MASK;

	dev_set_drvdata(&device->dev, msic);

	cell_pci_controller_ops.setup_msi_irqs = axon_msi_setup_msi_irqs;
	cell_pci_controller_ops.teardown_msi_irqs = axon_msi_teardown_msi_irqs;

	axon_msi_debug_setup(dn, msic);

	printk(KERN_DEBUG "axon_msi: setup MSIC on %pOF\n", dn);

	return 0;

out_free_fifo:
	dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt,
			  msic->fifo_phys);
out_free_msic:
	kfree(msic);
out:

	return -1;
}

static const struct of_device_id axon_msi_device_id[] = {
	{
		.compatible	= "ibm,axon-msic"
	},
	{}
};

static struct platform_driver axon_msi_driver = {
	.probe		= axon_msi_probe,
	.shutdown	= axon_msi_shutdown,
	.driver		= {
		.name = "axon-msi",
		.of_match_table = axon_msi_device_id,
	},
};

static int __init axon_msi_init(void)
{
	return platform_driver_register(&axon_msi_driver);
}
subsys_initcall(axon_msi_init);


#ifdef DEBUG
/* debugfs write handler: fire an MSI by poking @val at the trigger MMIO. */
static int msic_set(void *data, u64 val)
{
	struct axon_msic *msic = data;
	out_le32(msic->trigger, val);
	return 0;
}

/* debugfs read handler: nothing meaningful to report, always yields 0. */
static int msic_get(void *data, u64 *val)
{
	*val = 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_msic, msic_get, msic_set, "%llu\n");

/*
 * Map the MSIC's trigger register and expose a debugfs file
 * ("msic_<nid>") that lets MSIs be injected by hand for testing.
 */
void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic)
{
	char name[8];
	u64 addr;

	addr = of_translate_address(dn, of_get_property(dn, "reg", NULL));
	if (addr == OF_BAD_ADDR) {
		pr_devel("axon_msi: couldn't translate reg property\n");
		return;
	}

	msic->trigger = ioremap(addr, 0x4);
	if (!msic->trigger) {
		pr_devel("axon_msi: ioremap failed\n");
		return;
	}

	snprintf(name, sizeof(name), "msic_%d", of_node_to_nid(dn));

	debugfs_create_file(name, 0600, arch_debugfs_dir, msic, &fops_msic);
}
#endif /* DEBUG */