1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * PCIe driver for Renesas R-Car SoCs 4 * Copyright (C) 2014-2020 Renesas Electronics Europe Ltd 5 * 6 * Based on: 7 * arch/sh/drivers/pci/pcie-sh7786.c 8 * arch/sh/drivers/pci/ops-sh7786.c 9 * Copyright (C) 2009 - 2011 Paul Mundt 10 * 11 * Author: Phil Edworthy <phil.edworthy@renesas.com> 12 */ 13 14 #include <linux/bitops.h> 15 #include <linux/clk.h> 16 #include <linux/delay.h> 17 #include <linux/interrupt.h> 18 #include <linux/irq.h> 19 #include <linux/irqdomain.h> 20 #include <linux/kernel.h> 21 #include <linux/init.h> 22 #include <linux/msi.h> 23 #include <linux/of_address.h> 24 #include <linux/of_irq.h> 25 #include <linux/of_pci.h> 26 #include <linux/of_platform.h> 27 #include <linux/pci.h> 28 #include <linux/phy/phy.h> 29 #include <linux/platform_device.h> 30 #include <linux/pm_runtime.h> 31 #include <linux/slab.h> 32 33 #include "pcie-rcar.h" 34 35 struct rcar_msi { 36 DECLARE_BITMAP(used, INT_PCI_MSI_NR); 37 struct irq_domain *domain; 38 struct msi_controller chip; 39 unsigned long pages; 40 struct mutex lock; 41 int irq1; 42 int irq2; 43 }; 44 45 static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip) 46 { 47 return container_of(chip, struct rcar_msi, chip); 48 } 49 50 /* Structure representing the PCIe interface */ 51 struct rcar_pcie_host { 52 struct rcar_pcie pcie; 53 struct device *dev; 54 struct phy *phy; 55 void __iomem *base; 56 struct list_head resources; 57 int root_bus_nr; 58 struct clk *bus_clk; 59 struct rcar_msi msi; 60 int (*phy_init_fn)(struct rcar_pcie_host *host); 61 }; 62 63 static u32 rcar_read_conf(struct rcar_pcie *pcie, int where) 64 { 65 unsigned int shift = BITS_PER_BYTE * (where & 3); 66 u32 val = rcar_pci_read_reg(pcie, where & ~3); 67 68 return val >> shift; 69 } 70 71 /* Serialization is provided by 'pci_lock' in drivers/pci/access.c */ 72 static int rcar_pcie_config_access(struct rcar_pcie_host *host, 73 unsigned char access_type, struct pci_bus *bus, 74 unsigned int devfn, int where, u32 *data) 75 { 76 struct rcar_pcie *pcie = &host->pcie; 77 unsigned int dev, func, reg, index; 78 79 dev = PCI_SLOT(devfn); 80 func = PCI_FUNC(devfn); 81 reg = where & ~3; 82 index = reg / 4; 83 84 /* 85 * While each channel has its own memory-mapped extended config 86 * space, it's generally only accessible when in endpoint mode. 87 * When in root complex mode, the controller is unable to target 88 * itself with either type 0 or type 1 accesses, and indeed, any 89 * controller initiated target transfer to its own config space 90 * result in a completer abort. 91 * 92 * Each channel effectively only supports a single device, but as 93 * the same channel <-> device access works for any PCI_SLOT() 94 * value, we cheat a bit here and bind the controller's config 95 * space to devfn 0 in order to enable self-enumeration. In this 96 * case the regular ECAR/ECDR path is sidelined and the mangled 97 * config access itself is initiated as an internal bus transaction. 98 */ 99 if (pci_is_root_bus(bus)) { 100 if (dev != 0) 101 return PCIBIOS_DEVICE_NOT_FOUND; 102 103 if (access_type == RCAR_PCI_ACCESS_READ) { 104 *data = rcar_pci_read_reg(pcie, PCICONF(index)); 105 } else { 106 /* Keep an eye out for changes to the root bus number */ 107 if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS)) 108 host->root_bus_nr = *data & 0xff; 109 110 rcar_pci_write_reg(pcie, *data, PCICONF(index)); 111 } 112 113 return PCIBIOS_SUCCESSFUL; 114 } 115 116 if (host->root_bus_nr < 0) 117 return PCIBIOS_DEVICE_NOT_FOUND; 118 119 /* Clear errors */ 120 rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR); 121 122 /* Set the PIO address */ 123 rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) | 124 PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR); 125 126 /* Enable the configuration access */ 127 if (bus->parent->number == host->root_bus_nr) 128 rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR); 129 else 130 rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR); 131 132 /* Check for errors */ 133 if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST) 134 return PCIBIOS_DEVICE_NOT_FOUND; 135 136 /* Check for master and target aborts */ 137 if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) & 138 (PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT)) 139 return PCIBIOS_DEVICE_NOT_FOUND; 140 141 if (access_type == RCAR_PCI_ACCESS_READ) 142 *data = rcar_pci_read_reg(pcie, PCIECDR); 143 else 144 rcar_pci_write_reg(pcie, *data, PCIECDR); 145 146 /* Disable the configuration access */ 147 rcar_pci_write_reg(pcie, 0, PCIECCTLR); 148 149 return PCIBIOS_SUCCESSFUL; 150 } 151 152 static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn, 153 int where, int size, u32 *val) 154 { 155 struct rcar_pcie_host *host = bus->sysdata; 156 int ret; 157 158 ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ, 159 bus, devfn, where, val); 160 if (ret != PCIBIOS_SUCCESSFUL) { 161 *val = 0xffffffff; 162 return ret; 163 } 164 165 if (size == 1) 166 *val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff; 167 else if (size == 2) 168 *val = (*val >> (BITS_PER_BYTE * (where & 2))) & 0xffff; 169 170 dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n", 171 bus->number, devfn, where, size, *val); 172 173 return ret; 174 } 175 176 /* Serialization is provided by 'pci_lock' in drivers/pci/access.c */ 177 static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn, 178 int where, int size, u32 val) 179 { 180 struct rcar_pcie_host *host = bus->sysdata; 181 unsigned int shift; 182 u32 data; 183 int ret; 184 185 ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ, 186 bus, devfn, where, &data); 187 if (ret != PCIBIOS_SUCCESSFUL) 188 return ret; 189 190 dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n", 191 bus->number, devfn, where, size, val); 192 193 if (size == 1) { 194 shift = BITS_PER_BYTE * (where & 3); 195 data &= ~(0xff << shift); 196 data |= ((val & 0xff) << shift); 197 } else if (size == 2) { 198 shift = BITS_PER_BYTE * (where & 2); 199 data &= ~(0xffff << shift); 200 data |= ((val & 0xffff) << shift); 201 } else 202 data = val; 203 204 ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_WRITE, 205 bus, devfn, where, &data); 206 207 return ret; 208 } 209 210 static struct pci_ops rcar_pcie_ops = { 211 .read = rcar_pcie_read_conf, 212 .write = rcar_pcie_write_conf, 213 }; 214 215 static int rcar_pcie_setup(struct list_head *resource, 216 struct rcar_pcie_host *host) 217 { 218 struct resource_entry *win; 219 int i = 0; 220 221 /* Setup PCI resources */ 222 resource_list_for_each_entry(win, &host->resources) { 223 struct resource *res = win->res; 224 225 if (!res->flags) 226 continue; 227 228 switch (resource_type(res)) { 229 case IORESOURCE_IO: 230 case IORESOURCE_MEM: 231 rcar_pcie_set_outbound(&host->pcie, i, win); 232 i++; 233 break; 234 case IORESOURCE_BUS: 235 host->root_bus_nr = res->start; 236 break; 237 default: 238 continue; 239 } 240 241 pci_add_resource(resource, res); 242 } 243 244 return 1; 245 } 246 247 static void rcar_pcie_force_speedup(struct rcar_pcie *pcie) 248 { 249 struct device *dev = pcie->dev; 250 unsigned int timeout = 1000; 251 u32 macsr; 252 253 if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS) 254 return; 255 256 if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) { 257 dev_err(dev, "Speed change already in progress\n"); 258 return; 259 } 260 261 macsr = rcar_pci_read_reg(pcie, MACSR); 262 if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS) 263 goto done; 264 265 /* Set target link speed to 5.0 GT/s */ 266 rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS, 267 PCI_EXP_LNKSTA_CLS_5_0GB); 268 269 /* Set speed change reason as intentional factor */ 270 rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0); 271 272 /* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */ 273 if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL)) 274 rcar_pci_write_reg(pcie, macsr, MACSR); 275 276 /* Start link speed change */ 277 rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE); 278 279 while (timeout--) { 280 macsr = rcar_pci_read_reg(pcie, MACSR); 281 if (macsr & SPCHGFIN) { 282 /* Clear the interrupt bits */ 283 rcar_pci_write_reg(pcie, macsr, MACSR); 284 285 if (macsr & SPCHGFAIL) 286 dev_err(dev, "Speed change failed\n"); 287 288 goto done; 289 } 290 291 msleep(1); 292 } 293 294 dev_err(dev, "Speed change timed out\n"); 295 296 done: 297 dev_info(dev, "Current link speed is %s GT/s\n", 298 (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5"); 299 } 300 301 static void rcar_pcie_hw_enable(struct rcar_pcie_host *host) 302 { 303 struct rcar_pcie *pcie = &host->pcie; 304 struct resource_entry *win; 305 LIST_HEAD(res); 306 int i = 0; 307 308 /* Try setting 5 GT/s link speed */ 309 rcar_pcie_force_speedup(pcie); 310 311 /* Setup PCI resources */ 312 resource_list_for_each_entry(win, &host->resources) { 313 struct resource *res = win->res; 314 315 if (!res->flags) 316 continue; 317 318 switch (resource_type(res)) { 319 case IORESOURCE_IO: 320 case IORESOURCE_MEM: 321 rcar_pcie_set_outbound(pcie, i, win); 322 i++; 323 break; 324 } 325 } 326 } 327 328 static int rcar_pcie_enable(struct rcar_pcie_host *host) 329 { 330 struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host); 331 struct rcar_pcie *pcie = &host->pcie; 332 struct device *dev = pcie->dev; 333 struct pci_bus *bus, *child; 334 int ret; 335 336 /* Try setting 5 GT/s link speed */ 337 rcar_pcie_force_speedup(pcie); 338 339 rcar_pcie_setup(&bridge->windows, host); 340 341 pci_add_flags(PCI_REASSIGN_ALL_BUS); 342 343 bridge->dev.parent = dev; 344 bridge->sysdata = host; 345 bridge->busnr = host->root_bus_nr; 346 bridge->ops = &rcar_pcie_ops; 347 bridge->map_irq = of_irq_parse_and_map_pci; 348 bridge->swizzle_irq = pci_common_swizzle; 349 if (IS_ENABLED(CONFIG_PCI_MSI)) 350 bridge->msi = &host->msi.chip; 351 352 ret = pci_scan_root_bus_bridge(bridge); 353 if (ret < 0) 354 return ret; 355 356 bus = bridge->bus; 357 358 pci_bus_size_bridges(bus); 359 pci_bus_assign_resources(bus); 360 361 list_for_each_entry(child, &bus->children, node) 362 pcie_bus_configure_settings(child); 363 364 pci_bus_add_devices(bus); 365 366 return 0; 367 } 368 369 static int phy_wait_for_ack(struct rcar_pcie *pcie) 370 { 371 struct device *dev = pcie->dev; 372 unsigned int timeout = 100; 373 374 while (timeout--) { 375 if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK) 376 return 0; 377 378 udelay(100); 379 } 380 381 dev_err(dev, "Access to PCIe phy timed out\n"); 382 383 return -ETIMEDOUT; 384 } 385 386 static void phy_write_reg(struct rcar_pcie *pcie, 387 unsigned int rate, u32 addr, 388 unsigned int lane, u32 data) 389 { 390 u32 phyaddr; 391 392 phyaddr = WRITE_CMD | 393 ((rate & 1) << RATE_POS) | 394 ((lane & 0xf) << LANE_POS) | 395 ((addr & 0xff) << ADR_POS); 396 397 /* Set write data */ 398 rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR); 399 rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR); 400 401 /* Ignore errors as they will be dealt with if the data link is down */ 402 phy_wait_for_ack(pcie); 403 404 /* Clear command */ 405 rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR); 406 rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR); 407 408 /* Ignore errors as they will be dealt with if the data link is down */ 409 phy_wait_for_ack(pcie); 410 } 411 412 static int rcar_pcie_hw_init(struct rcar_pcie *pcie) 413 { 414 int err; 415 416 /* Begin initialization */ 417 rcar_pci_write_reg(pcie, 0, PCIETCTLR); 418 419 /* Set mode */ 420 rcar_pci_write_reg(pcie, 1, PCIEMSR); 421 422 err = rcar_pcie_wait_for_phyrdy(pcie); 423 if (err) 424 return err; 425 426 /* 427 * Initial header for port config space is type 1, set the device 428 * class to match. Hardware takes care of propagating the IDSETR 429 * settings, so there is no need to bother with a quirk. 430 */ 431 rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1); 432 433 /* 434 * Setup Secondary Bus Number & Subordinate Bus Number, even though 435 * they aren't used, to avoid bridge being detected as broken. 436 */ 437 rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1); 438 rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1); 439 440 /* Initialize default capabilities. */ 441 rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP); 442 rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS), 443 PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4); 444 rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f, 445 PCI_HEADER_TYPE_BRIDGE); 446 447 /* Enable data link layer active state reporting */ 448 rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC, 449 PCI_EXP_LNKCAP_DLLLARC); 450 451 /* Write out the physical slot number = 0 */ 452 rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0); 453 454 /* Set the completion timer timeout to the maximum 50ms. */ 455 rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50); 456 457 /* Terminate list of capabilities (Next Capability Offset=0) */ 458 rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0); 459 460 /* Enable MSI */ 461 if (IS_ENABLED(CONFIG_PCI_MSI)) 462 rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR); 463 464 rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR); 465 466 /* Finish initialization - establish a PCI Express link */ 467 rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR); 468 469 /* This will timeout if we don't have a link. */ 470 err = rcar_pcie_wait_for_dl(pcie); 471 if (err) 472 return err; 473 474 /* Enable INTx interrupts */ 475 rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8); 476 477 wmb(); 478 479 return 0; 480 } 481 482 static int rcar_pcie_phy_init_h1(struct rcar_pcie_host *host) 483 { 484 struct rcar_pcie *pcie = &host->pcie; 485 486 /* Initialize the phy */ 487 phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191); 488 phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180); 489 phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188); 490 phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188); 491 phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014); 492 phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014); 493 phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0); 494 phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB); 495 phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062); 496 phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000); 497 phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000); 498 phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806); 499 500 phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5); 501 phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F); 502 phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000); 503 504 return 0; 505 } 506 507 static int rcar_pcie_phy_init_gen2(struct rcar_pcie_host *host) 508 { 509 struct rcar_pcie *pcie = &host->pcie; 510 511 /* 512 * These settings come from the R-Car Series, 2nd Generation User's 513 * Manual, section 50.3.1 (2) Initialization of the physical layer. 514 */ 515 rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR); 516 rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA); 517 rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL); 518 rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL); 519 520 rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR); 521 /* The following value is for DC connection, no termination resistor */ 522 rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA); 523 rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL); 524 rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL); 525 526 return 0; 527 } 528 529 static int rcar_pcie_phy_init_gen3(struct rcar_pcie_host *host) 530 { 531 int err; 532 533 err = phy_init(host->phy); 534 if (err) 535 return err; 536 537 err = phy_power_on(host->phy); 538 if (err) 539 phy_exit(host->phy); 540 541 return err; 542 } 543 544 static int rcar_msi_alloc(struct rcar_msi *chip) 545 { 546 int msi; 547 548 mutex_lock(&chip->lock); 549 550 msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR); 551 if (msi < INT_PCI_MSI_NR) 552 set_bit(msi, chip->used); 553 else 554 msi = -ENOSPC; 555 556 mutex_unlock(&chip->lock); 557 558 return msi; 559 } 560 561 static int rcar_msi_alloc_region(struct rcar_msi *chip, int no_irqs) 562 { 563 int msi; 564 565 mutex_lock(&chip->lock); 566 msi = bitmap_find_free_region(chip->used, INT_PCI_MSI_NR, 567 order_base_2(no_irqs)); 568 mutex_unlock(&chip->lock); 569 570 return msi; 571 } 572 573 static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq) 574 { 575 mutex_lock(&chip->lock); 576 clear_bit(irq, chip->used); 577 mutex_unlock(&chip->lock); 578 } 579 580 static irqreturn_t rcar_pcie_msi_irq(int irq, void *data) 581 { 582 struct rcar_pcie_host *host = data; 583 struct rcar_pcie *pcie = &host->pcie; 584 struct rcar_msi *msi = &host->msi; 585 struct device *dev = pcie->dev; 586 unsigned long reg; 587 588 reg = rcar_pci_read_reg(pcie, PCIEMSIFR); 589 590 /* MSI & INTx share an interrupt - we only handle MSI here */ 591 if (!reg) 592 return IRQ_NONE; 593 594 while (reg) { 595 unsigned int index = find_first_bit(®, 32); 596 unsigned int msi_irq; 597 598 /* clear the interrupt */ 599 rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR); 600 601 msi_irq = irq_find_mapping(msi->domain, index); 602 if (msi_irq) { 603 if (test_bit(index, msi->used)) 604 generic_handle_irq(msi_irq); 605 else 606 dev_info(dev, "unhandled MSI\n"); 607 } else { 608 /* Unknown MSI, just clear it */ 609 dev_dbg(dev, "unexpected MSI\n"); 610 } 611 612 /* see if there's any more pending in this vector */ 613 reg = rcar_pci_read_reg(pcie, PCIEMSIFR); 614 } 615 616 return IRQ_HANDLED; 617 } 618 619 static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev, 620 struct msi_desc *desc) 621 { 622 struct rcar_msi *msi = to_rcar_msi(chip); 623 struct rcar_pcie_host *host = container_of(chip, struct rcar_pcie_host, 624 msi.chip); 625 struct rcar_pcie *pcie = &host->pcie; 626 struct msi_msg msg; 627 unsigned int irq; 628 int hwirq; 629 630 hwirq = rcar_msi_alloc(msi); 631 if (hwirq < 0) 632 return hwirq; 633 634 irq = irq_find_mapping(msi->domain, hwirq); 635 if (!irq) { 636 rcar_msi_free(msi, hwirq); 637 return -EINVAL; 638 } 639 640 irq_set_msi_desc(irq, desc); 641 642 msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE; 643 msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR); 644 msg.data = hwirq; 645 646 pci_write_msi_msg(irq, &msg); 647 648 return 0; 649 } 650 651 static int rcar_msi_setup_irqs(struct msi_controller *chip, 652 struct pci_dev *pdev, int nvec, int type) 653 { 654 struct rcar_msi *msi = to_rcar_msi(chip); 655 struct rcar_pcie_host *host = container_of(chip, struct rcar_pcie_host, 656 msi.chip); 657 struct rcar_pcie *pcie = &host->pcie; 658 struct msi_desc *desc; 659 struct msi_msg msg; 660 unsigned int irq; 661 int hwirq; 662 int i; 663 664 /* MSI-X interrupts are not supported */ 665 if (type == PCI_CAP_ID_MSIX) 666 return -EINVAL; 667 668 WARN_ON(!list_is_singular(&pdev->dev.msi_list)); 669 desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list); 670 671 hwirq = rcar_msi_alloc_region(msi, nvec); 672 if (hwirq < 0) 673 return -ENOSPC; 674 675 irq = irq_find_mapping(msi->domain, hwirq); 676 if (!irq) 677 return -ENOSPC; 678 679 for (i = 0; i < nvec; i++) { 680 /* 681 * irq_create_mapping() called from rcar_pcie_probe() pre- 682 * allocates descs, so there is no need to allocate descs here. 683 * We can therefore assume that if irq_find_mapping() above 684 * returns non-zero, then the descs are also successfully 685 * allocated. 686 */ 687 if (irq_set_msi_desc_off(irq, i, desc)) { 688 /* TODO: clear */ 689 return -EINVAL; 690 } 691 } 692 693 desc->nvec_used = nvec; 694 desc->msi_attrib.multiple = order_base_2(nvec); 695 696 msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE; 697 msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR); 698 msg.data = hwirq; 699 700 pci_write_msi_msg(irq, &msg); 701 702 return 0; 703 } 704 705 static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq) 706 { 707 struct rcar_msi *msi = to_rcar_msi(chip); 708 struct irq_data *d = irq_get_irq_data(irq); 709 710 rcar_msi_free(msi, d->hwirq); 711 } 712 713 static struct irq_chip rcar_msi_irq_chip = { 714 .name = "R-Car PCIe MSI", 715 .irq_enable = pci_msi_unmask_irq, 716 .irq_disable = pci_msi_mask_irq, 717 .irq_mask = pci_msi_mask_irq, 718 .irq_unmask = pci_msi_unmask_irq, 719 }; 720 721 static int rcar_msi_map(struct irq_domain *domain, unsigned int irq, 722 irq_hw_number_t hwirq) 723 { 724 irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq); 725 irq_set_chip_data(irq, domain->host_data); 726 727 return 0; 728 } 729 730 static const struct irq_domain_ops msi_domain_ops = { 731 .map = rcar_msi_map, 732 }; 733 734 static void rcar_pcie_unmap_msi(struct rcar_pcie_host *host) 735 { 736 struct rcar_msi *msi = &host->msi; 737 int i, irq; 738 739 for (i = 0; i < INT_PCI_MSI_NR; i++) { 740 irq = irq_find_mapping(msi->domain, i); 741 if (irq > 0) 742 irq_dispose_mapping(irq); 743 } 744 745 irq_domain_remove(msi->domain); 746 } 747 748 static void rcar_pcie_hw_enable_msi(struct rcar_pcie_host *host) 749 { 750 struct rcar_pcie *pcie = &host->pcie; 751 struct rcar_msi *msi = &host->msi; 752 unsigned long base; 753 754 /* setup MSI data target */ 755 base = virt_to_phys((void *)msi->pages); 756 757 rcar_pci_write_reg(pcie, lower_32_bits(base) | MSIFE, PCIEMSIALR); 758 rcar_pci_write_reg(pcie, upper_32_bits(base), PCIEMSIAUR); 759 760 /* enable all MSI interrupts */ 761 rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER); 762 } 763 764 static int rcar_pcie_enable_msi(struct rcar_pcie_host *host) 765 { 766 struct rcar_pcie *pcie = &host->pcie; 767 struct device *dev = pcie->dev; 768 struct rcar_msi *msi = &host->msi; 769 int err, i; 770 771 mutex_init(&msi->lock); 772 773 msi->chip.dev = dev; 774 msi->chip.setup_irq = rcar_msi_setup_irq; 775 msi->chip.setup_irqs = rcar_msi_setup_irqs; 776 msi->chip.teardown_irq = rcar_msi_teardown_irq; 777 778 msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR, 779 &msi_domain_ops, &msi->chip); 780 if (!msi->domain) { 781 dev_err(dev, "failed to create IRQ domain\n"); 782 return -ENOMEM; 783 } 784 785 for (i = 0; i < INT_PCI_MSI_NR; i++) 786 irq_create_mapping(msi->domain, i); 787 788 /* Two irqs are for MSI, but they are also used for non-MSI irqs */ 789 err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq, 790 IRQF_SHARED | IRQF_NO_THREAD, 791 rcar_msi_irq_chip.name, host); 792 if (err < 0) { 793 dev_err(dev, "failed to request IRQ: %d\n", err); 794 goto err; 795 } 796 797 err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq, 798 IRQF_SHARED | IRQF_NO_THREAD, 799 rcar_msi_irq_chip.name, host); 800 if (err < 0) { 801 dev_err(dev, "failed to request IRQ: %d\n", err); 802 goto err; 803 } 804 805 /* setup MSI data target */ 806 msi->pages = __get_free_pages(GFP_KERNEL, 0); 807 rcar_pcie_hw_enable_msi(host); 808 809 return 0; 810 811 err: 812 rcar_pcie_unmap_msi(host); 813 return err; 814 } 815 816 static void rcar_pcie_teardown_msi(struct rcar_pcie_host *host) 817 { 818 struct rcar_pcie *pcie = &host->pcie; 819 struct rcar_msi *msi = &host->msi; 820 821 /* Disable all MSI interrupts */ 822 rcar_pci_write_reg(pcie, 0, PCIEMSIIER); 823 824 /* Disable address decoding of the MSI interrupt, MSIFE */ 825 rcar_pci_write_reg(pcie, 0, PCIEMSIALR); 826 827 free_pages(msi->pages, 0); 828 829 rcar_pcie_unmap_msi(host); 830 } 831 832 static int rcar_pcie_get_resources(struct rcar_pcie_host *host) 833 { 834 struct rcar_pcie *pcie = &host->pcie; 835 struct device *dev = pcie->dev; 836 struct resource res; 837 int err, i; 838 839 host->phy = devm_phy_optional_get(dev, "pcie"); 840 if (IS_ERR(host->phy)) 841 return PTR_ERR(host->phy); 842 843 err = of_address_to_resource(dev->of_node, 0, &res); 844 if (err) 845 return err; 846 847 pcie->base = devm_ioremap_resource(dev, &res); 848 if (IS_ERR(pcie->base)) 849 return PTR_ERR(pcie->base); 850 851 host->bus_clk = devm_clk_get(dev, "pcie_bus"); 852 if (IS_ERR(host->bus_clk)) { 853 dev_err(dev, "cannot get pcie bus clock\n"); 854 return PTR_ERR(host->bus_clk); 855 } 856 857 i = irq_of_parse_and_map(dev->of_node, 0); 858 if (!i) { 859 dev_err(dev, "cannot get platform resources for msi interrupt\n"); 860 err = -ENOENT; 861 goto err_irq1; 862 } 863 host->msi.irq1 = i; 864 865 i = irq_of_parse_and_map(dev->of_node, 1); 866 if (!i) { 867 dev_err(dev, "cannot get platform resources for msi interrupt\n"); 868 err = -ENOENT; 869 goto err_irq2; 870 } 871 host->msi.irq2 = i; 872 873 return 0; 874 875 err_irq2: 876 irq_dispose_mapping(host->msi.irq1); 877 err_irq1: 878 return err; 879 } 880 881 static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie, 882 struct resource_entry *entry, 883 int *index) 884 { 885 u64 restype = entry->res->flags; 886 u64 cpu_addr = entry->res->start; 887 u64 cpu_end = entry->res->end; 888 u64 pci_addr = entry->res->start - entry->offset; 889 u32 flags = LAM_64BIT | LAR_ENABLE; 890 u64 mask; 891 u64 size = resource_size(entry->res); 892 int idx = *index; 893 894 if (restype & IORESOURCE_PREFETCH) 895 flags |= LAM_PREFETCH; 896 897 while (cpu_addr < cpu_end) { 898 if (idx >= MAX_NR_INBOUND_MAPS - 1) { 899 dev_err(pcie->dev, "Failed to map inbound regions!\n"); 900 return -EINVAL; 901 } 902 /* 903 * If the size of the range is larger than the alignment of 904 * the start address, we have to use multiple entries to 905 * perform the mapping. 906 */ 907 if (cpu_addr > 0) { 908 unsigned long nr_zeros = __ffs64(cpu_addr); 909 u64 alignment = 1ULL << nr_zeros; 910 911 size = min(size, alignment); 912 } 913 /* Hardware supports max 4GiB inbound region */ 914 size = min(size, 1ULL << 32); 915 916 mask = roundup_pow_of_two(size) - 1; 917 mask &= ~0xf; 918 919 rcar_pcie_set_inbound(pcie, cpu_addr, pci_addr, 920 lower_32_bits(mask) | flags, idx, true); 921 922 pci_addr += size; 923 cpu_addr += size; 924 idx += 2; 925 } 926 *index = idx; 927 928 return 0; 929 } 930 931 static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie_host *host) 932 { 933 struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host); 934 struct resource_entry *entry; 935 int index = 0, err = 0; 936 937 resource_list_for_each_entry(entry, &bridge->dma_ranges) { 938 err = rcar_pcie_inbound_ranges(&host->pcie, entry, &index); 939 if (err) 940 break; 941 } 942 943 return err; 944 } 945 946 static const struct of_device_id rcar_pcie_of_match[] = { 947 { .compatible = "renesas,pcie-r8a7779", 948 .data = rcar_pcie_phy_init_h1 }, 949 { .compatible = "renesas,pcie-r8a7790", 950 .data = rcar_pcie_phy_init_gen2 }, 951 { .compatible = "renesas,pcie-r8a7791", 952 .data = rcar_pcie_phy_init_gen2 }, 953 { .compatible = "renesas,pcie-rcar-gen2", 954 .data = rcar_pcie_phy_init_gen2 }, 955 { .compatible = "renesas,pcie-r8a7795", 956 .data = rcar_pcie_phy_init_gen3 }, 957 { .compatible = "renesas,pcie-rcar-gen3", 958 .data = rcar_pcie_phy_init_gen3 }, 959 {}, 960 }; 961 962 static int rcar_pcie_probe(struct platform_device *pdev) 963 { 964 struct device *dev = &pdev->dev; 965 struct rcar_pcie_host *host; 966 struct rcar_pcie *pcie; 967 u32 data; 968 int err; 969 struct pci_host_bridge *bridge; 970 971 bridge = pci_alloc_host_bridge(sizeof(*host)); 972 if (!bridge) 973 return -ENOMEM; 974 975 host = pci_host_bridge_priv(bridge); 976 pcie = &host->pcie; 977 pcie->dev = dev; 978 platform_set_drvdata(pdev, host); 979 980 err = pci_parse_request_of_pci_ranges(dev, &host->resources, 981 &bridge->dma_ranges, NULL); 982 if (err) 983 goto err_free_bridge; 984 985 pm_runtime_enable(pcie->dev); 986 err = pm_runtime_get_sync(pcie->dev); 987 if (err < 0) { 988 dev_err(pcie->dev, "pm_runtime_get_sync failed\n"); 989 goto err_pm_disable; 990 } 991 992 err = rcar_pcie_get_resources(host); 993 if (err < 0) { 994 dev_err(dev, "failed to request resources: %d\n", err); 995 goto err_pm_put; 996 } 997 998 err = clk_prepare_enable(host->bus_clk); 999 if (err) { 1000 dev_err(dev, "failed to enable bus clock: %d\n", err); 1001 goto err_unmap_msi_irqs; 1002 } 1003 1004 err = rcar_pcie_parse_map_dma_ranges(host); 1005 if (err) 1006 goto err_clk_disable; 1007 1008 host->phy_init_fn = of_device_get_match_data(dev); 1009 err = host->phy_init_fn(host); 1010 if (err) { 1011 dev_err(dev, "failed to init PCIe PHY\n"); 1012 goto err_clk_disable; 1013 } 1014 1015 /* Failure to get a link might just be that no cards are inserted */ 1016 if (rcar_pcie_hw_init(pcie)) { 1017 dev_info(dev, "PCIe link down\n"); 1018 err = -ENODEV; 1019 goto err_phy_shutdown; 1020 } 1021 1022 data = rcar_pci_read_reg(pcie, MACSR); 1023 dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f); 1024 1025 if (IS_ENABLED(CONFIG_PCI_MSI)) { 1026 err = rcar_pcie_enable_msi(host); 1027 if (err < 0) { 1028 dev_err(dev, 1029 "failed to enable MSI support: %d\n", 1030 err); 1031 goto err_phy_shutdown; 1032 } 1033 } 1034 1035 err = rcar_pcie_enable(host); 1036 if (err) 1037 goto err_msi_teardown; 1038 1039 return 0; 1040 1041 err_msi_teardown: 1042 if (IS_ENABLED(CONFIG_PCI_MSI)) 1043 rcar_pcie_teardown_msi(host); 1044 1045 err_phy_shutdown: 1046 if (host->phy) { 1047 phy_power_off(host->phy); 1048 phy_exit(host->phy); 1049 } 1050 1051 err_clk_disable: 1052 clk_disable_unprepare(host->bus_clk); 1053 1054 err_unmap_msi_irqs: 1055 irq_dispose_mapping(host->msi.irq2); 1056 irq_dispose_mapping(host->msi.irq1); 1057 1058 err_pm_put: 1059 pm_runtime_put(dev); 1060 1061 err_pm_disable: 1062 pm_runtime_disable(dev); 1063 pci_free_resource_list(&host->resources); 1064 1065 err_free_bridge: 1066 pci_free_host_bridge(bridge); 1067 1068 return err; 1069 } 1070 1071 static int __maybe_unused rcar_pcie_resume(struct device *dev) 1072 { 1073 struct rcar_pcie_host *host = dev_get_drvdata(dev); 1074 struct rcar_pcie *pcie = &host->pcie; 1075 unsigned int data; 1076 int err; 1077 1078 err = rcar_pcie_parse_map_dma_ranges(host); 1079 if (err) 1080 return 0; 1081 1082 /* Failure to get a link might just be that no cards are inserted */ 1083 err = host->phy_init_fn(host); 1084 if (err) { 1085 dev_info(dev, "PCIe link down\n"); 1086 return 0; 1087 } 1088 1089 data = rcar_pci_read_reg(pcie, MACSR); 1090 dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f); 1091 1092 /* Enable MSI */ 1093 if (IS_ENABLED(CONFIG_PCI_MSI)) 1094 rcar_pcie_hw_enable_msi(host); 1095 1096 rcar_pcie_hw_enable(host); 1097 1098 return 0; 1099 } 1100 1101 static int rcar_pcie_resume_noirq(struct device *dev) 1102 { 1103 struct rcar_pcie_host *host = dev_get_drvdata(dev); 1104 struct rcar_pcie *pcie = &host->pcie; 1105 1106 if (rcar_pci_read_reg(pcie, PMSR) && 1107 !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN)) 1108 return 0; 1109 1110 /* Re-establish the PCIe link */ 1111 rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR); 1112 rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR); 1113 return rcar_pcie_wait_for_dl(pcie); 1114 } 1115 1116 static const struct dev_pm_ops rcar_pcie_pm_ops = { 1117 SET_SYSTEM_SLEEP_PM_OPS(NULL, rcar_pcie_resume) 1118 .resume_noirq = rcar_pcie_resume_noirq, 1119 }; 1120 1121 static struct platform_driver rcar_pcie_driver = { 1122 .driver = { 1123 .name = "rcar-pcie", 1124 .of_match_table = rcar_pcie_of_match, 1125 .pm = &rcar_pcie_pm_ops, 1126 .suppress_bind_attrs = true, 1127 }, 1128 .probe = rcar_pcie_probe, 1129 }; 1130 builtin_platform_driver(rcar_pcie_driver); 1131