1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * PCIe driver for Renesas R-Car SoCs 4 * Copyright (C) 2014 Renesas Electronics Europe Ltd 5 * 6 * Based on: 7 * arch/sh/drivers/pci/pcie-sh7786.c 8 * arch/sh/drivers/pci/ops-sh7786.c 9 * Copyright (C) 2009 - 2011 Paul Mundt 10 * 11 * Author: Phil Edworthy <phil.edworthy@renesas.com> 12 */ 13 14 #include <linux/bitops.h> 15 #include <linux/clk.h> 16 #include <linux/delay.h> 17 #include <linux/interrupt.h> 18 #include <linux/irq.h> 19 #include <linux/irqdomain.h> 20 #include <linux/kernel.h> 21 #include <linux/init.h> 22 #include <linux/msi.h> 23 #include <linux/of_address.h> 24 #include <linux/of_irq.h> 25 #include <linux/of_pci.h> 26 #include <linux/of_platform.h> 27 #include <linux/pci.h> 28 #include <linux/phy/phy.h> 29 #include <linux/platform_device.h> 30 #include <linux/pm_runtime.h> 31 #include <linux/slab.h> 32 33 #define PCIECAR 0x000010 34 #define PCIECCTLR 0x000018 35 #define CONFIG_SEND_ENABLE BIT(31) 36 #define TYPE0 (0 << 8) 37 #define TYPE1 BIT(8) 38 #define PCIECDR 0x000020 39 #define PCIEMSR 0x000028 40 #define PCIEINTXR 0x000400 41 #define PCIEPHYSR 0x0007f0 42 #define PHYRDY BIT(0) 43 #define PCIEMSITXR 0x000840 44 45 /* Transfer control */ 46 #define PCIETCTLR 0x02000 47 #define DL_DOWN BIT(3) 48 #define CFINIT BIT(0) 49 #define PCIETSTR 0x02004 50 #define DATA_LINK_ACTIVE BIT(0) 51 #define PCIEERRFR 0x02020 52 #define UNSUPPORTED_REQUEST BIT(4) 53 #define PCIEMSIFR 0x02044 54 #define PCIEMSIALR 0x02048 55 #define MSIFE BIT(0) 56 #define PCIEMSIAUR 0x0204c 57 #define PCIEMSIIER 0x02050 58 59 /* root port address */ 60 #define PCIEPRAR(x) (0x02080 + ((x) * 0x4)) 61 62 /* local address reg & mask */ 63 #define PCIELAR(x) (0x02200 + ((x) * 0x20)) 64 #define PCIELAMR(x) (0x02208 + ((x) * 0x20)) 65 #define LAM_PREFETCH BIT(3) 66 #define LAM_64BIT BIT(2) 67 #define LAR_ENABLE BIT(1) 68 69 /* PCIe address reg & mask */ 70 #define PCIEPALR(x) (0x03400 + ((x) * 0x20)) 71 #define PCIEPAUR(x) (0x03404 + ((x) * 0x20)) 72 #define PCIEPAMR(x) (0x03408 + ((x) * 0x20)) 73 #define PCIEPTCTLR(x) (0x0340c + ((x) * 0x20)) 74 #define PAR_ENABLE BIT(31) 75 #define IO_SPACE BIT(8) 76 77 /* Configuration */ 78 #define PCICONF(x) (0x010000 + ((x) * 0x4)) 79 #define PMCAP(x) (0x010040 + ((x) * 0x4)) 80 #define EXPCAP(x) (0x010070 + ((x) * 0x4)) 81 #define VCCAP(x) (0x010100 + ((x) * 0x4)) 82 83 /* link layer */ 84 #define IDSETR1 0x011004 85 #define TLCTLR 0x011048 86 #define MACSR 0x011054 87 #define SPCHGFIN BIT(4) 88 #define SPCHGFAIL BIT(6) 89 #define SPCHGSUC BIT(7) 90 #define LINK_SPEED (0xf << 16) 91 #define LINK_SPEED_2_5GTS (1 << 16) 92 #define LINK_SPEED_5_0GTS (2 << 16) 93 #define MACCTLR 0x011058 94 #define MACCTLR_NFTS_MASK GENMASK(23, 16) /* The name is from SH7786 */ 95 #define SPEED_CHANGE BIT(24) 96 #define SCRAMBLE_DISABLE BIT(27) 97 #define LTSMDIS BIT(31) 98 #define MACCTLR_INIT_VAL (LTSMDIS | MACCTLR_NFTS_MASK) 99 #define PMSR 0x01105c 100 #define MACS2R 0x011078 101 #define MACCGSPSETR 0x011084 102 #define SPCNGRSN BIT(31) 103 104 /* R-Car H1 PHY */ 105 #define H1_PCIEPHYADRR 0x04000c 106 #define WRITE_CMD BIT(16) 107 #define PHY_ACK BIT(24) 108 #define RATE_POS 12 109 #define LANE_POS 8 110 #define ADR_POS 0 111 #define H1_PCIEPHYDOUTR 0x040014 112 113 /* R-Car Gen2 PHY */ 114 #define GEN2_PCIEPHYADDR 0x780 115 #define GEN2_PCIEPHYDATA 0x784 116 #define GEN2_PCIEPHYCTRL 0x78c 117 118 #define INT_PCI_MSI_NR 32 119 120 #define RCONF(x) (PCICONF(0) + (x)) 121 #define RPMCAP(x) (PMCAP(0) + (x)) 122 #define REXPCAP(x) (EXPCAP(0) + (x)) 123 #define RVCCAP(x) (VCCAP(0) + (x)) 124 125 #define PCIE_CONF_BUS(b) (((b) & 0xff) << 24) 126 #define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19) 127 #define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16) 128 129 #define RCAR_PCI_MAX_RESOURCES 4 130 #define MAX_NR_INBOUND_MAPS 6 131 132 struct rcar_msi { 133 DECLARE_BITMAP(used, INT_PCI_MSI_NR); 134 struct irq_domain *domain; 135 struct msi_controller chip; 136 unsigned long pages; 137 struct mutex lock; 138 int irq1; 139 int irq2; 140 }; 141 142 static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip) 143 { 144 return container_of(chip, struct rcar_msi, chip); 145 } 146 147 /* Structure representing the PCIe interface */ 148 struct rcar_pcie { 149 struct device *dev; 150 struct phy *phy; 151 void __iomem *base; 152 struct list_head resources; 153 int root_bus_nr; 154 struct clk *bus_clk; 155 struct rcar_msi msi; 156 }; 157 158 static void rcar_pci_write_reg(struct rcar_pcie *pcie, u32 val, 159 unsigned int reg) 160 { 161 writel(val, pcie->base + reg); 162 } 163 164 static u32 rcar_pci_read_reg(struct rcar_pcie *pcie, unsigned int reg) 165 { 166 return readl(pcie->base + reg); 167 } 168 169 enum { 170 RCAR_PCI_ACCESS_READ, 171 RCAR_PCI_ACCESS_WRITE, 172 }; 173 174 static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data) 175 { 176 unsigned int shift = BITS_PER_BYTE * (where & 3); 177 u32 val = rcar_pci_read_reg(pcie, where & ~3); 178 179 val &= ~(mask << shift); 180 val |= data << shift; 181 rcar_pci_write_reg(pcie, val, where & ~3); 182 } 183 184 static u32 rcar_read_conf(struct rcar_pcie *pcie, int where) 185 { 186 unsigned int shift = BITS_PER_BYTE * (where & 3); 187 u32 val = rcar_pci_read_reg(pcie, where & ~3); 188 189 return val >> shift; 190 } 191 192 /* Serialization is provided by 'pci_lock' in drivers/pci/access.c */ 193 static int rcar_pcie_config_access(struct rcar_pcie *pcie, 194 unsigned char access_type, struct pci_bus *bus, 195 unsigned int devfn, int where, u32 *data) 196 { 197 unsigned int dev, func, reg, index; 198 199 dev = PCI_SLOT(devfn); 200 func = PCI_FUNC(devfn); 201 reg = where & ~3; 202 index = reg / 4; 203 204 /* 205 * While each channel has its own memory-mapped extended config 206 * space, it's generally only accessible when in endpoint mode. 207 * When in root complex mode, the controller is unable to target 208 * itself with either type 0 or type 1 accesses, and indeed, any 209 * controller initiated target transfer to its own config space 210 * result in a completer abort. 211 * 212 * Each channel effectively only supports a single device, but as 213 * the same channel <-> device access works for any PCI_SLOT() 214 * value, we cheat a bit here and bind the controller's config 215 * space to devfn 0 in order to enable self-enumeration. In this 216 * case the regular ECAR/ECDR path is sidelined and the mangled 217 * config access itself is initiated as an internal bus transaction. 218 */ 219 if (pci_is_root_bus(bus)) { 220 if (dev != 0) 221 return PCIBIOS_DEVICE_NOT_FOUND; 222 223 if (access_type == RCAR_PCI_ACCESS_READ) { 224 *data = rcar_pci_read_reg(pcie, PCICONF(index)); 225 } else { 226 /* Keep an eye out for changes to the root bus number */ 227 if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS)) 228 pcie->root_bus_nr = *data & 0xff; 229 230 rcar_pci_write_reg(pcie, *data, PCICONF(index)); 231 } 232 233 return PCIBIOS_SUCCESSFUL; 234 } 235 236 if (pcie->root_bus_nr < 0) 237 return PCIBIOS_DEVICE_NOT_FOUND; 238 239 /* Clear errors */ 240 rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR); 241 242 /* Set the PIO address */ 243 rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) | 244 PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR); 245 246 /* Enable the configuration access */ 247 if (bus->parent->number == pcie->root_bus_nr) 248 rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR); 249 else 250 rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR); 251 252 /* Check for errors */ 253 if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST) 254 return PCIBIOS_DEVICE_NOT_FOUND; 255 256 /* Check for master and target aborts */ 257 if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) & 258 (PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT)) 259 return PCIBIOS_DEVICE_NOT_FOUND; 260 261 if (access_type == RCAR_PCI_ACCESS_READ) 262 *data = rcar_pci_read_reg(pcie, PCIECDR); 263 else 264 rcar_pci_write_reg(pcie, *data, PCIECDR); 265 266 /* Disable the configuration access */ 267 rcar_pci_write_reg(pcie, 0, PCIECCTLR); 268 269 return PCIBIOS_SUCCESSFUL; 270 } 271 272 static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn, 273 int where, int size, u32 *val) 274 { 275 struct rcar_pcie *pcie = bus->sysdata; 276 int ret; 277 278 ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ, 279 bus, devfn, where, val); 280 if (ret != PCIBIOS_SUCCESSFUL) { 281 *val = 0xffffffff; 282 return ret; 283 } 284 285 if (size == 1) 286 *val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff; 287 else if (size == 2) 288 *val = (*val >> (BITS_PER_BYTE * (where & 2))) & 0xffff; 289 290 dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n", 291 bus->number, devfn, where, size, *val); 292 293 return ret; 294 } 295 296 /* Serialization is provided by 'pci_lock' in drivers/pci/access.c */ 297 static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn, 298 int where, int size, u32 val) 299 { 300 struct rcar_pcie *pcie = bus->sysdata; 301 unsigned int shift; 302 u32 data; 303 int ret; 304 305 ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ, 306 bus, devfn, where, &data); 307 if (ret != PCIBIOS_SUCCESSFUL) 308 return ret; 309 310 dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n", 311 bus->number, devfn, where, size, val); 312 313 if (size == 1) { 314 shift = BITS_PER_BYTE * (where & 3); 315 data &= ~(0xff << shift); 316 data |= ((val & 0xff) << shift); 317 } else if (size == 2) { 318 shift = BITS_PER_BYTE * (where & 2); 319 data &= ~(0xffff << shift); 320 data |= ((val & 0xffff) << shift); 321 } else 322 data = val; 323 324 ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_WRITE, 325 bus, devfn, where, &data); 326 327 return ret; 328 } 329 330 static struct pci_ops rcar_pcie_ops = { 331 .read = rcar_pcie_read_conf, 332 .write = rcar_pcie_write_conf, 333 }; 334 335 static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie, 336 struct resource *res) 337 { 338 /* Setup PCIe address space mappings for each resource */ 339 resource_size_t size; 340 resource_size_t res_start; 341 u32 mask; 342 343 rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win)); 344 345 /* 346 * The PAMR mask is calculated in units of 128Bytes, which 347 * keeps things pretty simple. 348 */ 349 size = resource_size(res); 350 mask = (roundup_pow_of_two(size) / SZ_128) - 1; 351 rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win)); 352 353 if (res->flags & IORESOURCE_IO) 354 res_start = pci_pio_to_address(res->start); 355 else 356 res_start = res->start; 357 358 rcar_pci_write_reg(pcie, upper_32_bits(res_start), PCIEPAUR(win)); 359 rcar_pci_write_reg(pcie, lower_32_bits(res_start) & ~0x7F, 360 PCIEPALR(win)); 361 362 /* First resource is for IO */ 363 mask = PAR_ENABLE; 364 if (res->flags & IORESOURCE_IO) 365 mask |= IO_SPACE; 366 367 rcar_pci_write_reg(pcie, mask, PCIEPTCTLR(win)); 368 } 369 370 static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci) 371 { 372 struct resource_entry *win; 373 int i = 0; 374 375 /* Setup PCI resources */ 376 resource_list_for_each_entry(win, &pci->resources) { 377 struct resource *res = win->res; 378 379 if (!res->flags) 380 continue; 381 382 switch (resource_type(res)) { 383 case IORESOURCE_IO: 384 case IORESOURCE_MEM: 385 rcar_pcie_setup_window(i, pci, res); 386 i++; 387 break; 388 case IORESOURCE_BUS: 389 pci->root_bus_nr = res->start; 390 break; 391 default: 392 continue; 393 } 394 395 pci_add_resource(resource, res); 396 } 397 398 return 1; 399 } 400 401 static void rcar_pcie_force_speedup(struct rcar_pcie *pcie) 402 { 403 struct device *dev = pcie->dev; 404 unsigned int timeout = 1000; 405 u32 macsr; 406 407 if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS) 408 return; 409 410 if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) { 411 dev_err(dev, "Speed change already in progress\n"); 412 return; 413 } 414 415 macsr = rcar_pci_read_reg(pcie, MACSR); 416 if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS) 417 goto done; 418 419 /* Set target link speed to 5.0 GT/s */ 420 rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS, 421 PCI_EXP_LNKSTA_CLS_5_0GB); 422 423 /* Set speed change reason as intentional factor */ 424 rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0); 425 426 /* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */ 427 if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL)) 428 rcar_pci_write_reg(pcie, macsr, MACSR); 429 430 /* Start link speed change */ 431 rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE); 432 433 while (timeout--) { 434 macsr = rcar_pci_read_reg(pcie, MACSR); 435 if (macsr & SPCHGFIN) { 436 /* Clear the interrupt bits */ 437 rcar_pci_write_reg(pcie, macsr, MACSR); 438 439 if (macsr & SPCHGFAIL) 440 dev_err(dev, "Speed change failed\n"); 441 442 goto done; 443 } 444 445 msleep(1); 446 } 447 448 dev_err(dev, "Speed change timed out\n"); 449 450 done: 451 dev_info(dev, "Current link speed is %s GT/s\n", 452 (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5"); 453 } 454 455 static int rcar_pcie_enable(struct rcar_pcie *pcie) 456 { 457 struct device *dev = pcie->dev; 458 struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); 459 struct pci_bus *bus, *child; 460 int ret; 461 462 /* Try setting 5 GT/s link speed */ 463 rcar_pcie_force_speedup(pcie); 464 465 rcar_pcie_setup(&bridge->windows, pcie); 466 467 pci_add_flags(PCI_REASSIGN_ALL_BUS); 468 469 bridge->dev.parent = dev; 470 bridge->sysdata = pcie; 471 bridge->busnr = pcie->root_bus_nr; 472 bridge->ops = &rcar_pcie_ops; 473 bridge->map_irq = of_irq_parse_and_map_pci; 474 bridge->swizzle_irq = pci_common_swizzle; 475 if (IS_ENABLED(CONFIG_PCI_MSI)) 476 bridge->msi = &pcie->msi.chip; 477 478 ret = pci_scan_root_bus_bridge(bridge); 479 if (ret < 0) 480 return ret; 481 482 bus = bridge->bus; 483 484 pci_bus_size_bridges(bus); 485 pci_bus_assign_resources(bus); 486 487 list_for_each_entry(child, &bus->children, node) 488 pcie_bus_configure_settings(child); 489 490 pci_bus_add_devices(bus); 491 492 return 0; 493 } 494 495 static int phy_wait_for_ack(struct rcar_pcie *pcie) 496 { 497 struct device *dev = pcie->dev; 498 unsigned int timeout = 100; 499 500 while (timeout--) { 501 if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK) 502 return 0; 503 504 udelay(100); 505 } 506 507 dev_err(dev, "Access to PCIe phy timed out\n"); 508 509 return -ETIMEDOUT; 510 } 511 512 static void phy_write_reg(struct rcar_pcie *pcie, 513 unsigned int rate, u32 addr, 514 unsigned int lane, u32 data) 515 { 516 u32 phyaddr; 517 518 phyaddr = WRITE_CMD | 519 ((rate & 1) << RATE_POS) | 520 ((lane & 0xf) << LANE_POS) | 521 ((addr & 0xff) << ADR_POS); 522 523 /* Set write data */ 524 rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR); 525 rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR); 526 527 /* Ignore errors as they will be dealt with if the data link is down */ 528 phy_wait_for_ack(pcie); 529 530 /* Clear command */ 531 rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR); 532 rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR); 533 534 /* Ignore errors as they will be dealt with if the data link is down */ 535 phy_wait_for_ack(pcie); 536 } 537 538 static int rcar_pcie_wait_for_phyrdy(struct rcar_pcie *pcie) 539 { 540 unsigned int timeout = 10; 541 542 while (timeout--) { 543 if (rcar_pci_read_reg(pcie, PCIEPHYSR) & PHYRDY) 544 return 0; 545 546 msleep(5); 547 } 548 549 return -ETIMEDOUT; 550 } 551 552 static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie) 553 { 554 unsigned int timeout = 10000; 555 556 while (timeout--) { 557 if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE)) 558 return 0; 559 560 udelay(5); 561 cpu_relax(); 562 } 563 564 return -ETIMEDOUT; 565 } 566 567 static int rcar_pcie_hw_init(struct rcar_pcie *pcie) 568 { 569 int err; 570 571 /* Begin initialization */ 572 rcar_pci_write_reg(pcie, 0, PCIETCTLR); 573 574 /* Set mode */ 575 rcar_pci_write_reg(pcie, 1, PCIEMSR); 576 577 err = rcar_pcie_wait_for_phyrdy(pcie); 578 if (err) 579 return err; 580 581 /* 582 * Initial header for port config space is type 1, set the device 583 * class to match. Hardware takes care of propagating the IDSETR 584 * settings, so there is no need to bother with a quirk. 585 */ 586 rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1); 587 588 /* 589 * Setup Secondary Bus Number & Subordinate Bus Number, even though 590 * they aren't used, to avoid bridge being detected as broken. 591 */ 592 rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1); 593 rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1); 594 595 /* Initialize default capabilities. */ 596 rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP); 597 rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS), 598 PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4); 599 rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f, 600 PCI_HEADER_TYPE_BRIDGE); 601 602 /* Enable data link layer active state reporting */ 603 rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC, 604 PCI_EXP_LNKCAP_DLLLARC); 605 606 /* Write out the physical slot number = 0 */ 607 rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0); 608 609 /* Set the completion timer timeout to the maximum 50ms. */ 610 rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50); 611 612 /* Terminate list of capabilities (Next Capability Offset=0) */ 613 rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0); 614 615 /* Enable MSI */ 616 if (IS_ENABLED(CONFIG_PCI_MSI)) 617 rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR); 618 619 rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR); 620 621 /* Finish initialization - establish a PCI Express link */ 622 rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR); 623 624 /* This will timeout if we don't have a link. */ 625 err = rcar_pcie_wait_for_dl(pcie); 626 if (err) 627 return err; 628 629 /* Enable INTx interrupts */ 630 rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8); 631 632 wmb(); 633 634 return 0; 635 } 636 637 static int rcar_pcie_phy_init_h1(struct rcar_pcie *pcie) 638 { 639 /* Initialize the phy */ 640 phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191); 641 phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180); 642 phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188); 643 phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188); 644 phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014); 645 phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014); 646 phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0); 647 phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB); 648 phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062); 649 phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000); 650 phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000); 651 phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806); 652 653 phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5); 654 phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F); 655 phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000); 656 657 return 0; 658 } 659 660 static int rcar_pcie_phy_init_gen2(struct rcar_pcie *pcie) 661 { 662 /* 663 * These settings come from the R-Car Series, 2nd Generation User's 664 * Manual, section 50.3.1 (2) Initialization of the physical layer. 665 */ 666 rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR); 667 rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA); 668 rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL); 669 rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL); 670 671 rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR); 672 /* The following value is for DC connection, no termination resistor */ 673 rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA); 674 rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL); 675 rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL); 676 677 return 0; 678 } 679 680 static int rcar_pcie_phy_init_gen3(struct rcar_pcie *pcie) 681 { 682 int err; 683 684 err = phy_init(pcie->phy); 685 if (err) 686 return err; 687 688 err = phy_power_on(pcie->phy); 689 if (err) 690 phy_exit(pcie->phy); 691 692 return err; 693 } 694 695 static int rcar_msi_alloc(struct rcar_msi *chip) 696 { 697 int msi; 698 699 mutex_lock(&chip->lock); 700 701 msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR); 702 if (msi < INT_PCI_MSI_NR) 703 set_bit(msi, chip->used); 704 else 705 msi = -ENOSPC; 706 707 mutex_unlock(&chip->lock); 708 709 return msi; 710 } 711 712 static int rcar_msi_alloc_region(struct rcar_msi *chip, int no_irqs) 713 { 714 int msi; 715 716 mutex_lock(&chip->lock); 717 msi = bitmap_find_free_region(chip->used, INT_PCI_MSI_NR, 718 order_base_2(no_irqs)); 719 mutex_unlock(&chip->lock); 720 721 return msi; 722 } 723 724 static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq) 725 { 726 mutex_lock(&chip->lock); 727 clear_bit(irq, chip->used); 728 mutex_unlock(&chip->lock); 729 } 730 731 static irqreturn_t rcar_pcie_msi_irq(int irq, void *data) 732 { 733 struct rcar_pcie *pcie = data; 734 struct rcar_msi *msi = &pcie->msi; 735 struct device *dev = pcie->dev; 736 unsigned long reg; 737 738 reg = rcar_pci_read_reg(pcie, PCIEMSIFR); 739 740 /* MSI & INTx share an interrupt - we only handle MSI here */ 741 if (!reg) 742 return IRQ_NONE; 743 744 while (reg) { 745 unsigned int index = find_first_bit(®, 32); 746 unsigned int msi_irq; 747 748 /* clear the interrupt */ 749 rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR); 750 751 msi_irq = irq_find_mapping(msi->domain, index); 752 if (msi_irq) { 753 if (test_bit(index, msi->used)) 754 generic_handle_irq(msi_irq); 755 else 756 dev_info(dev, "unhandled MSI\n"); 757 } else { 758 /* Unknown MSI, just clear it */ 759 dev_dbg(dev, "unexpected MSI\n"); 760 } 761 762 /* see if there's any more pending in this vector */ 763 reg = rcar_pci_read_reg(pcie, PCIEMSIFR); 764 } 765 766 return IRQ_HANDLED; 767 } 768 769 static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev, 770 struct msi_desc *desc) 771 { 772 struct rcar_msi *msi = to_rcar_msi(chip); 773 struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip); 774 struct msi_msg msg; 775 unsigned int irq; 776 int hwirq; 777 778 hwirq = rcar_msi_alloc(msi); 779 if (hwirq < 0) 780 return hwirq; 781 782 irq = irq_find_mapping(msi->domain, hwirq); 783 if (!irq) { 784 rcar_msi_free(msi, hwirq); 785 return -EINVAL; 786 } 787 788 irq_set_msi_desc(irq, desc); 789 790 msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE; 791 msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR); 792 msg.data = hwirq; 793 794 pci_write_msi_msg(irq, &msg); 795 796 return 0; 797 } 798 799 static int rcar_msi_setup_irqs(struct msi_controller *chip, 800 struct pci_dev *pdev, int nvec, int type) 801 { 802 struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip); 803 struct rcar_msi *msi = to_rcar_msi(chip); 804 struct msi_desc *desc; 805 struct msi_msg msg; 806 unsigned int irq; 807 int hwirq; 808 int i; 809 810 /* MSI-X interrupts are not supported */ 811 if (type == PCI_CAP_ID_MSIX) 812 return -EINVAL; 813 814 WARN_ON(!list_is_singular(&pdev->dev.msi_list)); 815 desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list); 816 817 hwirq = rcar_msi_alloc_region(msi, nvec); 818 if (hwirq < 0) 819 return -ENOSPC; 820 821 irq = irq_find_mapping(msi->domain, hwirq); 822 if (!irq) 823 return -ENOSPC; 824 825 for (i = 0; i < nvec; i++) { 826 /* 827 * irq_create_mapping() called from rcar_pcie_probe() pre- 828 * allocates descs, so there is no need to allocate descs here. 829 * We can therefore assume that if irq_find_mapping() above 830 * returns non-zero, then the descs are also successfully 831 * allocated. 832 */ 833 if (irq_set_msi_desc_off(irq, i, desc)) { 834 /* TODO: clear */ 835 return -EINVAL; 836 } 837 } 838 839 desc->nvec_used = nvec; 840 desc->msi_attrib.multiple = order_base_2(nvec); 841 842 msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE; 843 msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR); 844 msg.data = hwirq; 845 846 pci_write_msi_msg(irq, &msg); 847 848 return 0; 849 } 850 851 static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq) 852 { 853 struct rcar_msi *msi = to_rcar_msi(chip); 854 struct irq_data *d = irq_get_irq_data(irq); 855 856 rcar_msi_free(msi, d->hwirq); 857 } 858 859 static struct irq_chip rcar_msi_irq_chip = { 860 .name = "R-Car PCIe MSI", 861 .irq_enable = pci_msi_unmask_irq, 862 .irq_disable = pci_msi_mask_irq, 863 .irq_mask = pci_msi_mask_irq, 864 .irq_unmask = pci_msi_unmask_irq, 865 }; 866 867 static int rcar_msi_map(struct irq_domain *domain, unsigned int irq, 868 irq_hw_number_t hwirq) 869 { 870 irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq); 871 irq_set_chip_data(irq, domain->host_data); 872 873 return 0; 874 } 875 876 static const struct irq_domain_ops msi_domain_ops = { 877 .map = rcar_msi_map, 878 }; 879 880 static void rcar_pcie_unmap_msi(struct rcar_pcie *pcie) 881 { 882 struct rcar_msi *msi = &pcie->msi; 883 int i, irq; 884 885 for (i = 0; i < INT_PCI_MSI_NR; i++) { 886 irq = irq_find_mapping(msi->domain, i); 887 if (irq > 0) 888 irq_dispose_mapping(irq); 889 } 890 891 irq_domain_remove(msi->domain); 892 } 893 894 static int rcar_pcie_enable_msi(struct rcar_pcie *pcie) 895 { 896 struct device *dev = pcie->dev; 897 struct rcar_msi *msi = &pcie->msi; 898 phys_addr_t base; 899 int err, i; 900 901 mutex_init(&msi->lock); 902 903 msi->chip.dev = dev; 904 msi->chip.setup_irq = rcar_msi_setup_irq; 905 msi->chip.setup_irqs = rcar_msi_setup_irqs; 906 msi->chip.teardown_irq = rcar_msi_teardown_irq; 907 908 msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR, 909 &msi_domain_ops, &msi->chip); 910 if (!msi->domain) { 911 dev_err(dev, "failed to create IRQ domain\n"); 912 return -ENOMEM; 913 } 914 915 for (i = 0; i < INT_PCI_MSI_NR; i++) 916 irq_create_mapping(msi->domain, i); 917 918 /* Two irqs are for MSI, but they are also used for non-MSI irqs */ 919 err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq, 920 IRQF_SHARED | IRQF_NO_THREAD, 921 rcar_msi_irq_chip.name, pcie); 922 if (err < 0) { 923 dev_err(dev, "failed to request IRQ: %d\n", err); 924 goto err; 925 } 926 927 err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq, 928 IRQF_SHARED | IRQF_NO_THREAD, 929 rcar_msi_irq_chip.name, pcie); 930 if (err < 0) { 931 dev_err(dev, "failed to request IRQ: %d\n", err); 932 goto err; 933 } 934 935 /* setup MSI data target */ 936 msi->pages = __get_free_pages(GFP_KERNEL, 0); 937 if (!msi->pages) { 938 err = -ENOMEM; 939 goto err; 940 } 941 base = virt_to_phys((void *)msi->pages); 942 943 rcar_pci_write_reg(pcie, lower_32_bits(base) | MSIFE, PCIEMSIALR); 944 rcar_pci_write_reg(pcie, upper_32_bits(base), PCIEMSIAUR); 945 946 /* enable all MSI interrupts */ 947 rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER); 948 949 return 0; 950 951 err: 952 rcar_pcie_unmap_msi(pcie); 953 return err; 954 } 955 956 static void rcar_pcie_teardown_msi(struct rcar_pcie *pcie) 957 { 958 struct rcar_msi *msi = &pcie->msi; 959 960 /* Disable all MSI interrupts */ 961 rcar_pci_write_reg(pcie, 0, PCIEMSIIER); 962 963 /* Disable address decoding of the MSI interrupt, MSIFE */ 964 rcar_pci_write_reg(pcie, 0, PCIEMSIALR); 965 966 free_pages(msi->pages, 0); 967 968 rcar_pcie_unmap_msi(pcie); 969 } 970 971 static int rcar_pcie_get_resources(struct rcar_pcie *pcie) 972 { 973 struct device *dev = pcie->dev; 974 struct resource res; 975 int err, i; 976 977 pcie->phy = devm_phy_optional_get(dev, "pcie"); 978 if (IS_ERR(pcie->phy)) 979 return PTR_ERR(pcie->phy); 980 981 err = of_address_to_resource(dev->of_node, 0, &res); 982 if (err) 983 return err; 984 985 pcie->base = devm_ioremap_resource(dev, &res); 986 if (IS_ERR(pcie->base)) 987 return PTR_ERR(pcie->base); 988 989 pcie->bus_clk = devm_clk_get(dev, "pcie_bus"); 990 if (IS_ERR(pcie->bus_clk)) { 991 dev_err(dev, "cannot get pcie bus clock\n"); 992 return PTR_ERR(pcie->bus_clk); 993 } 994 995 i = irq_of_parse_and_map(dev->of_node, 0); 996 if (!i) { 997 dev_err(dev, "cannot get platform resources for msi interrupt\n"); 998 err = -ENOENT; 999 goto err_irq1; 1000 } 1001 pcie->msi.irq1 = i; 1002 1003 i = irq_of_parse_and_map(dev->of_node, 1); 1004 if (!i) { 1005 dev_err(dev, "cannot get platform resources for msi interrupt\n"); 1006 err = -ENOENT; 1007 goto err_irq2; 1008 } 1009 pcie->msi.irq2 = i; 1010 1011 return 0; 1012 1013 err_irq2: 1014 irq_dispose_mapping(pcie->msi.irq1); 1015 err_irq1: 1016 return err; 1017 } 1018 1019 static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie, 1020 struct resource_entry *entry, 1021 int *index) 1022 { 1023 u64 restype = entry->res->flags; 1024 u64 cpu_addr = entry->res->start; 1025 u64 cpu_end = entry->res->end; 1026 u64 pci_addr = entry->res->start - entry->offset; 1027 u32 flags = LAM_64BIT | LAR_ENABLE; 1028 u64 mask; 1029 u64 size = resource_size(entry->res); 1030 int idx = *index; 1031 1032 if (restype & IORESOURCE_PREFETCH) 1033 flags |= LAM_PREFETCH; 1034 1035 while (cpu_addr < cpu_end) { 1036 if (idx >= MAX_NR_INBOUND_MAPS - 1) { 1037 dev_err(pcie->dev, "Failed to map inbound regions!\n"); 1038 return -EINVAL; 1039 } 1040 /* 1041 * If the size of the range is larger than the alignment of 1042 * the start address, we have to use multiple entries to 1043 * perform the mapping. 1044 */ 1045 if (cpu_addr > 0) { 1046 unsigned long nr_zeros = __ffs64(cpu_addr); 1047 u64 alignment = 1ULL << nr_zeros; 1048 1049 size = min(size, alignment); 1050 } 1051 /* Hardware supports max 4GiB inbound region */ 1052 size = min(size, 1ULL << 32); 1053 1054 mask = roundup_pow_of_two(size) - 1; 1055 mask &= ~0xf; 1056 1057 /* 1058 * Set up 64-bit inbound regions as the range parser doesn't 1059 * distinguish between 32 and 64-bit types. 1060 */ 1061 rcar_pci_write_reg(pcie, lower_32_bits(pci_addr), 1062 PCIEPRAR(idx)); 1063 rcar_pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx)); 1064 rcar_pci_write_reg(pcie, lower_32_bits(mask) | flags, 1065 PCIELAMR(idx)); 1066 1067 rcar_pci_write_reg(pcie, upper_32_bits(pci_addr), 1068 PCIEPRAR(idx + 1)); 1069 rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr), 1070 PCIELAR(idx + 1)); 1071 rcar_pci_write_reg(pcie, 0, PCIELAMR(idx + 1)); 1072 1073 pci_addr += size; 1074 cpu_addr += size; 1075 idx += 2; 1076 } 1077 *index = idx; 1078 1079 return 0; 1080 } 1081 1082 static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie) 1083 { 1084 struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); 1085 struct resource_entry *entry; 1086 int index = 0, err = 0; 1087 1088 resource_list_for_each_entry(entry, &bridge->dma_ranges) { 1089 err = rcar_pcie_inbound_ranges(pcie, entry, &index); 1090 if (err) 1091 break; 1092 } 1093 1094 return err; 1095 } 1096 1097 static const struct of_device_id rcar_pcie_of_match[] = { 1098 { .compatible = "renesas,pcie-r8a7779", 1099 .data = rcar_pcie_phy_init_h1 }, 1100 { .compatible = "renesas,pcie-r8a7790", 1101 .data = rcar_pcie_phy_init_gen2 }, 1102 { .compatible = "renesas,pcie-r8a7791", 1103 .data = rcar_pcie_phy_init_gen2 }, 1104 { .compatible = "renesas,pcie-rcar-gen2", 1105 .data = rcar_pcie_phy_init_gen2 }, 1106 { .compatible = "renesas,pcie-r8a7795", 1107 .data = rcar_pcie_phy_init_gen3 }, 1108 { .compatible = "renesas,pcie-rcar-gen3", 1109 .data = rcar_pcie_phy_init_gen3 }, 1110 {}, 1111 }; 1112 1113 static int rcar_pcie_probe(struct platform_device *pdev) 1114 { 1115 struct device *dev = &pdev->dev; 1116 struct rcar_pcie *pcie; 1117 u32 data; 1118 int err; 1119 int (*phy_init_fn)(struct rcar_pcie *); 1120 struct pci_host_bridge *bridge; 1121 1122 bridge = pci_alloc_host_bridge(sizeof(*pcie)); 1123 if (!bridge) 1124 return -ENOMEM; 1125 1126 pcie = pci_host_bridge_priv(bridge); 1127 1128 pcie->dev = dev; 1129 platform_set_drvdata(pdev, pcie); 1130 1131 err = pci_parse_request_of_pci_ranges(dev, &pcie->resources, 1132 &bridge->dma_ranges, NULL); 1133 if (err) 1134 goto err_free_bridge; 1135 1136 pm_runtime_enable(pcie->dev); 1137 err = pm_runtime_get_sync(pcie->dev); 1138 if (err < 0) { 1139 dev_err(pcie->dev, "pm_runtime_get_sync failed\n"); 1140 goto err_pm_disable; 1141 } 1142 1143 err = rcar_pcie_get_resources(pcie); 1144 if (err < 0) { 1145 dev_err(dev, "failed to request resources: %d\n", err); 1146 goto err_pm_put; 1147 } 1148 1149 err = clk_prepare_enable(pcie->bus_clk); 1150 if (err) { 1151 dev_err(dev, "failed to enable bus clock: %d\n", err); 1152 goto err_unmap_msi_irqs; 1153 } 1154 1155 err = rcar_pcie_parse_map_dma_ranges(pcie); 1156 if (err) 1157 goto err_clk_disable; 1158 1159 phy_init_fn = of_device_get_match_data(dev); 1160 err = phy_init_fn(pcie); 1161 if (err) { 1162 dev_err(dev, "failed to init PCIe PHY\n"); 1163 goto err_clk_disable; 1164 } 1165 1166 /* Failure to get a link might just be that no cards are inserted */ 1167 if (rcar_pcie_hw_init(pcie)) { 1168 dev_info(dev, "PCIe link down\n"); 1169 err = -ENODEV; 1170 goto err_phy_shutdown; 1171 } 1172 1173 data = rcar_pci_read_reg(pcie, MACSR); 1174 dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f); 1175 1176 if (IS_ENABLED(CONFIG_PCI_MSI)) { 1177 err = rcar_pcie_enable_msi(pcie); 1178 if (err < 0) { 1179 dev_err(dev, 1180 "failed to enable MSI support: %d\n", 1181 err); 1182 goto err_phy_shutdown; 1183 } 1184 } 1185 1186 err = rcar_pcie_enable(pcie); 1187 if (err) 1188 goto err_msi_teardown; 1189 1190 return 0; 1191 1192 err_msi_teardown: 1193 if (IS_ENABLED(CONFIG_PCI_MSI)) 1194 rcar_pcie_teardown_msi(pcie); 1195 1196 err_phy_shutdown: 1197 if (pcie->phy) { 1198 phy_power_off(pcie->phy); 1199 phy_exit(pcie->phy); 1200 } 1201 1202 err_clk_disable: 1203 clk_disable_unprepare(pcie->bus_clk); 1204 1205 err_unmap_msi_irqs: 1206 irq_dispose_mapping(pcie->msi.irq2); 1207 irq_dispose_mapping(pcie->msi.irq1); 1208 1209 err_pm_put: 1210 pm_runtime_put(dev); 1211 1212 err_pm_disable: 1213 pm_runtime_disable(dev); 1214 pci_free_resource_list(&pcie->resources); 1215 1216 err_free_bridge: 1217 pci_free_host_bridge(bridge); 1218 1219 return err; 1220 } 1221 1222 static int rcar_pcie_resume_noirq(struct device *dev) 1223 { 1224 struct rcar_pcie *pcie = dev_get_drvdata(dev); 1225 1226 if (rcar_pci_read_reg(pcie, PMSR) && 1227 !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN)) 1228 return 0; 1229 1230 /* Re-establish the PCIe link */ 1231 rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR); 1232 rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR); 1233 return rcar_pcie_wait_for_dl(pcie); 1234 } 1235 1236 static const struct dev_pm_ops rcar_pcie_pm_ops = { 1237 .resume_noirq = rcar_pcie_resume_noirq, 1238 }; 1239 1240 static struct platform_driver rcar_pcie_driver = { 1241 .driver = { 1242 .name = "rcar-pcie", 1243 .of_match_table = rcar_pcie_of_match, 1244 .pm = &rcar_pcie_pm_ops, 1245 .suppress_bind_attrs = true, 1246 }, 1247 .probe = rcar_pcie_probe, 1248 }; 1249 builtin_platform_driver(rcar_pcie_driver); 1250