// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe driver for Marvell Armada 370 and Armada XP SoCs
 *
 * Author: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/mbus.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>

#include "../pci.h"
#include "../pci-bridge-emul.h"

/*
 * PCIe unit register offsets.
 */
#define PCIE_DEV_ID_OFF		0x0000
#define PCIE_CMD_OFF		0x0004
#define PCIE_DEV_REV_OFF	0x0008
#define PCIE_BAR_LO_OFF(n)	(0x0010 + ((n) << 3))
#define PCIE_BAR_HI_OFF(n)	(0x0014 + ((n) << 3))
#define PCIE_SSDEV_ID_OFF	0x002c
#define PCIE_CAP_PCIEXP		0x0060
#define PCIE_CAP_PCIERR_OFF	0x0100
#define PCIE_BAR_CTRL_OFF(n)	(0x1804 + (((n) - 1) * 4))
#define PCIE_WIN04_CTRL_OFF(n)	(0x1820 + ((n) << 4))
#define PCIE_WIN04_BASE_OFF(n)	(0x1824 + ((n) << 4))
#define PCIE_WIN04_REMAP_OFF(n)	(0x182c + ((n) << 4))
#define PCIE_WIN5_CTRL_OFF	0x1880
#define PCIE_WIN5_BASE_OFF	0x1884
#define PCIE_WIN5_REMAP_OFF	0x188c
#define PCIE_CONF_ADDR_OFF	0x18f8
#define  PCIE_CONF_ADDR_EN		0x80000000
#define  PCIE_CONF_REG(r)		((((r) & 0xf00) << 16) | ((r) & 0xfc))
#define  PCIE_CONF_BUS(b)		(((b) & 0xff) << 16)
#define  PCIE_CONF_DEV(d)		(((d) & 0x1f) << 11)
#define  PCIE_CONF_FUNC(f)		(((f) & 0x7) << 8)
#define  PCIE_CONF_ADDR(bus, devfn, where) \
	(PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \
	 PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where) | \
	 PCIE_CONF_ADDR_EN)
#define PCIE_CONF_DATA_OFF	0x18fc
#define PCIE_INT_CAUSE_OFF	0x1900
#define PCIE_INT_UNMASK_OFF	0x1910
#define  PCIE_INT_INTX(i)		BIT(24+i)
#define  PCIE_INT_PM_PME		BIT(28)
#define  PCIE_INT_ALL_MASK		GENMASK(31, 0)
#define PCIE_CTRL_OFF		0x1a00
#define  PCIE_CTRL_X1_MODE		0x0001
#define  PCIE_CTRL_RC_MODE		BIT(1)
#define  PCIE_CTRL_MASTER_HOT_RESET	BIT(24)
#define PCIE_STAT_OFF		0x1a04
#define  PCIE_STAT_BUS			0xff00
#define  PCIE_STAT_DEV			0x1f0000
#define  PCIE_STAT_LINK_DOWN		BIT(0)
#define PCIE_RC_RTSTA		0x1a14
#define PCIE_DEBUG_CTRL		0x1a60
#define  PCIE_DEBUG_SOFT_RESET		BIT(20)

struct mvebu_pcie_port;

/* Structure representing all PCIe interfaces */
struct mvebu_pcie {
	struct platform_device *pdev;
	struct mvebu_pcie_port *ports;
	struct resource io;
	struct resource realio;
	struct resource mem;
	struct resource busn;
	int nports;
};

struct mvebu_pcie_window {
	phys_addr_t base;
	phys_addr_t remap;
	size_t size;
};

/* Structure representing one PCIe interface */
struct mvebu_pcie_port {
	char *name;
	void __iomem *base;
	u32 port;
	u32 lane;
	bool is_x4;
	int devfn;
	unsigned int mem_target;
	unsigned int mem_attr;
	unsigned int io_target;
	unsigned int io_attr;
	struct clk *clk;
	struct gpio_desc *reset_gpio;
	char *reset_name;
	struct pci_bridge_emul bridge;
	struct device_node *dn;
	struct mvebu_pcie *pcie;
	struct mvebu_pcie_window memwin;
	struct mvebu_pcie_window iowin;
	u32 saved_pcie_stat;
	struct resource regs;
	struct irq_domain *intx_irq_domain;
	raw_spinlock_t irq_lock;
	int intx_irq;
};

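/* Helpers for accessing the per-port PCIe unit registers defined above. */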
static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg)
{
	writel(val, port->base + reg);
}

static inline u32 mvebu_readl(struct mvebu_pcie_port *port, u32 reg)
{
	return readl(port->base + reg);
}

static inline bool mvebu_has_ioport(struct mvebu_pcie_port *port)
{
	return port->io_target != -1 && port->io_attr != -1;
}

static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port)
{
	return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
}

static u8 mvebu_pcie_get_local_bus_nr(struct mvebu_pcie_port *port)
{
	return (mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_BUS) >> 8;
}

static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr)
{
	u32 stat;

	stat = mvebu_readl(port, PCIE_STAT_OFF);
	stat &= ~PCIE_STAT_BUS;
	stat |= nr << 8;
	mvebu_writel(port, stat, PCIE_STAT_OFF);
}

static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr)
{
	u32 stat;

	stat = mvebu_readl(port, PCIE_STAT_OFF);
	stat &= ~PCIE_STAT_DEV;
	stat |= nr << 16;
	mvebu_writel(port, stat, PCIE_STAT_OFF);
}

static void mvebu_pcie_disable_wins(struct mvebu_pcie_port *port)
{
	int i;

	mvebu_writel(port, 0, PCIE_BAR_LO_OFF(0));
	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0));

	for (i = 1; i < 3; i++) {
		mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i));
		mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i));
		mvebu_writel(port, 0, PCIE_BAR_HI_OFF(i));
	}

	for (i = 0; i < 5; i++) {
		mvebu_writel(port, 0, PCIE_WIN04_CTRL_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_BASE_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
	}

	mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF);
	mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF);
	mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF);
}

/*
 * Setup PCIE BARs and Address Decode Wins:
 * BAR[0] -> internal registers (needed for MSI)
 * BAR[1] -> covers all DRAM banks
 * BAR[2] -> Disabled
 * WIN[0-3] -> DRAM bank[0-3]
 */
static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
{
	const struct mbus_dram_target_info *dram;
	u32 size;
	int i;

	dram = mv_mbus_dram_info();

	/* First, disable and clear BARs and windows. */
	mvebu_pcie_disable_wins(port);

	/* Setup windows for DDR banks. Count total DDR size on the fly. */
	size = 0;
	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvebu_writel(port, cs->base & 0xffff0000,
			     PCIE_WIN04_BASE_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
		mvebu_writel(port,
			     ((cs->size - 1) & 0xffff0000) |
			     (cs->mbus_attr << 8) |
			     (dram->mbus_dram_target_id << 4) | 1,
			     PCIE_WIN04_CTRL_OFF(i));

		size += cs->size;
	}

	/* Round up 'size' to the nearest power of two. */
	if ((size & (size - 1)) != 0)
		size = 1 << fls(size);

	/* Setup BAR[1] to all DRAM banks. */
	mvebu_writel(port, dram->cs[0].base, PCIE_BAR_LO_OFF(1));
	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1));
	mvebu_writel(port, ((size - 1) & 0xffff0000) | 1,
		     PCIE_BAR_CTRL_OFF(1));

	/*
	 * Point BAR[0] to the device's internal registers.
	 */
	mvebu_writel(port, round_down(port->regs.start, SZ_1M), PCIE_BAR_LO_OFF(0));
	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0));
}

static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
{
	u32 ctrl, lnkcap, cmd, dev_rev, unmask;

	/* Setup PCIe controller to Root Complex mode. */
	ctrl = mvebu_readl(port, PCIE_CTRL_OFF);
	ctrl |= PCIE_CTRL_RC_MODE;
	mvebu_writel(port, ctrl, PCIE_CTRL_OFF);

	/*
	 * Set Maximum Link Width to X1 or X4 in Root Port's PCIe Link
	 * Capability register. This register is defined by the PCIe
	 * specification as read-only, but this mvebu controller has it as
	 * read-write and it must be set to the number of SerDes PCIe lanes
	 * (1 or 4). If this register is not set correctly then the link with
	 * the endpoint card is not established.
	 */
	lnkcap = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);
	lnkcap &= ~PCI_EXP_LNKCAP_MLW;
	lnkcap |= (port->is_x4 ? 4 : 1) << 4;
	mvebu_writel(port, lnkcap, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);

	/* Disable Root Bridge I/O space, memory space and bus mastering. */
	cmd = mvebu_readl(port, PCIE_CMD_OFF);
	cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
	mvebu_writel(port, cmd, PCIE_CMD_OFF);

	/*
	 * Change Class Code of PCI Bridge device to PCI Bridge (0x6004)
	 * because the default value is Memory controller (0x5080).
	 *
	 * Note that this mvebu PCI Bridge does not have a compliant Type 1
	 * Configuration Space. Header Type is reported as Type 0 and it
	 * has the format of Type 0 config space.
	 *
	 * Moreover Type 0 BAR registers (ranges 0x10 - 0x28 and 0x30 - 0x34)
	 * have the same format in Marvell's specification as in the PCIe
	 * specification, but their meaning is totally different and they do
	 * different things: they are aliased into internal mvebu registers
	 * (e.g. PCIE_BAR_LO_OFF) and these should not be changed or
	 * reconfigured by pci device drivers.
	 *
	 * Therefore this driver uses a PCI Bridge emulation which handles
	 * access to the configuration space via internal mvebu registers or
	 * an emulated configuration buffer. The driver accesses this PCI
	 * Bridge directly for simplification, but these registers can also
	 * be accessed via the standard mvebu way for accessing PCI config
	 * space.
	 */
	dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF);
	dev_rev &= ~0xffffff00;
	dev_rev |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
	mvebu_writel(port, dev_rev, PCIE_DEV_REV_OFF);

	/* Point PCIe unit MBUS decode windows to DRAM space. */
	mvebu_pcie_setup_wins(port);

	/* Mask all interrupt sources. */
	mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_UNMASK_OFF);

	/* Clear all interrupt causes. */
	mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_CAUSE_OFF);

	/* Check if "intx" interrupt was specified in DT. */
	if (port->intx_irq > 0)
		return;

	/*
	 * Fallback code when "intx" interrupt was not specified in DT:
	 * Unmask all legacy INTx interrupts as the driver does not provide a
	 * way for masking and unmasking individual legacy INTx interrupts.
	 * Legacy INTx are reported via one shared GIC source and therefore
	 * the kernel cannot distinguish which individual legacy INTx was
	 * triggered. These interrupts are shared, so it should not cause any
	 * issue, just a performance penalty, as every PCIe interrupt handler
	 * needs to be called when some interrupt is triggered.
	 */
	unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
	unmask |= PCIE_INT_INTX(0) | PCIE_INT_INTX(1) |
		  PCIE_INT_INTX(2) | PCIE_INT_INTX(3);
	mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
}

static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
						    struct pci_bus *bus,
						    int devfn);

static int mvebu_pcie_child_rd_conf(struct pci_bus *bus, u32 devfn, int where,
				    int size, u32 *val)
{
	struct mvebu_pcie *pcie = bus->sysdata;
	struct mvebu_pcie_port *port;
	void __iomem *conf_data;

	port = mvebu_pcie_find_port(pcie, bus, devfn);
	if (!port)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!mvebu_pcie_link_up(port))
		return PCIBIOS_DEVICE_NOT_FOUND;

	conf_data = port->base + PCIE_CONF_DATA_OFF;

	mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
		     PCIE_CONF_ADDR_OFF);

	switch (size) {
	case 1:
		*val = readb_relaxed(conf_data + (where & 3));
		break;
	case 2:
		*val = readw_relaxed(conf_data + (where & 2));
		break;
	case 4:
		*val = readl_relaxed(conf_data);
		break;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int mvebu_pcie_child_wr_conf(struct pci_bus *bus, u32 devfn,
				    int where, int size, u32 val)
{
	struct mvebu_pcie *pcie = bus->sysdata;
	struct mvebu_pcie_port *port;
	void __iomem *conf_data;

	port = mvebu_pcie_find_port(pcie, bus, devfn);
	if (!port)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!mvebu_pcie_link_up(port))
		return PCIBIOS_DEVICE_NOT_FOUND;

	conf_data = port->base + PCIE_CONF_DATA_OFF;

	mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
		     PCIE_CONF_ADDR_OFF);

	switch (size) {
	case 1:
		writeb(val, conf_data + (where & 3));
		break;
	case 2:
		writew(val, conf_data + (where & 2));
		break;
	case 4:
		writel(val, conf_data);
		break;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops mvebu_pcie_child_ops = {
	.read = mvebu_pcie_child_rd_conf,
	.write = mvebu_pcie_child_wr_conf,
};

/*
 * Remove windows, starting from the largest ones to the smallest
 * ones.
 */
static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
				   phys_addr_t base, size_t size)
{
	while (size) {
		size_t sz = 1 << (fls(size) - 1);

		mvebu_mbus_del_window(base, sz);
		base += sz;
		size -= sz;
	}
}

/*
 * MBus windows can only have a power of two size, but PCI BARs do not
 * have this constraint. Therefore, we have to split the PCI BAR into
 * areas each having a power of two size. We start from the largest
 * one (i.e. the highest order bit set in the size).
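 * For example, a 9 MB region is covered by an 8 MB window followed by a
 * 1 MB window.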
 */
static int mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
				  unsigned int target, unsigned int attribute,
				  phys_addr_t base, size_t size,
				  phys_addr_t remap)
{
	size_t size_mapped = 0;

	while (size) {
		size_t sz = 1 << (fls(size) - 1);
		int ret;

		ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base,
							sz, remap);
		if (ret) {
			phys_addr_t end = base + sz - 1;

			dev_err(&port->pcie->pdev->dev,
				"Could not create MBus window at [mem %pa-%pa]: %d\n",
				&base, &end, ret);
			mvebu_pcie_del_windows(port, base - size_mapped,
					       size_mapped);
			return ret;
		}

		size -= sz;
		size_mapped += sz;
		base += sz;
		if (remap != MVEBU_MBUS_NO_REMAP)
			remap += sz;
	}

	return 0;
}

static int mvebu_pcie_set_window(struct mvebu_pcie_port *port,
				 unsigned int target, unsigned int attribute,
				 const struct mvebu_pcie_window *desired,
				 struct mvebu_pcie_window *cur)
{
	int ret;

	if (desired->base == cur->base && desired->remap == cur->remap &&
	    desired->size == cur->size)
		return 0;

	if (cur->size != 0) {
		mvebu_pcie_del_windows(port, cur->base, cur->size);
		cur->size = 0;
		cur->base = 0;

		/*
		 * If something tries to change the window while it is enabled
		 * the change will not be done atomically. That would be
		 * difficult to do in the general case.
		 */
	}

	if (desired->size == 0)
		return 0;

	ret = mvebu_pcie_add_windows(port, target, attribute, desired->base,
				     desired->size, desired->remap);
	if (ret) {
		cur->size = 0;
		cur->base = 0;
		return ret;
	}

	*cur = *desired;
	return 0;
}

static int mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
{
	struct mvebu_pcie_window desired = {};
	struct pci_bridge_emul_conf *conf = &port->bridge.conf;

	/* Are the new iobase/iolimit values invalid? */
	if (conf->iolimit < conf->iobase ||
	    conf->iolimitupper < conf->iobaseupper)
		return mvebu_pcie_set_window(port, port->io_target, port->io_attr,
					     &desired, &port->iowin);

	/*
	 * We read the PCI-to-PCI bridge emulated registers, and
	 * calculate the base address and size of the address decoding
	 * window to setup, according to the PCI-to-PCI bridge
	 * specifications. iobase is the bus address, port->iowin.base
	 * is the CPU address.
	 */
	desired.remap = ((conf->iobase & 0xF0) << 8) |
			(conf->iobaseupper << 16);
	desired.base = port->pcie->io.start + desired.remap;
	desired.size = ((0xFFF | ((conf->iolimit & 0xF0) << 8) |
			 (conf->iolimitupper << 16)) -
			desired.remap) +
		       1;

	return mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired,
				     &port->iowin);
}

static int mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
{
	struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP};
	struct pci_bridge_emul_conf *conf = &port->bridge.conf;

	/* Are the new membase/memlimit values invalid? */
	if (conf->memlimit < conf->membase)
		return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
					     &desired, &port->memwin);

	/*
	 * We read the PCI-to-PCI bridge emulated registers, and
	 * calculate the base address and size of the address decoding
	 * window to setup, according to the PCI-to-PCI bridge
	 * specifications.
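	 * For example, a membase of 0xE000 and a memlimit of 0xEFF0 describe
	 * the 256 MB range 0xE0000000 - 0xEFFFFFFF.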
	 */
	desired.base = ((conf->membase & 0xFFF0) << 16);
	desired.size = (((conf->memlimit & 0xFFF0) << 16) | 0xFFFFF) -
		       desired.base + 1;

	return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
				     &port->memwin);
}

static pci_bridge_emul_read_status_t
mvebu_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
				     int reg, u32 *value)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case PCI_COMMAND:
		*value = mvebu_readl(port, PCIE_CMD_OFF);
		break;

	case PCI_PRIMARY_BUS: {
		/*
		 * From the whole 32bit register we support reading from HW
		 * only the secondary bus number, which is the mvebu local bus
		 * number. Other bits are retrieved only from the emulated
		 * config buffer.
		 */
		__le32 *cfgspace = (__le32 *)&bridge->conf;
		u32 val = le32_to_cpu(cfgspace[PCI_PRIMARY_BUS / 4]);
		val &= ~0xff00;
		val |= mvebu_pcie_get_local_bus_nr(port) << 8;
		*value = val;
		break;
	}

	case PCI_INTERRUPT_LINE: {
		/*
		 * From the whole 32bit register we support reading from HW
		 * only one bit: PCI_BRIDGE_CTL_BUS_RESET.
		 * Other bits are retrieved only from the emulated config
		 * buffer.
		 */
		__le32 *cfgspace = (__le32 *)&bridge->conf;
		u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
		if (mvebu_readl(port, PCIE_CTRL_OFF) & PCIE_CTRL_MASTER_HOT_RESET)
			val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
		else
			val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16);
		*value = val;
		break;
	}

	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}

	return PCI_BRIDGE_EMUL_HANDLED;
}

static pci_bridge_emul_read_status_t
mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
				     int reg, u32 *value)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case PCI_EXP_DEVCAP:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP);
		break;

	case PCI_EXP_DEVCTL:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
		break;

	case PCI_EXP_LNKCAP:
		/*
		 * PCIe requires that the Clock Power Management capability bit
		 * is hard-wired to zero for downstream ports but HW returns 1.
		 * Additionally enable Data Link Layer Link Active Reporting
		 * Capable bit as DL_Active indication is provided too.
		 */
		*value = (mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) &
			  ~PCI_EXP_LNKCAP_CLKPM) | PCI_EXP_LNKCAP_DLLLARC;
		break;

	case PCI_EXP_LNKCTL:
		/* DL_Active indication is provided via PCIE_STAT_OFF */
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL) |
			 (mvebu_pcie_link_up(port) ?
			  (PCI_EXP_LNKSTA_DLLLA << 16) : 0);
		break;

	case PCI_EXP_SLTCTL:
		*value = PCI_EXP_SLTSTA_PDS << 16;
		break;

	case PCI_EXP_RTSTA:
		*value = mvebu_readl(port, PCIE_RC_RTSTA);
		break;

	case PCI_EXP_DEVCAP2:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP2);
		break;

	case PCI_EXP_DEVCTL2:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL2);
		break;

	case PCI_EXP_LNKCTL2:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL2);
		break;

	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}

	return PCI_BRIDGE_EMUL_HANDLED;
}

static pci_bridge_emul_read_status_t
mvebu_pci_bridge_emul_ext_conf_read(struct pci_bridge_emul *bridge,
				    int reg, u32 *value)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case 0:
	case PCI_ERR_UNCOR_STATUS:
	case PCI_ERR_UNCOR_MASK:
	case PCI_ERR_UNCOR_SEVER:
	case PCI_ERR_COR_STATUS:
	case PCI_ERR_COR_MASK:
	case PCI_ERR_CAP:
	case PCI_ERR_HEADER_LOG+0:
	case PCI_ERR_HEADER_LOG+4:
	case PCI_ERR_HEADER_LOG+8:
	case PCI_ERR_HEADER_LOG+12:
	case PCI_ERR_ROOT_COMMAND:
	case PCI_ERR_ROOT_STATUS:
	case PCI_ERR_ROOT_ERR_SRC:
		*value = mvebu_readl(port, PCIE_CAP_PCIERR_OFF + reg);
		break;

	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}

	return PCI_BRIDGE_EMUL_HANDLED;
}

static void
mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
				      int reg, u32 old, u32 new, u32 mask)
{
	struct mvebu_pcie_port *port = bridge->data;
	struct pci_bridge_emul_conf *conf = &bridge->conf;

	switch (reg) {
	case PCI_COMMAND:
		mvebu_writel(port, new, PCIE_CMD_OFF);
		break;

	case PCI_IO_BASE:
		if ((mask & 0xffff) && mvebu_has_ioport(port) &&
		    mvebu_pcie_handle_iobase_change(port)) {
			/* On error disable IO range */
			conf->iobase &= ~0xf0;
			conf->iolimit &= ~0xf0;
			conf->iobase |= 0xf0;
			conf->iobaseupper = cpu_to_le16(0x0000);
			conf->iolimitupper = cpu_to_le16(0x0000);
		}
		break;

	case PCI_MEMORY_BASE:
		if (mvebu_pcie_handle_membase_change(port)) {
			/* On error disable mem range */
			conf->membase = cpu_to_le16(le16_to_cpu(conf->membase) & ~0xfff0);
			conf->memlimit = cpu_to_le16(le16_to_cpu(conf->memlimit) & ~0xfff0);
			conf->membase = cpu_to_le16(le16_to_cpu(conf->membase) | 0xfff0);
		}
		break;

	case PCI_IO_BASE_UPPER16:
		if (mvebu_has_ioport(port) &&
		    mvebu_pcie_handle_iobase_change(port)) {
			/* On error disable IO range */
			conf->iobase &= ~0xf0;
			conf->iolimit &= ~0xf0;
			conf->iobase |= 0xf0;
			conf->iobaseupper = cpu_to_le16(0x0000);
			conf->iolimitupper = cpu_to_le16(0x0000);
		}
		break;

	case PCI_PRIMARY_BUS:
		if (mask & 0xff00)
			mvebu_pcie_set_local_bus_nr(port, conf->secondary_bus);
		break;

	case PCI_INTERRUPT_LINE:
		if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
			u32 ctrl = mvebu_readl(port, PCIE_CTRL_OFF);
			if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
				ctrl |= PCIE_CTRL_MASTER_HOT_RESET;
			else
				ctrl &= ~PCIE_CTRL_MASTER_HOT_RESET;
			mvebu_writel(port, ctrl, PCIE_CTRL_OFF);
		}
		break;

	default:
		break;
	}
}

static void
mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
				      int reg, u32 old, u32 new, u32 mask)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
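	/*
	 * Writes to registers that are backed by the hardware PCIe capability
	 * are propagated to the PCIe unit registers below; everything else is
	 * kept only in the emulated config space buffer.
	 */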
	case PCI_EXP_DEVCTL:
		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
		break;

	case PCI_EXP_LNKCTL:
		/*
		 * PCIe requires that the Enable Clock Power Management bit
		 * is hard-wired to zero for downstream ports but HW allows
		 * changing it.
		 */
		new &= ~PCI_EXP_LNKCTL_CLKREQ_EN;

		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
		break;

	case PCI_EXP_RTSTA:
		/*
		 * PME Status bit in Root Status Register (PCIE_RC_RTSTA)
		 * is read-only and can be cleared only by writing 0b to the
		 * Interrupt Cause RW0C register (PCIE_INT_CAUSE_OFF). So
		 * clear PME via Interrupt Cause.
		 */
		if (new & PCI_EXP_RTSTA_PME)
			mvebu_writel(port, ~PCIE_INT_PM_PME, PCIE_INT_CAUSE_OFF);
		break;

	case PCI_EXP_DEVCTL2:
		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL2);
		break;

	case PCI_EXP_LNKCTL2:
		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL2);
		break;

	default:
		break;
	}
}

static void
mvebu_pci_bridge_emul_ext_conf_write(struct pci_bridge_emul *bridge,
				     int reg, u32 old, u32 new, u32 mask)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	/* These are W1C registers, so clear other bits */
	case PCI_ERR_UNCOR_STATUS:
	case PCI_ERR_COR_STATUS:
	case PCI_ERR_ROOT_STATUS:
		new &= mask;
		fallthrough;

	case PCI_ERR_UNCOR_MASK:
	case PCI_ERR_UNCOR_SEVER:
	case PCI_ERR_COR_MASK:
	case PCI_ERR_CAP:
	case PCI_ERR_HEADER_LOG+0:
	case PCI_ERR_HEADER_LOG+4:
	case PCI_ERR_HEADER_LOG+8:
	case PCI_ERR_HEADER_LOG+12:
	case PCI_ERR_ROOT_COMMAND:
	case PCI_ERR_ROOT_ERR_SRC:
		mvebu_writel(port, new, PCIE_CAP_PCIERR_OFF + reg);
		break;

	default:
		break;
	}
}

static const struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
	.read_base = mvebu_pci_bridge_emul_base_conf_read,
	.write_base = mvebu_pci_bridge_emul_base_conf_write,
	.read_pcie = mvebu_pci_bridge_emul_pcie_conf_read,
	.write_pcie = mvebu_pci_bridge_emul_pcie_conf_write,
	.read_ext = mvebu_pci_bridge_emul_ext_conf_read,
	.write_ext = mvebu_pci_bridge_emul_ext_conf_write,
};

/*
 * Initialize the configuration space of the PCI-to-PCI bridge
 * associated with the given PCIe interface.
 */
static int mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
{
	unsigned int bridge_flags = PCI_BRIDGE_EMUL_NO_PREFMEM_FORWARD;
	struct pci_bridge_emul *bridge = &port->bridge;
	u32 dev_id = mvebu_readl(port, PCIE_DEV_ID_OFF);
	u32 dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF);
	u32 ssdev_id = mvebu_readl(port, PCIE_SSDEV_ID_OFF);
	u32 pcie_cap = mvebu_readl(port, PCIE_CAP_PCIEXP);
	u8 pcie_cap_ver = ((pcie_cap >> 16) & PCI_EXP_FLAGS_VERS);

	bridge->conf.vendor = cpu_to_le16(dev_id & 0xffff);
	bridge->conf.device = cpu_to_le16(dev_id >> 16);
	bridge->conf.class_revision = cpu_to_le32(dev_rev & 0xff);

	if (mvebu_has_ioport(port)) {
		/* We support 32 bits I/O addressing */
		bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
		bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
	} else {
		bridge_flags |= PCI_BRIDGE_EMUL_NO_IO_FORWARD;
	}

	/*
	 * Older mvebu hardware provides the PCIe Capability structure only in
	 * version 1. Newer hardware provides it in version 2.
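	 * The capability version is therefore read back from the hardware's
	 * PCI_EXP_FLAGS field (pcie_cap_ver above) so that the emulated
	 * bridge advertises whatever the unit implements.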
	 */
	bridge->pcie_conf.cap = cpu_to_le16(pcie_cap_ver);

	bridge->subsystem_vendor_id = ssdev_id & 0xffff;
	bridge->subsystem_id = ssdev_id >> 16;
	bridge->has_pcie = true;
	bridge->data = port;
	bridge->ops = &mvebu_pci_bridge_emul_ops;

	return pci_bridge_emul_init(bridge, bridge_flags);
}

static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
{
	return sys->private_data;
}

static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
						    struct pci_bus *bus,
						    int devfn)
{
	int i;

	for (i = 0; i < pcie->nports; i++) {
		struct mvebu_pcie_port *port = &pcie->ports[i];

		if (!port->base)
			continue;

		if (bus->number == 0 && port->devfn == devfn)
			return port;
		if (bus->number != 0 &&
		    bus->number >= port->bridge.conf.secondary_bus &&
		    bus->number <= port->bridge.conf.subordinate_bus)
			return port;
	}

	return NULL;
}

/* PCI configuration space write function */
static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			      int where, int size, u32 val)
{
	struct mvebu_pcie *pcie = bus->sysdata;
	struct mvebu_pcie_port *port;

	port = mvebu_pcie_find_port(pcie, bus, devfn);
	if (!port)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return pci_bridge_emul_conf_write(&port->bridge, where, size, val);
}

/* PCI configuration space read function */
static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			      int size, u32 *val)
{
	struct mvebu_pcie *pcie = bus->sysdata;
	struct mvebu_pcie_port *port;

	port = mvebu_pcie_find_port(pcie, bus, devfn);
	if (!port)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return pci_bridge_emul_conf_read(&port->bridge, where, size, val);
}

static struct pci_ops mvebu_pcie_ops = {
	.read = mvebu_pcie_rd_conf,
	.write = mvebu_pcie_wr_conf,
};

static void mvebu_pcie_intx_irq_mask(struct irq_data *d)
{
	struct mvebu_pcie_port *port = d->domain->host_data;
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned long flags;
	u32 unmask;

	raw_spin_lock_irqsave(&port->irq_lock, flags);
	unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
	unmask &= ~PCIE_INT_INTX(hwirq);
	mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
	raw_spin_unlock_irqrestore(&port->irq_lock, flags);
}

static void mvebu_pcie_intx_irq_unmask(struct irq_data *d)
{
	struct mvebu_pcie_port *port = d->domain->host_data;
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned long flags;
	u32 unmask;

	raw_spin_lock_irqsave(&port->irq_lock, flags);
	unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
	unmask |= PCIE_INT_INTX(hwirq);
	mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
	raw_spin_unlock_irqrestore(&port->irq_lock, flags);
}

static struct irq_chip intx_irq_chip = {
	.name = "mvebu-INTx",
	.irq_mask = mvebu_pcie_intx_irq_mask,
	.irq_unmask = mvebu_pcie_intx_irq_unmask,
};

static int mvebu_pcie_intx_irq_map(struct irq_domain *h,
				   unsigned int virq, irq_hw_number_t hwirq)
{
	struct mvebu_pcie_port *port = h->host_data;

	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &intx_irq_chip, handle_level_irq);
	irq_set_chip_data(virq, port);

	return 0;
}

static const struct irq_domain_ops mvebu_pcie_intx_irq_domain_ops = {
	.map = mvebu_pcie_intx_irq_map,
	.xlate = irq_domain_xlate_onecell,
};

static int mvebu_pcie_init_irq_domain(struct mvebu_pcie_port *port)
{
	struct device *dev = &port->pcie->pdev->dev;
	struct device_node *pcie_intc_node;

	raw_spin_lock_init(&port->irq_lock);

	pcie_intc_node = of_get_next_child(port->dn, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found for %s\n", port->name);
		return -ENODEV;
	}

	port->intx_irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						      &mvebu_pcie_intx_irq_domain_ops,
						      port);
	of_node_put(pcie_intc_node);
	if (!port->intx_irq_domain) {
		dev_err(dev, "Failed to get INTx IRQ domain for %s\n", port->name);
		return -ENOMEM;
	}

	return 0;
}

static void mvebu_pcie_irq_handler(struct irq_desc *desc)
{
	struct mvebu_pcie_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = &port->pcie->pdev->dev;
	u32 cause, unmask, status;
	int i;

	chained_irq_enter(chip, desc);

	cause = mvebu_readl(port, PCIE_INT_CAUSE_OFF);
	unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
	status = cause & unmask;

	/* Process legacy INTx interrupts */
	for (i = 0; i < PCI_NUM_INTX; i++) {
		if (!(status & PCIE_INT_INTX(i)))
			continue;

		if (generic_handle_domain_irq(port->intx_irq_domain, i) == -EINVAL)
			dev_err_ratelimited(dev, "unexpected INT%c IRQ\n", (char)i+'A');
	}

	chained_irq_exit(chip, desc);
}

static int mvebu_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	/* Interrupt support on mvebu emulated bridges is not implemented yet */
	if (dev->bus->number == 0)
		return 0; /* Proper return code 0 == NO_IRQ */

	return of_irq_parse_and_map_pci(dev, slot, pin);
}

static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
						 const struct resource *res,
						 resource_size_t start,
						 resource_size_t size,
						 resource_size_t align)
{
	if (dev->bus->number != 0)
		return start;

	/*
	 * On the PCI-to-PCI bridge side, the I/O windows must have at
	 * least a 64 KB size and the memory windows must have at
	 * least a 1 MB size. Moreover, MBus windows need to have a
	 * base address aligned on their size, and their size must be
	 * a power of two. This means that if the BAR doesn't have a
	 * power of two size, several MBus windows will actually be
	 * created. We need to ensure that the biggest MBus window
	 * (which will be the first one) is aligned on its size, which
	 * explains the rounddown_pow_of_two() being done here.
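	 * For example, a 1.5 MB memory BAR is covered by a 1 MB window
	 * followed by a 512 KB window, so its start address must be aligned
	 * to 1 MB.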
	 */
	if (res->flags & IORESOURCE_IO)
		return round_up(start, max_t(resource_size_t, SZ_64K,
					     rounddown_pow_of_two(size)));
	else if (res->flags & IORESOURCE_MEM)
		return round_up(start, max_t(resource_size_t, SZ_1M,
					     rounddown_pow_of_two(size)));
	else
		return start;
}

static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
					      struct device_node *np,
					      struct mvebu_pcie_port *port)
{
	int ret = 0;

	ret = of_address_to_resource(np, 0, &port->regs);
	if (ret)
		return (void __iomem *)ERR_PTR(ret);

	return devm_ioremap_resource(&pdev->dev, &port->regs);
}

#define DT_FLAGS_TO_TYPE(flags)		(((flags) >> 24) & 0x03)
#define  DT_TYPE_IO			0x1
#define  DT_TYPE_MEM32			0x2
#define DT_CPUADDR_TO_TARGET(cpuaddr)	(((cpuaddr) >> 56) & 0xFF)
#define DT_CPUADDR_TO_ATTR(cpuaddr)	(((cpuaddr) >> 48) & 0xFF)

static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
			      unsigned long type,
			      unsigned int *tgt,
			      unsigned int *attr)
{
	const int na = 3, ns = 2;
	const __be32 *range;
	int rlen, nranges, rangesz, pna, i;

	*tgt = -1;
	*attr = -1;

	range = of_get_property(np, "ranges", &rlen);
	if (!range)
		return -EINVAL;

	pna = of_n_addr_cells(np);
	rangesz = pna + na + ns;
	nranges = rlen / sizeof(__be32) / rangesz;

	for (i = 0; i < nranges; i++, range += rangesz) {
		u32 flags = of_read_number(range, 1);
		u32 slot = of_read_number(range + 1, 1);
		u64 cpuaddr = of_read_number(range + na, pna);
		unsigned long rtype;

		if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO)
			rtype = IORESOURCE_IO;
		else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
			rtype = IORESOURCE_MEM;
		else
			continue;

		if (slot == PCI_SLOT(devfn) && type == rtype) {
			*tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
			*attr = DT_CPUADDR_TO_ATTR(cpuaddr);
			return 0;
		}
	}

	return -ENOENT;
}

#ifdef CONFIG_PM_SLEEP
static int mvebu_pcie_suspend(struct device *dev)
{
	struct mvebu_pcie *pcie;
	int i;

	pcie = dev_get_drvdata(dev);
	for (i = 0; i < pcie->nports; i++) {
		struct mvebu_pcie_port *port = pcie->ports + i;
		if (!port->base)
			continue;
		port->saved_pcie_stat = mvebu_readl(port, PCIE_STAT_OFF);
	}

	return 0;
}

static int mvebu_pcie_resume(struct device *dev)
{
	struct mvebu_pcie *pcie;
	int i;

	pcie = dev_get_drvdata(dev);
	for (i = 0; i < pcie->nports; i++) {
		struct mvebu_pcie_port *port = pcie->ports + i;
		if (!port->base)
			continue;
		mvebu_writel(port, port->saved_pcie_stat, PCIE_STAT_OFF);
		mvebu_pcie_setup_hw(port);
	}

	return 0;
}
#endif

static void mvebu_pcie_port_clk_put(void *data)
{
	struct mvebu_pcie_port *port = data;

	clk_put(port->clk);
}

static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
				 struct mvebu_pcie_port *port, struct device_node *child)
{
	struct device *dev = &pcie->pdev->dev;
	enum of_gpio_flags flags;
	int reset_gpio, ret;
	u32 num_lanes;

	port->pcie = pcie;

	if (of_property_read_u32(child, "marvell,pcie-port", &port->port)) {
		dev_warn(dev, "ignoring %pOF, missing pcie-port property\n",
			 child);
		goto skip;
	}

	if (of_property_read_u32(child, "marvell,pcie-lane", &port->lane))
		port->lane = 0;

	if (!of_property_read_u32(child, "num-lanes", &num_lanes) && num_lanes == 4)
		port->is_x4 = true;

	port->name = devm_kasprintf(dev, GFP_KERNEL, "pcie%d.%d", port->port,
				    port->lane);
	if (!port->name) {
		ret = -ENOMEM;
		goto err;
	}

	port->devfn = of_pci_get_devfn(child);
	if (port->devfn < 0)
		goto skip;
	if (PCI_FUNC(port->devfn) != 0) {
		dev_err(dev, "%s: invalid function number, must be zero\n",
			port->name);
		goto skip;
	}

	ret = mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_MEM,
				 &port->mem_target, &port->mem_attr);
	if (ret < 0) {
		dev_err(dev, "%s: cannot get tgt/attr for mem window\n",
			port->name);
		goto skip;
	}

	if (resource_size(&pcie->io) != 0) {
		mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_IO,
				   &port->io_target, &port->io_attr);
	} else {
		port->io_target = -1;
		port->io_attr = -1;
	}

	/*
	 * Old DT bindings do not contain the "intx" interrupt,
	 * so do not fail probing the driver when the interrupt does not exist.
	 */
	port->intx_irq = of_irq_get_byname(child, "intx");
	if (port->intx_irq == -EPROBE_DEFER) {
		ret = port->intx_irq;
		goto err;
	}
	if (port->intx_irq <= 0) {
		dev_warn(dev, "%s: legacy INTx interrupts cannot be masked individually, "
			      "%pOF does not contain intx interrupt\n",
			 port->name, child);
	}

	reset_gpio = of_get_named_gpio_flags(child, "reset-gpios", 0, &flags);
	if (reset_gpio == -EPROBE_DEFER) {
		ret = reset_gpio;
		goto err;
	}

	if (gpio_is_valid(reset_gpio)) {
		unsigned long gpio_flags;

		port->reset_name = devm_kasprintf(dev, GFP_KERNEL, "%s-reset",
						  port->name);
		if (!port->reset_name) {
			ret = -ENOMEM;
			goto err;
		}

		if (flags & OF_GPIO_ACTIVE_LOW) {
			dev_info(dev, "%pOF: reset gpio is active low\n",
				 child);
			gpio_flags = GPIOF_ACTIVE_LOW |
				     GPIOF_OUT_INIT_LOW;
		} else {
			gpio_flags = GPIOF_OUT_INIT_HIGH;
		}

		ret = devm_gpio_request_one(dev, reset_gpio, gpio_flags,
					    port->reset_name);
		if (ret) {
			if (ret == -EPROBE_DEFER)
				goto err;
			goto skip;
		}

		port->reset_gpio = gpio_to_desc(reset_gpio);
	}

	port->clk = of_clk_get_by_name(child, NULL);
	if (IS_ERR(port->clk)) {
		dev_err(dev, "%s: cannot get clock\n", port->name);
		goto skip;
	}

	ret = devm_add_action(dev, mvebu_pcie_port_clk_put, port);
	if (ret < 0) {
		clk_put(port->clk);
		goto err;
	}

	return 1;

skip:
	ret = 0;

	/* In the case of skipping, we need to free these */
	devm_kfree(dev, port->reset_name);
	port->reset_name = NULL;
	devm_kfree(dev, port->name);
	port->name = NULL;

err:
	return ret;
}

/*
 * Power up a PCIe port. PCIe requires the refclk to be stable for 100µs
 * prior to releasing PERST. See table 2-4 in section 2.6.2 AC Specifications
 * of the PCI Express Card Electromechanical Specification, 1.1.
 */
static int mvebu_pcie_powerup(struct mvebu_pcie_port *port)
{
	int ret;

	ret = clk_prepare_enable(port->clk);
	if (ret < 0)
		return ret;

	if (port->reset_gpio) {
		u32 reset_udelay = PCI_PM_D3COLD_WAIT * 1000;

		of_property_read_u32(port->dn, "reset-delay-us",
				     &reset_udelay);

		udelay(100);

		gpiod_set_value_cansleep(port->reset_gpio, 0);
		msleep(reset_udelay / 1000);
	}

	return 0;
}

/*
 * Power down a PCIe port. Strictly, PCIe requires us to place the card
 * in D3hot state before asserting PERST#.
 */
static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
{
	gpiod_set_value_cansleep(port->reset_gpio, 1);

	clk_disable_unprepare(port->clk);
}

/*
 * devm_of_pci_get_host_bridge_resources() only sets up translatable resources,
 * so we need extra resource setup parsing our special DT properties encoding
 * the MEM and IO apertures.
 */
static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	int ret;

	/* Get the PCIe memory aperture */
	mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
	if (resource_size(&pcie->mem) == 0) {
		dev_err(dev, "invalid memory aperture size\n");
		return -EINVAL;
	}

	pcie->mem.name = "PCI MEM";
	pci_add_resource(&bridge->windows, &pcie->mem);
	ret = devm_request_resource(dev, &iomem_resource, &pcie->mem);
	if (ret)
		return ret;

	/* Get the PCIe IO aperture */
	mvebu_mbus_get_pcie_io_aperture(&pcie->io);

	if (resource_size(&pcie->io) != 0) {
		pcie->realio.flags = pcie->io.flags;
		pcie->realio.start = PCIBIOS_MIN_IO;
		pcie->realio.end = min_t(resource_size_t,
					 IO_SPACE_LIMIT - SZ_64K,
					 resource_size(&pcie->io) - 1);
		pcie->realio.name = "PCI I/O";

		ret = devm_pci_remap_iospace(dev, &pcie->realio, pcie->io.start);
		if (ret)
			return ret;

		pci_add_resource(&bridge->windows, &pcie->realio);
		ret = devm_request_resource(dev, &ioport_resource, &pcie->realio);
		if (ret)
			return ret;
	}

	return 0;
}

static int mvebu_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mvebu_pcie *pcie;
	struct pci_host_bridge *bridge;
	struct device_node *np = dev->of_node;
	struct device_node *child;
	int num, i, ret;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct mvebu_pcie));
	if (!bridge)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(bridge);
	pcie->pdev = pdev;
	platform_set_drvdata(pdev, pcie);

	ret = mvebu_pcie_parse_request_resources(pcie);
	if (ret)
		return ret;

	num = of_get_available_child_count(np);

	pcie->ports = devm_kcalloc(dev, num, sizeof(*pcie->ports), GFP_KERNEL);
	if (!pcie->ports)
		return -ENOMEM;

	i = 0;
	for_each_available_child_of_node(np, child) {
		struct mvebu_pcie_port *port = &pcie->ports[i];

		ret = mvebu_pcie_parse_port(pcie, port, child);
		if (ret < 0) {
			of_node_put(child);
			return ret;
		} else if (ret == 0) {
			continue;
		}

		port->dn = child;
		i++;
	}
	pcie->nports = i;

	for (i = 0; i < pcie->nports; i++) {
		struct mvebu_pcie_port *port = &pcie->ports[i];
		int irq = port->intx_irq;

		child = port->dn;
		if (!child)
			continue;

		ret = mvebu_pcie_powerup(port);
		if (ret < 0)
			continue;

		port->base = mvebu_pcie_map_registers(pdev, child, port);
		if (IS_ERR(port->base)) {
			dev_err(dev, "%s: cannot map registers\n", port->name);
			port->base = NULL;
			mvebu_pcie_powerdown(port);
			continue;
		}

		ret = mvebu_pci_bridge_emul_init(port);
		if (ret < 0) {
			dev_err(dev, "%s: cannot init emulated bridge\n",
				port->name);
			devm_iounmap(dev, port->base);
			port->base = NULL;
			mvebu_pcie_powerdown(port);
			continue;
		}

		if (irq > 0) {
			ret = mvebu_pcie_init_irq_domain(port);
			if (ret) {
				dev_err(dev, "%s: cannot init irq domain\n",
					port->name);
				pci_bridge_emul_cleanup(&port->bridge);
				devm_iounmap(dev, port->base);
				port->base = NULL;
				mvebu_pcie_powerdown(port);
				continue;
			}
			irq_set_chained_handler_and_data(irq,
							 mvebu_pcie_irq_handler,
							 port);
		}

		/*
		 * The PCIe topology exported by mvebu hw is quite complicated.
		 * In reality it has something like N fully independent host
		 * bridges where each host bridge has one PCIe Root Port
		 * (which acts as a PCI Bridge device). Each host bridge has
		 * its own independent internal registers, independent access
		 * to PCI config space, independent interrupt lines,
		 * independent window and memory access configuration. But
		 * additionally there is some kind of peer-to-peer support
		 * between PCIe devices behind different host bridges limited
		 * just to forwarding of memory and I/O transactions
		 * (forwarding of error messages and config cycles is not
		 * supported). So we could say there are N independent PCIe
		 * Root Complexes.
		 *
		 * For this kind of setup DT should have been structured into
		 * N independent PCIe controllers / host bridges. But instead
		 * the structure was defined in the past to put the PCIe Root
		 * Ports of all host bridges onto one bus zero, like in a
		 * classic multi-port Root Complex setup with just one host
		 * bridge.
		 *
		 * This means that the pci-mvebu.c driver provides a "virtual"
		 * bus 0 on which it registers all PCIe Root Ports (PCI Bridge
		 * devices) specified in DT by their BDF addresses and
		 * virtually routes PCI config access of each PCI Bridge
		 * device to the specific PCIe host bridge.
		 *
		 * Normally a PCI Bridge should choose between Type 0 and
		 * Type 1 config requests based on the primary and secondary
		 * bus numbers configured on the bridge itself. But because
		 * the mvebu PCI Bridge does not have registers for primary
		 * and secondary bus numbers in its config space, it
		 * determines the type of config requests in its own custom
		 * way.
		 *
		 * There are two ways mvebu determines the type of config
		 * request.
		 *
		 * 1. If the Secondary Bus Number Enable bit is not set or is
		 * not available (applies to pre-XP PCIe controllers) then
		 * Type 0 is used if the target bus number equals the Local
		 * Bus Number (bits [15:8] in register 0x1a04) and the target
		 * device number differs from the Local Device Number (bits
		 * [20:16] in register 0x1a04). Type 1 is used if the target
		 * bus number differs from the Local Bus Number. And when the
		 * target bus number equals the Local Bus Number and the
		 * target device equals the Local Device Number then the
		 * request is routed to the Local PCI Bridge (PCIe Root Port).
		 *
		 * 2. If the Secondary Bus Number Enable bit is set (bit 7 in
		 * register 0x1a2c) then mvebu hw determines the type of
		 * config request like a compliant PCI Bridge, based on the
		 * primary bus number, which is configured via the Local Bus
		 * Number (bits [15:8] in register 0x1a04), and the secondary
		 * bus number, which is configured via the Secondary Bus
		 * Number (bits [7:0] in register 0x1a2c). The Local PCI
		 * Bridge (PCIe Root Port) is available on the primary bus as
		 * the device with the Local Device Number (bits [20:16] in
		 * register 0x1a04).
		 *
		 * The Secondary Bus Number Enable bit is disabled by default
		 * and option 2. is not available on pre-XP PCIe controllers.
		 * Hence this driver always uses option 1.
		 *
		 * Basically it means that the primary and secondary buses
		 * share one virtual number configured via the Local Bus
		 * Number bits, and the Local Device Number bits determine
		 * whether the primary or the secondary bus is accessed. Set
		 * the Local Device Number to 1 and redirect all writes of the
		 * PCI Bridge Secondary Bus Number register to the Local Bus
		 * Number (bits [15:8] in register 0x1a04).
		 *
		 * So accessing devices on buses behind the secondary bus
		 * number works correctly. And also accessing device 0 at the
		 * secondary bus number via config space is correctly routed
		 * to the secondary bus. Due to issues described in
		 * mvebu_pcie_setup_hw(), PCI Bridges at the primary bus
		 * (zero) are not accessed directly via PCI config space but
		 * rather indirectly via the kernel's emulated PCI bridge
		 * driver.
		 */
		mvebu_pcie_setup_hw(port);
		mvebu_pcie_set_local_dev_nr(port, 1);
		mvebu_pcie_set_local_bus_nr(port, 0);
	}

	bridge->sysdata = pcie;
	bridge->ops = &mvebu_pcie_ops;
	bridge->child_ops = &mvebu_pcie_child_ops;
	bridge->align_resource = mvebu_pcie_align_resource;
	bridge->map_irq = mvebu_pcie_map_irq;

	return pci_host_probe(bridge);
}

static int mvebu_pcie_remove(struct platform_device *pdev)
{
	struct mvebu_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	u32 cmd;
	int i;

	/* Remove PCI bus with all devices. */
	pci_lock_rescan_remove();
	pci_stop_root_bus(bridge->bus);
	pci_remove_root_bus(bridge->bus);
	pci_unlock_rescan_remove();

	for (i = 0; i < pcie->nports; i++) {
		struct mvebu_pcie_port *port = &pcie->ports[i];
		int irq = port->intx_irq;

		if (!port->base)
			continue;

		/* Disable Root Bridge I/O space, memory space and bus mastering. */
		cmd = mvebu_readl(port, PCIE_CMD_OFF);
		cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		mvebu_writel(port, cmd, PCIE_CMD_OFF);

		/* Mask all interrupt sources. */
		mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_UNMASK_OFF);

		/* Clear all interrupt causes. */
		mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_CAUSE_OFF);

		if (irq > 0)
			irq_set_chained_handler_and_data(irq, NULL, NULL);

		/* Remove IRQ domains. */
		if (port->intx_irq_domain)
			irq_domain_remove(port->intx_irq_domain);

		/* Free config space for emulated root bridge. */
		pci_bridge_emul_cleanup(&port->bridge);

		/* Disable and clear BARs and windows. */
		mvebu_pcie_disable_wins(port);

		/* Delete PCIe IO and MEM windows. */
		if (port->iowin.size)
			mvebu_pcie_del_windows(port, port->iowin.base, port->iowin.size);
		if (port->memwin.size)
			mvebu_pcie_del_windows(port, port->memwin.base, port->memwin.size);

		/* Power down card and disable clocks. Must be the last step. */
		mvebu_pcie_powerdown(port);
	}

	return 0;
}

static const struct of_device_id mvebu_pcie_of_match_table[] = {
	{ .compatible = "marvell,armada-xp-pcie", },
	{ .compatible = "marvell,armada-370-pcie", },
	{ .compatible = "marvell,dove-pcie", },
	{ .compatible = "marvell,kirkwood-pcie", },
	{},
};

static const struct dev_pm_ops mvebu_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
};

static struct platform_driver mvebu_pcie_driver = {
	.driver = {
		.name = "mvebu-pcie",
		.of_match_table = mvebu_pcie_of_match_table,
		.pm = &mvebu_pcie_pm_ops,
	},
	.probe = mvebu_pcie_probe,
	.remove = mvebu_pcie_remove,
};
module_platform_driver(mvebu_pcie_driver);

MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@bootlin.com>");
MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
MODULE_DESCRIPTION("Marvell EBU PCIe controller");
MODULE_LICENSE("GPL v2");