// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe driver for Marvell Armada 370 and Armada XP SoCs
 *
 * Author: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/mbus.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>

#include "../pci.h"
#include "../pci-bridge-emul.h"

/*
 * PCIe unit register offsets.
 */
#define PCIE_DEV_ID_OFF		0x0000
#define PCIE_CMD_OFF		0x0004
#define PCIE_DEV_REV_OFF	0x0008
#define PCIE_BAR_LO_OFF(n)	(0x0010 + ((n) << 3))
#define PCIE_BAR_HI_OFF(n)	(0x0014 + ((n) << 3))
#define PCIE_SSDEV_ID_OFF	0x002c
#define PCIE_CAP_PCIEXP		0x0060
#define PCIE_CAP_PCIERR_OFF	0x0100
#define PCIE_BAR_CTRL_OFF(n)	(0x1804 + (((n) - 1) * 4))
#define PCIE_WIN04_CTRL_OFF(n)	(0x1820 + ((n) << 4))
#define PCIE_WIN04_BASE_OFF(n)	(0x1824 + ((n) << 4))
#define PCIE_WIN04_REMAP_OFF(n)	(0x182c + ((n) << 4))
#define PCIE_WIN5_CTRL_OFF	0x1880
#define PCIE_WIN5_BASE_OFF	0x1884
#define PCIE_WIN5_REMAP_OFF	0x188c
#define PCIE_CONF_ADDR_OFF	0x18f8
#define  PCIE_CONF_ADDR_EN		0x80000000
#define  PCIE_CONF_REG(r)		((((r) & 0xf00) << 16) | ((r) & 0xfc))
#define  PCIE_CONF_BUS(b)		(((b) & 0xff) << 16)
#define  PCIE_CONF_DEV(d)		(((d) & 0x1f) << 11)
#define  PCIE_CONF_FUNC(f)		(((f) & 0x7) << 8)
#define  PCIE_CONF_ADDR(bus, devfn, where) \
	(PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn))    | \
	 PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where) | \
	 PCIE_CONF_ADDR_EN)
#define PCIE_CONF_DATA_OFF	0x18fc
#define PCIE_INT_CAUSE_OFF	0x1900
#define PCIE_INT_UNMASK_OFF	0x1910
#define  PCIE_INT_INTX(i)		BIT(24+i)
#define  PCIE_INT_PM_PME		BIT(28)
#define  PCIE_INT_ALL_MASK		GENMASK(31, 0)
#define PCIE_CTRL_OFF		0x1a00
#define  PCIE_CTRL_X1_MODE		0x0001
#define  PCIE_CTRL_RC_MODE		BIT(1)
#define  PCIE_CTRL_MASTER_HOT_RESET	BIT(24)
#define PCIE_STAT_OFF		0x1a04
#define  PCIE_STAT_BUS			0xff00
#define  PCIE_STAT_DEV			0x1f0000
#define  PCIE_STAT_LINK_DOWN		BIT(0)
#define PCIE_SSPL_OFF		0x1a0c
#define  PCIE_SSPL_VALUE_SHIFT		0
#define  PCIE_SSPL_VALUE_MASK		GENMASK(7, 0)
#define  PCIE_SSPL_SCALE_SHIFT		8
#define  PCIE_SSPL_SCALE_MASK		GENMASK(9, 8)
#define  PCIE_SSPL_ENABLE		BIT(16)
#define PCIE_RC_RTSTA		0x1a14
#define PCIE_DEBUG_CTRL		0x1a60
#define  PCIE_DEBUG_SOFT_RESET		BIT(20)

struct mvebu_pcie_port;

/* Structure representing all PCIe interfaces */
struct mvebu_pcie {
	struct platform_device *pdev;
	struct mvebu_pcie_port *ports;
	struct resource io;
	struct resource realio;
	struct resource mem;
	struct resource busn;
	int nports;
};

struct mvebu_pcie_window {
	phys_addr_t base;
	phys_addr_t remap;
	size_t size;
};

/* Structure representing one PCIe interface */
struct mvebu_pcie_port {
	char *name;
	void __iomem *base;
	u32 port;
	u32 lane;
	bool is_x4;
	int devfn;
	unsigned int mem_target;
	unsigned int mem_attr;
	unsigned int io_target;
	unsigned int io_attr;
	struct clk *clk;
	struct gpio_desc *reset_gpio;
	char *reset_name;
	struct pci_bridge_emul bridge;
	struct device_node *dn;
	struct mvebu_pcie *pcie;
	struct mvebu_pcie_window memwin;
	struct mvebu_pcie_window iowin;
	u32 saved_pcie_stat;
	struct resource regs;
	u8 slot_power_limit_value;
	u8 slot_power_limit_scale;
	struct irq_domain *intx_irq_domain;
	raw_spinlock_t irq_lock;
	int intx_irq;
};

static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg)
{
	writel(val, port->base + reg);
}

static inline u32 mvebu_readl(struct mvebu_pcie_port *port, u32 reg)
{
	return readl(port->base + reg);
}

static inline bool mvebu_has_ioport(struct mvebu_pcie_port *port)
{
	return port->io_target != -1 && port->io_attr != -1;
}

static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port)
{
	return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
}

static u8 mvebu_pcie_get_local_bus_nr(struct mvebu_pcie_port *port)
{
	return (mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_BUS) >> 8;
}

static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr)
{
	u32 stat;

	stat = mvebu_readl(port, PCIE_STAT_OFF);
	stat &= ~PCIE_STAT_BUS;
	stat |= nr << 8;
	mvebu_writel(port, stat, PCIE_STAT_OFF);
}

static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr)
{
	u32 stat;

	stat = mvebu_readl(port, PCIE_STAT_OFF);
	stat &= ~PCIE_STAT_DEV;
	stat |= nr << 16;
	mvebu_writel(port, stat, PCIE_STAT_OFF);
}

static void mvebu_pcie_disable_wins(struct mvebu_pcie_port *port)
{
	int i;

	mvebu_writel(port, 0, PCIE_BAR_LO_OFF(0));
	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0));

	for (i = 1; i < 3; i++) {
		mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i));
		mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i));
		mvebu_writel(port, 0, PCIE_BAR_HI_OFF(i));
	}

	for (i = 0; i < 5; i++) {
		mvebu_writel(port, 0, PCIE_WIN04_CTRL_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_BASE_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
	}

	mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF);
	mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF);
	mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF);
}

/*
 * Setup PCIE BARs and Address Decode Wins:
 * BAR[0] -> internal registers (needed for MSI)
 * BAR[1] -> covers all DRAM banks
 * BAR[2] -> Disabled
 * WIN[0-3] -> DRAM bank[0-3]
 */
static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
{
	const struct mbus_dram_target_info *dram;
	u32 size;
	int i;

	dram = mv_mbus_dram_info();

	/* First, disable and clear BARs and windows. */
	mvebu_pcie_disable_wins(port);

	/* Setup windows for DDR banks. Count total DDR size on the fly. */
	size = 0;
	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvebu_writel(port, cs->base & 0xffff0000,
			     PCIE_WIN04_BASE_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
		mvebu_writel(port,
			     ((cs->size - 1) & 0xffff0000) |
			     (cs->mbus_attr << 8) |
			     (dram->mbus_dram_target_id << 4) | 1,
			     PCIE_WIN04_CTRL_OFF(i));

		size += cs->size;
	}

	/* Round up 'size' to the nearest power of two. */
	if ((size & (size - 1)) != 0)
		size = 1 << fls(size);

	/* Setup BAR[1] to all DRAM banks. */
	mvebu_writel(port, dram->cs[0].base, PCIE_BAR_LO_OFF(1));
	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1));
	mvebu_writel(port, ((size - 1) & 0xffff0000) | 1,
		     PCIE_BAR_CTRL_OFF(1));

	/*
	 * Point BAR[0] to the device's internal registers.
	 */
	mvebu_writel(port, round_down(port->regs.start, SZ_1M), PCIE_BAR_LO_OFF(0));
	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0));
}

static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
{
	u32 ctrl, lnkcap, cmd, dev_rev, unmask, sspl;

	/* Setup PCIe controller to Root Complex mode. */
	ctrl = mvebu_readl(port, PCIE_CTRL_OFF);
	ctrl |= PCIE_CTRL_RC_MODE;
	mvebu_writel(port, ctrl, PCIE_CTRL_OFF);

	/*
	 * Set Maximum Link Width to X1 or X4 in the Root Port's PCIe Link
	 * Capability register. This register is defined by the PCIe
	 * specification as read-only, but this mvebu controller has it as
	 * read-write and it must be set to the number of SerDes PCIe lanes
	 * (1 or 4). If this register is not set correctly then the link with
	 * the endpoint card is not established.
	 */
	lnkcap = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);
	lnkcap &= ~PCI_EXP_LNKCAP_MLW;
	lnkcap |= (port->is_x4 ? 4 : 1) << 4;
	mvebu_writel(port, lnkcap, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);

	/* Disable Root Bridge I/O space, memory space and bus mastering. */
	cmd = mvebu_readl(port, PCIE_CMD_OFF);
	cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
	mvebu_writel(port, cmd, PCIE_CMD_OFF);

	/*
	 * Change Class Code of PCI Bridge device to PCI Bridge (0x6004)
	 * because default value is Memory controller (0x5080).
	 *
	 * Note that this mvebu PCI Bridge does not have compliant Type 1
	 * Configuration Space. Header Type is reported as Type 0 and it
	 * has format of Type 0 config space.
	 *
	 * Moreover Type 0 BAR registers (ranges 0x10 - 0x28 and 0x30 - 0x34)
	 * have the same format in Marvell's specification as in PCIe
	 * specification, but their meaning is totally different and they do
	 * different things: they are aliased into internal mvebu registers
	 * (e.g. PCIE_BAR_LO_OFF) and these should not be changed or
	 * reconfigured by pci device drivers.
	 *
	 * Therefore the driver uses an emulated PCI Bridge which emulates
	 * access to configuration space via internal mvebu registers or an
	 * emulated configuration buffer. The driver accesses these PCI Bridge
	 * registers directly for simplification, but they can also be
	 * accessed via the standard mvebu way for accessing PCI config space.
	 */
	dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF);
	dev_rev &= ~0xffffff00;
	dev_rev |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
	mvebu_writel(port, dev_rev, PCIE_DEV_REV_OFF);

	/* Point PCIe unit MBUS decode windows to DRAM space. */
	mvebu_pcie_setup_wins(port);

	/*
	 * Program Root Port to automatically send Set_Slot_Power_Limit
	 * PCIe Message when changing status from Dl_Down to Dl_Up and valid
	 * slot power limit was specified.
	 */
	sspl = mvebu_readl(port, PCIE_SSPL_OFF);
	sspl &= ~(PCIE_SSPL_VALUE_MASK | PCIE_SSPL_SCALE_MASK | PCIE_SSPL_ENABLE);
	if (port->slot_power_limit_value) {
		sspl |= port->slot_power_limit_value << PCIE_SSPL_VALUE_SHIFT;
		sspl |= port->slot_power_limit_scale << PCIE_SSPL_SCALE_SHIFT;
		sspl |= PCIE_SSPL_ENABLE;
	}
	mvebu_writel(port, sspl, PCIE_SSPL_OFF);

	/* Mask all interrupt sources. */
	mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_UNMASK_OFF);

	/* Clear all interrupt causes. */
	mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_CAUSE_OFF);

	/* Check if "intx" interrupt was specified in DT. */
	if (port->intx_irq > 0)
		return;

	/*
	 * Fallback code when "intx" interrupt was not specified in DT:
	 * Unmask all legacy INTx interrupts as driver does not provide a way
	 * for masking and unmasking of individual legacy INTx interrupts.
	 * Legacy INTx are reported via one shared GIC source and therefore
	 * kernel cannot distinguish which individual legacy INTx was triggered.
	 * These interrupts are shared, so it should not cause any issue. Just
	 * performance penalty as every PCIe interrupt handler needs to be
	 * called when some interrupt is triggered.
	 */
	unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
	unmask |= PCIE_INT_INTX(0) | PCIE_INT_INTX(1) |
		  PCIE_INT_INTX(2) | PCIE_INT_INTX(3);
	mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
}

static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
						    struct pci_bus *bus,
						    int devfn);

static int mvebu_pcie_child_rd_conf(struct pci_bus *bus, u32 devfn, int where,
				    int size, u32 *val)
{
	struct mvebu_pcie *pcie = bus->sysdata;
	struct mvebu_pcie_port *port;
	void __iomem *conf_data;

	port = mvebu_pcie_find_port(pcie, bus, devfn);
	if (!port)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!mvebu_pcie_link_up(port))
		return PCIBIOS_DEVICE_NOT_FOUND;

	conf_data = port->base + PCIE_CONF_DATA_OFF;

	mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
		     PCIE_CONF_ADDR_OFF);

	switch (size) {
	case 1:
		*val = readb_relaxed(conf_data + (where & 3));
		break;
	case 2:
		*val = readw_relaxed(conf_data + (where & 2));
		break;
	case 4:
		*val = readl_relaxed(conf_data);
		break;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int mvebu_pcie_child_wr_conf(struct pci_bus *bus, u32 devfn,
				    int where, int size, u32 val)
{
	struct mvebu_pcie *pcie = bus->sysdata;
	struct mvebu_pcie_port *port;
	void __iomem *conf_data;

	port = mvebu_pcie_find_port(pcie, bus, devfn);
	if (!port)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!mvebu_pcie_link_up(port))
		return PCIBIOS_DEVICE_NOT_FOUND;

	conf_data = port->base + PCIE_CONF_DATA_OFF;

	mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
		     PCIE_CONF_ADDR_OFF);

	switch (size) {
	case 1:
		writeb(val, conf_data + (where & 3));
		break;
	case 2:
		writew(val, conf_data + (where & 2));
		break;
	case 4:
		writel(val, conf_data);
		break;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops mvebu_pcie_child_ops = {
	.read = mvebu_pcie_child_rd_conf,
	.write = mvebu_pcie_child_wr_conf,
};

/*
 * Remove windows, starting from the largest ones to the smallest
 * ones.
 */
static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
				   phys_addr_t base, size_t size)
{
	while (size) {
		size_t sz = 1 << (fls(size) - 1);

		mvebu_mbus_del_window(base, sz);
		base += sz;
		size -= sz;
	}
}

/*
 * MBus windows can only have a power of two size, but PCI BARs do not
 * have this constraint. Therefore, we have to split the PCI BAR into
 * areas each having a power of two size. We start from the largest
 * one (i.e highest order bit set in the size).
 */
static int mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
				  unsigned int target, unsigned int attribute,
				  phys_addr_t base, size_t size,
				  phys_addr_t remap)
{
	size_t size_mapped = 0;

	while (size) {
		size_t sz = 1 << (fls(size) - 1);
		int ret;

		ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base,
							sz, remap);
		if (ret) {
			phys_addr_t end = base + sz - 1;

			dev_err(&port->pcie->pdev->dev,
				"Could not create MBus window at [mem %pa-%pa]: %d\n",
				&base, &end, ret);
			mvebu_pcie_del_windows(port, base - size_mapped,
					       size_mapped);
			return ret;
		}

		size -= sz;
		size_mapped += sz;
		base += sz;
		if (remap != MVEBU_MBUS_NO_REMAP)
			remap += sz;
	}

	return 0;
}

static int mvebu_pcie_set_window(struct mvebu_pcie_port *port,
				 unsigned int target, unsigned int attribute,
				 const struct mvebu_pcie_window *desired,
				 struct mvebu_pcie_window *cur)
{
	int ret;

	if (desired->base == cur->base && desired->remap == cur->remap &&
	    desired->size == cur->size)
		return 0;

	if (cur->size != 0) {
		mvebu_pcie_del_windows(port, cur->base, cur->size);
		cur->size = 0;
		cur->base = 0;

		/*
		 * If something tries to change the window while it is enabled
		 * the change will not be done atomically. That would be
		 * difficult to do in the general case.
		 */
	}

	if (desired->size == 0)
		return 0;

	ret = mvebu_pcie_add_windows(port, target, attribute, desired->base,
				     desired->size, desired->remap);
	if (ret) {
		cur->size = 0;
		cur->base = 0;
		return ret;
	}

	*cur = *desired;
	return 0;
}

static int mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
{
	struct mvebu_pcie_window desired = {};
	struct pci_bridge_emul_conf *conf = &port->bridge.conf;

	/* Are the new iobase/iolimit values invalid? */
	if (conf->iolimit < conf->iobase ||
	    le16_to_cpu(conf->iolimitupper) < le16_to_cpu(conf->iobaseupper))
		return mvebu_pcie_set_window(port, port->io_target, port->io_attr,
					     &desired, &port->iowin);

	/*
	 * We read the PCI-to-PCI bridge emulated registers, and
	 * calculate the base address and size of the address decoding
	 * window to setup, according to the PCI-to-PCI bridge
	 * specifications. iobase is the bus address, port->iowin_base
	 * is the CPU address.
	 */
	desired.remap = ((conf->iobase & 0xF0) << 8) |
			(le16_to_cpu(conf->iobaseupper) << 16);
	desired.base = port->pcie->io.start + desired.remap;
	desired.size = ((0xFFF | ((conf->iolimit & 0xF0) << 8) |
			 (le16_to_cpu(conf->iolimitupper) << 16)) -
			desired.remap) +
		       1;

	return mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired,
				     &port->iowin);
}

static int mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
{
	struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP};
	struct pci_bridge_emul_conf *conf = &port->bridge.conf;

	/* Are the new membase/memlimit values invalid? */
	if (le16_to_cpu(conf->memlimit) < le16_to_cpu(conf->membase))
		return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
					     &desired, &port->memwin);

	/*
	 * We read the PCI-to-PCI bridge emulated registers, and
	 * calculate the base address and size of the address decoding
	 * window to setup, according to the PCI-to-PCI bridge
	 * specifications.
	 */
	desired.base = ((le16_to_cpu(conf->membase) & 0xFFF0) << 16);
	desired.size = (((le16_to_cpu(conf->memlimit) & 0xFFF0) << 16) | 0xFFFFF) -
		       desired.base + 1;

	return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
				     &port->memwin);
}

static pci_bridge_emul_read_status_t
mvebu_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
				     int reg, u32 *value)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case PCI_COMMAND:
		*value = mvebu_readl(port, PCIE_CMD_OFF);
		break;

	case PCI_PRIMARY_BUS: {
		/*
		 * From the whole 32bit register we support reading from HW only
		 * secondary bus number which is mvebu local bus number.
		 * Other bits are retrieved only from emulated config buffer.
		 */
		__le32 *cfgspace = (__le32 *)&bridge->conf;
		u32 val = le32_to_cpu(cfgspace[PCI_PRIMARY_BUS / 4]);
		val &= ~0xff00;
		val |= mvebu_pcie_get_local_bus_nr(port) << 8;
		*value = val;
		break;
	}

	case PCI_INTERRUPT_LINE: {
		/*
		 * From the whole 32bit register we support reading from HW only
		 * one bit: PCI_BRIDGE_CTL_BUS_RESET.
		 * Other bits are retrieved only from emulated config buffer.
		 */
		__le32 *cfgspace = (__le32 *)&bridge->conf;
		u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
		if (mvebu_readl(port, PCIE_CTRL_OFF) & PCIE_CTRL_MASTER_HOT_RESET)
			val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
		else
			val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16);
		*value = val;
		break;
	}

	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}

	return PCI_BRIDGE_EMUL_HANDLED;
}

static pci_bridge_emul_read_status_t
mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
				     int reg, u32 *value)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case PCI_EXP_DEVCAP:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP);
		break;

	case PCI_EXP_DEVCTL:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
		break;

	case PCI_EXP_LNKCAP:
		/*
		 * PCIe requires that the Clock Power Management capability bit
		 * is hard-wired to zero for downstream ports but HW returns 1.
		 * Additionally enable Data Link Layer Link Active Reporting
		 * Capable bit as DL_Active indication is provided too.
		 */
		*value = (mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) &
			  ~PCI_EXP_LNKCAP_CLKPM) | PCI_EXP_LNKCAP_DLLLARC;
		break;

	case PCI_EXP_LNKCTL:
		/* DL_Active indication is provided via PCIE_STAT_OFF */
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL) |
			 (mvebu_pcie_link_up(port) ?
			  (PCI_EXP_LNKSTA_DLLLA << 16) : 0);
		break;

	case PCI_EXP_SLTCTL: {
		u16 slotctl = le16_to_cpu(bridge->pcie_conf.slotctl);
		u16 slotsta = le16_to_cpu(bridge->pcie_conf.slotsta);
		u32 val = 0;
		/*
		 * When slot power limit was not specified in DT then
		 * ASPL_DISABLE bit is stored only in emulated config space.
		 * Otherwise reflect status of PCIE_SSPL_ENABLE bit in HW.
		 */
		if (!port->slot_power_limit_value)
			val |= slotctl & PCI_EXP_SLTCTL_ASPL_DISABLE;
		else if (!(mvebu_readl(port, PCIE_SSPL_OFF) & PCIE_SSPL_ENABLE))
			val |= PCI_EXP_SLTCTL_ASPL_DISABLE;
		/* This callback is 32-bit and in high bits is slot status. */
		val |= slotsta << 16;
		*value = val;
		break;
	}

	case PCI_EXP_RTSTA:
		*value = mvebu_readl(port, PCIE_RC_RTSTA);
		break;

	case PCI_EXP_DEVCAP2:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP2);
		break;

	case PCI_EXP_DEVCTL2:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL2);
		break;

	case PCI_EXP_LNKCTL2:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL2);
		break;

	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}

	return PCI_BRIDGE_EMUL_HANDLED;
}

static pci_bridge_emul_read_status_t
mvebu_pci_bridge_emul_ext_conf_read(struct pci_bridge_emul *bridge,
				    int reg, u32 *value)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case 0:
	case PCI_ERR_UNCOR_STATUS:
	case PCI_ERR_UNCOR_MASK:
	case PCI_ERR_UNCOR_SEVER:
	case PCI_ERR_COR_STATUS:
	case PCI_ERR_COR_MASK:
	case PCI_ERR_CAP:
	case PCI_ERR_HEADER_LOG+0:
	case PCI_ERR_HEADER_LOG+4:
	case PCI_ERR_HEADER_LOG+8:
	case PCI_ERR_HEADER_LOG+12:
	case PCI_ERR_ROOT_COMMAND:
	case PCI_ERR_ROOT_STATUS:
	case PCI_ERR_ROOT_ERR_SRC:
		*value = mvebu_readl(port, PCIE_CAP_PCIERR_OFF + reg);
		break;

	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}

	return PCI_BRIDGE_EMUL_HANDLED;
}

static void
mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
				      int reg, u32 old, u32 new, u32 mask)
{
	struct mvebu_pcie_port *port = bridge->data;
	struct pci_bridge_emul_conf *conf = &bridge->conf;

	switch (reg) {
	case PCI_COMMAND:
		mvebu_writel(port, new, PCIE_CMD_OFF);
		break;

	case PCI_IO_BASE:
		if ((mask & 0xffff) && mvebu_has_ioport(port) &&
		    mvebu_pcie_handle_iobase_change(port)) {
			/* On error disable IO range */
			conf->iobase &= ~0xf0;
			conf->iolimit &= ~0xf0;
			conf->iobase |= 0xf0;
			conf->iobaseupper = cpu_to_le16(0x0000);
			conf->iolimitupper = cpu_to_le16(0x0000);
		}
		break;

	case PCI_MEMORY_BASE:
		if (mvebu_pcie_handle_membase_change(port)) {
			/* On error disable mem range */
			conf->membase = cpu_to_le16(le16_to_cpu(conf->membase) & ~0xfff0);
			conf->memlimit = cpu_to_le16(le16_to_cpu(conf->memlimit) & ~0xfff0);
			conf->membase = cpu_to_le16(le16_to_cpu(conf->membase) | 0xfff0);
		}
		break;

	case PCI_IO_BASE_UPPER16:
		if (mvebu_has_ioport(port) &&
		    mvebu_pcie_handle_iobase_change(port)) {
			/* On error disable IO range */
			conf->iobase &= ~0xf0;
			conf->iolimit &= ~0xf0;
			conf->iobase |= 0xf0;
			conf->iobaseupper = cpu_to_le16(0x0000);
			conf->iolimitupper = cpu_to_le16(0x0000);
		}
		break;

	case PCI_PRIMARY_BUS:
		if (mask & 0xff00)
			mvebu_pcie_set_local_bus_nr(port, conf->secondary_bus);
		break;

	case PCI_INTERRUPT_LINE:
		if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
			u32 ctrl = mvebu_readl(port, PCIE_CTRL_OFF);
			if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
				ctrl |= PCIE_CTRL_MASTER_HOT_RESET;
			else
				ctrl &= ~PCIE_CTRL_MASTER_HOT_RESET;
			mvebu_writel(port, ctrl, PCIE_CTRL_OFF);
		}
		break;

	default:
		break;
	}
}

static void
mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
				      int reg, u32 old, u32 new, u32 mask)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case PCI_EXP_DEVCTL:
		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
		break;

	case PCI_EXP_LNKCTL:
		/*
		 * PCIe requires that the Enable Clock Power Management bit
		 * is hard-wired to zero for downstream ports but HW allows
		 * to change it.
		 */
		new &= ~PCI_EXP_LNKCTL_CLKREQ_EN;

		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
		break;

	case PCI_EXP_SLTCTL:
		/*
		 * Allow to change PCIE_SSPL_ENABLE bit only when slot power
		 * limit was specified in DT and configured into HW.
		 */
		if ((mask & PCI_EXP_SLTCTL_ASPL_DISABLE) &&
		    port->slot_power_limit_value) {
			u32 sspl = mvebu_readl(port, PCIE_SSPL_OFF);
			if (new & PCI_EXP_SLTCTL_ASPL_DISABLE)
				sspl &= ~PCIE_SSPL_ENABLE;
			else
				sspl |= PCIE_SSPL_ENABLE;
			mvebu_writel(port, sspl, PCIE_SSPL_OFF);
		}
		break;

	case PCI_EXP_RTSTA:
		/*
		 * PME Status bit in Root Status Register (PCIE_RC_RTSTA)
		 * is read-only and can be cleared only by writing 0b to the
		 * Interrupt Cause RW0C register (PCIE_INT_CAUSE_OFF). So
		 * clear PME via Interrupt Cause.
		 */
		if (new & PCI_EXP_RTSTA_PME)
			mvebu_writel(port, ~PCIE_INT_PM_PME, PCIE_INT_CAUSE_OFF);
		break;

	case PCI_EXP_DEVCTL2:
		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL2);
		break;

	case PCI_EXP_LNKCTL2:
		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL2);
		break;

	default:
		break;
	}
}

static void
mvebu_pci_bridge_emul_ext_conf_write(struct pci_bridge_emul *bridge,
				     int reg, u32 old, u32 new, u32 mask)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	/* These are W1C registers, so clear other bits */
	case PCI_ERR_UNCOR_STATUS:
	case PCI_ERR_COR_STATUS:
	case PCI_ERR_ROOT_STATUS:
		new &= mask;
		fallthrough;

	case PCI_ERR_UNCOR_MASK:
	case PCI_ERR_UNCOR_SEVER:
	case PCI_ERR_COR_MASK:
	case PCI_ERR_CAP:
	case PCI_ERR_HEADER_LOG+0:
	case PCI_ERR_HEADER_LOG+4:
	case PCI_ERR_HEADER_LOG+8:
	case PCI_ERR_HEADER_LOG+12:
	case PCI_ERR_ROOT_COMMAND:
	case PCI_ERR_ROOT_ERR_SRC:
		mvebu_writel(port, new, PCIE_CAP_PCIERR_OFF + reg);
		break;

	default:
		break;
	}
}

static const struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
	.read_base = mvebu_pci_bridge_emul_base_conf_read,
	.write_base = mvebu_pci_bridge_emul_base_conf_write,
	.read_pcie = mvebu_pci_bridge_emul_pcie_conf_read,
	.write_pcie = mvebu_pci_bridge_emul_pcie_conf_write,
	.read_ext = mvebu_pci_bridge_emul_ext_conf_read,
	.write_ext = mvebu_pci_bridge_emul_ext_conf_write,
};

/*
 * Initialize the configuration space of the PCI-to-PCI bridge
 * associated with the given PCIe interface.
 */
static int mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
{
	unsigned int bridge_flags = PCI_BRIDGE_EMUL_NO_PREFMEM_FORWARD;
	struct pci_bridge_emul *bridge = &port->bridge;
	u32 dev_id = mvebu_readl(port, PCIE_DEV_ID_OFF);
	u32 dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF);
	u32 ssdev_id = mvebu_readl(port, PCIE_SSDEV_ID_OFF);
	u32 pcie_cap = mvebu_readl(port, PCIE_CAP_PCIEXP);
	u8 pcie_cap_ver = ((pcie_cap >> 16) & PCI_EXP_FLAGS_VERS);

	bridge->conf.vendor = cpu_to_le16(dev_id & 0xffff);
	bridge->conf.device = cpu_to_le16(dev_id >> 16);
	bridge->conf.class_revision = cpu_to_le32(dev_rev & 0xff);

	if (mvebu_has_ioport(port)) {
		/* We support 32 bits I/O addressing */
		bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
		bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
	} else {
		bridge_flags |= PCI_BRIDGE_EMUL_NO_IO_FORWARD;
	}

	/*
	 * Older mvebu hardware provides PCIe Capability structure only in
	 * version 1. New hardware provides it in version 2.
	 * Enable slot support which is emulated.
	 */
	bridge->pcie_conf.cap = cpu_to_le16(pcie_cap_ver | PCI_EXP_FLAGS_SLOT);

	/*
	 * Set Presence Detect State bit permanently as there is no support for
	 * unplugging PCIe card from the slot. Assume that PCIe card is always
	 * connected in slot.
	 *
	 * Set physical slot number to port+1 as mvebu ports are indexed from
	 * zero and zero value is reserved for ports within the same silicon
	 * as Root Port which is not mvebu case.
	 *
	 * Also set correct slot power limit.
	 */
	bridge->pcie_conf.slotcap = cpu_to_le32(
		FIELD_PREP(PCI_EXP_SLTCAP_SPLV, port->slot_power_limit_value) |
		FIELD_PREP(PCI_EXP_SLTCAP_SPLS, port->slot_power_limit_scale) |
		FIELD_PREP(PCI_EXP_SLTCAP_PSN, port->port+1));
	bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);

	bridge->subsystem_vendor_id = ssdev_id & 0xffff;
	bridge->subsystem_id = ssdev_id >> 16;
	bridge->has_pcie = true;
	bridge->pcie_start = PCIE_CAP_PCIEXP;
	bridge->data = port;
	bridge->ops = &mvebu_pci_bridge_emul_ops;

	return pci_bridge_emul_init(bridge, bridge_flags);
}

static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
{
	return sys->private_data;
}

static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
						    struct pci_bus *bus,
						    int devfn)
{
	int i;

	for (i = 0; i < pcie->nports; i++) {
		struct mvebu_pcie_port *port = &pcie->ports[i];

		if (!port->base)
			continue;

		if (bus->number == 0 && port->devfn == devfn)
			return port;
		if (bus->number != 0 &&
		    bus->number >= port->bridge.conf.secondary_bus &&
		    bus->number <= port->bridge.conf.subordinate_bus)
			return port;
	}

	return NULL;
}

/* PCI configuration space write function */
static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			      int where, int size, u32 val)
{
	struct mvebu_pcie *pcie = bus->sysdata;
	struct mvebu_pcie_port *port;

	port = mvebu_pcie_find_port(pcie, bus, devfn);
	if (!port)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return pci_bridge_emul_conf_write(&port->bridge, where, size, val);
}

/* PCI configuration space read function */
static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			      int size, u32 *val)
{
	struct mvebu_pcie *pcie = bus->sysdata;
	struct mvebu_pcie_port *port;

	port = mvebu_pcie_find_port(pcie, bus, devfn);
	if (!port)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return pci_bridge_emul_conf_read(&port->bridge, where, size, val);
}

static struct pci_ops mvebu_pcie_ops = {
	.read = mvebu_pcie_rd_conf,
	.write = mvebu_pcie_wr_conf,
};

static void mvebu_pcie_intx_irq_mask(struct irq_data *d)
{
	struct mvebu_pcie_port *port = d->domain->host_data;
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned long flags;
	u32 unmask;

	raw_spin_lock_irqsave(&port->irq_lock, flags);
	unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
	unmask &= ~PCIE_INT_INTX(hwirq);
	mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
	raw_spin_unlock_irqrestore(&port->irq_lock, flags);
}

static void mvebu_pcie_intx_irq_unmask(struct irq_data *d)
{
	struct mvebu_pcie_port *port = d->domain->host_data;
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned long flags;
	u32 unmask;

	raw_spin_lock_irqsave(&port->irq_lock, flags);
	unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
	unmask |= PCIE_INT_INTX(hwirq);
	mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
	raw_spin_unlock_irqrestore(&port->irq_lock, flags);
}

static struct irq_chip intx_irq_chip = {
	.name = "mvebu-INTx",
	.irq_mask = mvebu_pcie_intx_irq_mask,
	.irq_unmask = mvebu_pcie_intx_irq_unmask,
};

static int mvebu_pcie_intx_irq_map(struct irq_domain *h,
				   unsigned int virq, irq_hw_number_t hwirq)
{
	struct mvebu_pcie_port *port = h->host_data;

	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &intx_irq_chip, handle_level_irq);
	irq_set_chip_data(virq, port);

	return 0;
}

static const struct irq_domain_ops mvebu_pcie_intx_irq_domain_ops = {
	.map = mvebu_pcie_intx_irq_map,
	.xlate = irq_domain_xlate_onecell,
};

static int mvebu_pcie_init_irq_domain(struct mvebu_pcie_port *port)
{
	struct device *dev = &port->pcie->pdev->dev;
	struct device_node *pcie_intc_node;

	raw_spin_lock_init(&port->irq_lock);

	pcie_intc_node = of_get_next_child(port->dn, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found for %s\n", port->name);
		return -ENODEV;
	}

	port->intx_irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						      &mvebu_pcie_intx_irq_domain_ops,
						      port);
	of_node_put(pcie_intc_node);
	if (!port->intx_irq_domain) {
		dev_err(dev, "Failed to get INTx IRQ domain for %s\n", port->name);
		return -ENOMEM;
	}

	return 0;
}

static void mvebu_pcie_irq_handler(struct irq_desc *desc)
{
	struct mvebu_pcie_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = &port->pcie->pdev->dev;
	u32 cause, unmask, status;
	int i;

	chained_irq_enter(chip, desc);

	cause = mvebu_readl(port, PCIE_INT_CAUSE_OFF);
	unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
	status = cause & unmask;

	/* Process legacy INTx interrupts */
	for (i = 0; i < PCI_NUM_INTX; i++) {
		if (!(status & PCIE_INT_INTX(i)))
			continue;

		if (generic_handle_domain_irq(port->intx_irq_domain, i) == -EINVAL)
			dev_err_ratelimited(dev, "unexpected INT%c IRQ\n", (char)i+'A');
	}

	chained_irq_exit(chip, desc);
}

static int mvebu_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	/* Interrupt support on mvebu emulated bridges is not implemented yet */
	if (dev->bus->number == 0)
		return 0; /* Proper return code 0 == NO_IRQ */

	return of_irq_parse_and_map_pci(dev, slot, pin);
}

static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
						 const struct resource *res,
						 resource_size_t start,
						 resource_size_t size,
						 resource_size_t align)
{
	if (dev->bus->number != 0)
		return start;

	/*
	 * On the PCI-to-PCI bridge side, the I/O windows must have at
	 * least a 64 KB size and the memory windows must have at
	 * least a 1 MB size. Moreover, MBus windows need to have a
	 * base address aligned on their size, and their size must be
	 * a power of two. This means that if the BAR doesn't have a
	 * power of two size, several MBus windows will actually be
	 * created. We need to ensure that the biggest MBus window
	 * (which will be the first one) is aligned on its size, which
	 * explains the rounddown_pow_of_two() being done here.
	 */
	if (res->flags & IORESOURCE_IO)
		return round_up(start, max_t(resource_size_t, SZ_64K,
					     rounddown_pow_of_two(size)));
	else if (res->flags & IORESOURCE_MEM)
		return round_up(start, max_t(resource_size_t, SZ_1M,
					     rounddown_pow_of_two(size)));
	else
		return start;
}

static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
					      struct device_node *np,
					      struct mvebu_pcie_port *port)
{
	int ret = 0;

	ret = of_address_to_resource(np, 0, &port->regs);
	if (ret)
		return (void __iomem *)ERR_PTR(ret);

	return devm_ioremap_resource(&pdev->dev, &port->regs);
}

#define DT_FLAGS_TO_TYPE(flags)		(((flags) >> 24) & 0x03)
#define  DT_TYPE_IO			0x1
#define  DT_TYPE_MEM32			0x2
#define DT_CPUADDR_TO_TARGET(cpuaddr)	(((cpuaddr) >> 56) & 0xFF)
#define DT_CPUADDR_TO_ATTR(cpuaddr)	(((cpuaddr) >> 48) & 0xFF)

static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
			      unsigned long type,
			      unsigned int *tgt,
			      unsigned int *attr)
{
	const int na = 3, ns = 2;
	const __be32 *range;
	int rlen, nranges, rangesz, pna, i;

	*tgt = -1;
	*attr = -1;

	range = of_get_property(np, "ranges", &rlen);
	if (!range)
		return -EINVAL;

	pna = of_n_addr_cells(np);
	rangesz = pna + na + ns;
	nranges = rlen / sizeof(__be32) / rangesz;

	for (i = 0; i < nranges; i++, range += rangesz) {
		u32 flags = of_read_number(range, 1);
		u32 slot = of_read_number(range + 1, 1);
		u64 cpuaddr = of_read_number(range + na, pna);
		unsigned long rtype;

		if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO)
			rtype = IORESOURCE_IO;
		else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
			rtype = IORESOURCE_MEM;
		else
			continue;

		if (slot == PCI_SLOT(devfn) && type == rtype) {
			*tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
			*attr = DT_CPUADDR_TO_ATTR(cpuaddr);
			return 0;
		}
	}

	return -ENOENT;
}

static int mvebu_pcie_suspend(struct device *dev)
{
	struct mvebu_pcie *pcie;
	int i;

	pcie = dev_get_drvdata(dev);
	for (i = 0; i < pcie->nports; i++) {
		struct mvebu_pcie_port *port = pcie->ports + i;
		if (!port->base)
			continue;
		port->saved_pcie_stat = mvebu_readl(port, PCIE_STAT_OFF);
	}

	return 0;
}

static int mvebu_pcie_resume(struct device *dev)
{
	struct mvebu_pcie *pcie;
	int i;

	pcie = dev_get_drvdata(dev);
	for (i = 0; i < pcie->nports; i++) {
		struct mvebu_pcie_port *port = pcie->ports + i;
		if (!port->base)
			continue;
		mvebu_writel(port, port->saved_pcie_stat, PCIE_STAT_OFF);
		mvebu_pcie_setup_hw(port);
	}

	return 0;
}

static void mvebu_pcie_port_clk_put(void *data)
{
	struct mvebu_pcie_port *port = data;

	clk_put(port->clk);
}

static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
				 struct mvebu_pcie_port *port, struct device_node *child)
{
	struct device *dev = &pcie->pdev->dev;
	enum of_gpio_flags flags;
	u32 slot_power_limit;
	int reset_gpio, ret;
	u32 num_lanes;

	port->pcie = pcie;

	if (of_property_read_u32(child, "marvell,pcie-port", &port->port)) {
		dev_warn(dev, "ignoring %pOF, missing pcie-port property\n",
			 child);
		goto skip;
	}

	if (of_property_read_u32(child, "marvell,pcie-lane", &port->lane))
		port->lane = 0;

	if (!of_property_read_u32(child, "num-lanes", &num_lanes) && num_lanes == 4)
		port->is_x4 = true;

	port->name = devm_kasprintf(dev, GFP_KERNEL, "pcie%d.%d", port->port,
				    port->lane);
	if (!port->name) {
		ret = -ENOMEM;
		goto err;
	}

	port->devfn = of_pci_get_devfn(child);
	if (port->devfn < 0)
		goto skip;
	if (PCI_FUNC(port->devfn) != 0) {
		dev_err(dev, "%s: invalid function number, must be zero\n",
			port->name);
		goto skip;
	}

	ret = mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_MEM,
				 &port->mem_target, &port->mem_attr);
	if (ret < 0) {
		dev_err(dev, "%s: cannot get tgt/attr for mem window\n",
			port->name);
		goto skip;
	}

	if (resource_size(&pcie->io) != 0) {
		mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_IO,
				   &port->io_target, &port->io_attr);
	} else {
		port->io_target = -1;
		port->io_attr = -1;
	}

	/*
	 * Old DT bindings do not contain "intx" interrupt
	 * so do not fail probing driver when interrupt does not exist.
	 */
	port->intx_irq = of_irq_get_byname(child, "intx");
	if (port->intx_irq == -EPROBE_DEFER) {
		ret = port->intx_irq;
		goto err;
	}
	if (port->intx_irq <= 0) {
		dev_warn(dev, "%s: legacy INTx interrupts cannot be masked individually, "
			      "%pOF does not contain intx interrupt\n",
			 port->name, child);
	}

	reset_gpio = of_get_named_gpio_flags(child, "reset-gpios", 0, &flags);
	if (reset_gpio == -EPROBE_DEFER) {
		ret = reset_gpio;
		goto err;
	}

	if (gpio_is_valid(reset_gpio)) {
		unsigned long gpio_flags;

		port->reset_name = devm_kasprintf(dev, GFP_KERNEL, "%s-reset",
						  port->name);
		if (!port->reset_name) {
			ret = -ENOMEM;
			goto err;
		}

		if (flags & OF_GPIO_ACTIVE_LOW) {
			dev_info(dev, "%pOF: reset gpio is active low\n",
				 child);
			gpio_flags = GPIOF_ACTIVE_LOW |
				     GPIOF_OUT_INIT_LOW;
		} else {
			gpio_flags = GPIOF_OUT_INIT_HIGH;
		}

		ret = devm_gpio_request_one(dev, reset_gpio, gpio_flags,
					    port->reset_name);
		if (ret) {
			if (ret == -EPROBE_DEFER)
				goto err;
			goto skip;
		}

		port->reset_gpio = gpio_to_desc(reset_gpio);
	}

	slot_power_limit = of_pci_get_slot_power_limit(child,
						       &port->slot_power_limit_value,
						       &port->slot_power_limit_scale);
	if (slot_power_limit)
		dev_info(dev, "%s: Slot power limit %u.%uW\n",
			 port->name,
			 slot_power_limit / 1000,
			 (slot_power_limit / 100) % 10);

	port->clk = of_clk_get_by_name(child, NULL);
	if (IS_ERR(port->clk)) {
		dev_err(dev, "%s: cannot get clock\n", port->name);
		goto skip;
	}

	ret = devm_add_action(dev, mvebu_pcie_port_clk_put, port);
	if (ret < 0) {
		clk_put(port->clk);
		goto err;
	}

	return 1;

skip:
	ret = 0;

	/* In the case of skipping, we need to free these */
	devm_kfree(dev, port->reset_name);
	port->reset_name = NULL;
	devm_kfree(dev, port->name);
	port->name = NULL;

err:
	return ret;
}

/*
 * Power up a PCIe port. PCIe requires the refclk to be stable for 100µs
 * prior to releasing PERST. See table 2-4 in section 2.6.2 AC Specifications
 * of the PCI Express Card Electromechanical Specification, 1.1.
 */
static int mvebu_pcie_powerup(struct mvebu_pcie_port *port)
{
	int ret;

	ret = clk_prepare_enable(port->clk);
	if (ret < 0)
		return ret;

	if (port->reset_gpio) {
		u32 reset_udelay = PCI_PM_D3COLD_WAIT * 1000;

		of_property_read_u32(port->dn, "reset-delay-us",
				     &reset_udelay);

		udelay(100);

		gpiod_set_value_cansleep(port->reset_gpio, 0);
		msleep(reset_udelay / 1000);
	}

	return 0;
}

/*
 * Power down a PCIe port. Strictly, PCIe requires us to place the card
 * in D3hot state before asserting PERST#.
 */
static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
{
	gpiod_set_value_cansleep(port->reset_gpio, 1);

	clk_disable_unprepare(port->clk);
}

/*
 * devm_of_pci_get_host_bridge_resources() only sets up translatable resources,
 * so we need extra resource setup parsing our special DT properties encoding
 * the MEM and IO apertures.
 */
static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	int ret;

	/* Get the PCIe memory aperture */
	mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
	if (resource_size(&pcie->mem) == 0) {
		dev_err(dev, "invalid memory aperture size\n");
		return -EINVAL;
	}

	pcie->mem.name = "PCI MEM";
	pci_add_resource(&bridge->windows, &pcie->mem);
	ret = devm_request_resource(dev, &iomem_resource, &pcie->mem);
	if (ret)
		return ret;

	/* Get the PCIe IO aperture */
	mvebu_mbus_get_pcie_io_aperture(&pcie->io);

	if (resource_size(&pcie->io) != 0) {
		pcie->realio.flags = pcie->io.flags;
		pcie->realio.start = PCIBIOS_MIN_IO;
		pcie->realio.end = min_t(resource_size_t,
					 IO_SPACE_LIMIT - SZ_64K,
					 resource_size(&pcie->io) - 1);
		pcie->realio.name = "PCI I/O";

		ret = devm_pci_remap_iospace(dev, &pcie->realio, pcie->io.start);
		if (ret)
			return ret;

		pci_add_resource(&bridge->windows, &pcie->realio);
		ret = devm_request_resource(dev, &ioport_resource, &pcie->realio);
		if (ret)
			return ret;
	}

	return 0;
}

static int mvebu_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mvebu_pcie *pcie;
	struct pci_host_bridge *bridge;
	struct device_node *np = dev->of_node;
	struct device_node *child;
	int num, i, ret;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct mvebu_pcie));
	if (!bridge)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(bridge);
	pcie->pdev = pdev;
	platform_set_drvdata(pdev, pcie);

	ret = mvebu_pcie_parse_request_resources(pcie);
	if (ret)
		return ret;

	num = of_get_available_child_count(np);

	pcie->ports = devm_kcalloc(dev, num, sizeof(*pcie->ports), GFP_KERNEL);
	if (!pcie->ports)
		return -ENOMEM;

	i = 0;
	for_each_available_child_of_node(np, child) {
		struct mvebu_pcie_port *port = &pcie->ports[i];

		ret = mvebu_pcie_parse_port(pcie, port, child);
		if (ret < 0) {
			of_node_put(child);
			return ret;
		} else if (ret == 0) {
			continue;
		}

		port->dn = child;
		i++;
	}
	pcie->nports = i;

	for (i = 0; i < pcie->nports; i++) {
		struct mvebu_pcie_port *port = &pcie->ports[i];
		int irq = port->intx_irq;

		child = port->dn;
		if (!child)
			continue;

		ret = mvebu_pcie_powerup(port);
		if (ret < 0)
			continue;

		port->base = mvebu_pcie_map_registers(pdev, child, port);
		if (IS_ERR(port->base)) {
			dev_err(dev, "%s: cannot map registers\n", port->name);
			port->base = NULL;
			mvebu_pcie_powerdown(port);
			continue;
		}

		ret = mvebu_pci_bridge_emul_init(port);
		if (ret < 0) {
			dev_err(dev, "%s: cannot init emulated bridge\n",
				port->name);
			devm_iounmap(dev, port->base);
			port->base = NULL;
			mvebu_pcie_powerdown(port);
			continue;
		}

		if (irq > 0) {
			ret = mvebu_pcie_init_irq_domain(port);
			if (ret) {
				dev_err(dev, "%s: cannot init irq domain\n",
					port->name);
				pci_bridge_emul_cleanup(&port->bridge);
				devm_iounmap(dev, port->base);
				port->base = NULL;
				mvebu_pcie_powerdown(port);
				continue;
			}
			irq_set_chained_handler_and_data(irq,
							 mvebu_pcie_irq_handler,
							 port);
		}

		/*
		 * PCIe topology exported by mvebu hw is quite complicated. In
		 * reality it has something like N fully independent host
		 * bridges where each host bridge has one PCIe Root Port (which
		 * acts as PCI Bridge device). Each host bridge has its own
		 * independent internal registers, independent access to PCI
		 * config space, independent interrupt lines, independent
		 * window and memory access configuration. But additionally
		 * there is some kind of peer-to-peer support between PCIe
		 * devices behind different host bridges limited just to
		 * forwarding of memory and I/O transactions (forwarding of
		 * error messages and config cycles is not supported). So we
		 * could say there are N independent PCIe Root Complexes.
		 *
		 * For this kind of setup DT should have been structured into
		 * N independent PCIe controllers / host bridges. But instead
		 * the structure was defined in the past to put PCIe Root Ports
		 * of all host bridges into one bus zero, like in a classic
		 * multi-port Root Complex setup with just one host bridge.
		 *
		 * This means that the pci-mvebu.c driver provides a "virtual"
		 * bus 0 on which it registers all PCIe Root Ports (PCI Bridge
		 * devices) specified in DT by their BDF addresses and
		 * virtually routes PCI config access of each PCI Bridge device
		 * to the specific PCIe host bridge.
		 *
		 * Normally a PCI Bridge should choose between Type 0 and Type 1
		 * config requests based on primary and secondary bus numbers
		 * configured on the bridge itself. But because the mvebu PCI
		 * Bridge does not have registers for primary and secondary bus
		 * numbers in its config space, it determines the type of config
		 * requests in its own custom way.
		 *
		 * There are two options for how mvebu determines the type of
		 * config request.
		 *
		 * 1. If Secondary Bus Number Enable bit is not set or is not
		 * available (applies for pre-XP PCIe controllers) then Type 0
		 * is used if target bus number equals Local Bus Number (bits
		 * [15:8] in register 0x1a04) and target device number differs
		 * from Local Device Number (bits [20:16] in register 0x1a04).
		 * Type 1 is used if target bus number differs from Local Bus
		 * Number. And when target bus number equals Local Bus Number
		 * and target device equals Local Device Number then request is
		 * routed to Local PCI Bridge (PCIe Root Port).
		 *
		 * 2. If Secondary Bus Number Enable bit is set (bit 7 in
		 * register 0x1a2c) then mvebu hw determines the type of config
		 * request like a compliant PCI Bridge, based on the primary bus
		 * number which is configured via Local Bus Number (bits [15:8]
		 * in register 0x1a04) and the secondary bus number which is
		 * configured via Secondary Bus Number (bits [7:0] in register
		 * 0x1a2c). Local PCI Bridge (PCIe Root Port) is available on
		 * the primary bus as the device with Local Device Number (bits
		 * [20:16] in register 0x1a04).
		 *
		 * Secondary Bus Number Enable bit is disabled by default and
		 * option 2. is not available on pre-XP PCIe controllers. Hence
		 * this driver always uses option 1.
		 *
		 * Basically it means that primary and secondary buses share one
		 * virtual number configured via the Local Bus Number bits, and
		 * the Local Device Number bits determine whether the primary or
		 * the secondary bus is being accessed. Set Local Device Number
		 * to 1 and redirect all writes of the PCI Bridge Secondary Bus
		 * Number register to Local Bus Number (bits [15:8] in register
		 * 0x1a04).
		 *
		 * So accessing devices on buses behind the secondary bus number
		 * works correctly. And config space access to device 0 on the
		 * secondary bus number is also correctly routed to the
		 * secondary bus. Due to issues described in
		 * mvebu_pcie_setup_hw(), PCI Bridges at primary bus (zero) are
		 * not accessed directly via PCI config space but rather
		 * indirectly via the kernel emulated PCI bridge driver.
		 */
		mvebu_pcie_setup_hw(port);
		mvebu_pcie_set_local_dev_nr(port, 1);
		mvebu_pcie_set_local_bus_nr(port, 0);
	}

	bridge->sysdata = pcie;
	bridge->ops = &mvebu_pcie_ops;
	bridge->child_ops = &mvebu_pcie_child_ops;
	bridge->align_resource = mvebu_pcie_align_resource;
	bridge->map_irq = mvebu_pcie_map_irq;

	return pci_host_probe(bridge);
}

static int mvebu_pcie_remove(struct platform_device *pdev)
{
	struct mvebu_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	u32 cmd, sspl;
	int i;

	/* Remove PCI bus with all devices. */
	pci_lock_rescan_remove();
	pci_stop_root_bus(bridge->bus);
	pci_remove_root_bus(bridge->bus);
	pci_unlock_rescan_remove();

	for (i = 0; i < pcie->nports; i++) {
		struct mvebu_pcie_port *port = &pcie->ports[i];
		int irq = port->intx_irq;

		if (!port->base)
			continue;

		/* Disable Root Bridge I/O space, memory space and bus mastering. */
		cmd = mvebu_readl(port, PCIE_CMD_OFF);
		cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		mvebu_writel(port, cmd, PCIE_CMD_OFF);

		/* Mask all interrupt sources. */
		mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_UNMASK_OFF);

		/* Clear all interrupt causes. */
		mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_CAUSE_OFF);

		if (irq > 0)
			irq_set_chained_handler_and_data(irq, NULL, NULL);

		/* Remove IRQ domains. */
		if (port->intx_irq_domain)
			irq_domain_remove(port->intx_irq_domain);

		/* Free config space for emulated root bridge. */
		pci_bridge_emul_cleanup(&port->bridge);

		/* Disable sending Set_Slot_Power_Limit PCIe Message. */
		sspl = mvebu_readl(port, PCIE_SSPL_OFF);
		sspl &= ~(PCIE_SSPL_VALUE_MASK | PCIE_SSPL_SCALE_MASK | PCIE_SSPL_ENABLE);
		mvebu_writel(port, sspl, PCIE_SSPL_OFF);

		/* Disable and clear BARs and windows. */
		mvebu_pcie_disable_wins(port);

		/* Delete PCIe IO and MEM windows. */
		if (port->iowin.size)
			mvebu_pcie_del_windows(port, port->iowin.base, port->iowin.size);
		if (port->memwin.size)
			mvebu_pcie_del_windows(port, port->memwin.base, port->memwin.size);

		/* Power down card and disable clocks. Must be the last step. */
		mvebu_pcie_powerdown(port);
	}

	return 0;
}

static const struct of_device_id mvebu_pcie_of_match_table[] = {
	{ .compatible = "marvell,armada-xp-pcie", },
	{ .compatible = "marvell,armada-370-pcie", },
	{ .compatible = "marvell,dove-pcie", },
	{ .compatible = "marvell,kirkwood-pcie", },
	{},
};

static const struct dev_pm_ops mvebu_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
};

static struct platform_driver mvebu_pcie_driver = {
	.driver = {
		.name = "mvebu-pcie",
		.of_match_table = mvebu_pcie_of_match_table,
		.pm = &mvebu_pcie_pm_ops,
	},
	.probe = mvebu_pcie_probe,
	.remove = mvebu_pcie_remove,
};
module_platform_driver(mvebu_pcie_driver);

MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@bootlin.com>");
MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
MODULE_DESCRIPTION("Marvell EBU PCIe controller");
MODULE_LICENSE("GPL v2");