// SPDX-License-Identifier: GPL-2.0
/*
 * MediaTek PCIe host controller driver.
 *
 * Copyright (c) 2017 MediaTek Inc.
 * Author: Ryder Lee <ryder.lee@mediatek.com>
 *	   Honghui Zhang <honghui.zhang@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include "../pci.h"

/* PCIe shared registers */
#define PCIE_SYS_CFG		0x00
#define PCIE_INT_ENABLE		0x0c
#define PCIE_CFG_ADDR		0x20
#define PCIE_CFG_DATA		0x24

/* PCIe per port registers */
#define PCIE_BAR0_SETUP		0x10
#define PCIE_CLASS		0x34
#define PCIE_LINK_STATUS	0x50

#define PCIE_PORT_INT_EN(x)	BIT(20 + (x))
#define PCIE_PORT_PERST(x)	BIT(1 + (x))
#define PCIE_PORT_LINKUP	BIT(0)
#define PCIE_BAR_MAP_MAX	GENMASK(31, 16)

#define PCIE_BAR_ENABLE		BIT(0)
#define PCIE_REVISION_ID	BIT(0)
#define PCIE_CLASS_CODE		(0x60400 << 8)
#define PCIE_CONF_REG(regn)	(((regn) & GENMASK(7, 2)) | \
				((((regn) >> 8) & GENMASK(3, 0)) << 24))
#define PCIE_CONF_FUN(fun)	(((fun) << 8) & GENMASK(10, 8))
#define PCIE_CONF_DEV(dev)	(((dev) << 11) & GENMASK(15, 11))
#define PCIE_CONF_BUS(bus)	(((bus) << 16) & GENMASK(23, 16))
#define PCIE_CONF_ADDR(regn, fun, dev, bus) \
	(PCIE_CONF_REG(regn) | PCIE_CONF_FUN(fun) | \
	 PCIE_CONF_DEV(dev) | PCIE_CONF_BUS(bus))

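/*
 * Example encoding (illustrative values, not taken from the driver):
 * reading config register 0x10 of bus 1, device 2, function 0 through the
 * v1 indirect window gives
 *   PCIE_CONF_ADDR(0x10, 0, 2, 1) = 0x10 | (2 << 11) | (1 << 16) = 0x11010,
 * which is written to PCIE_CFG_ADDR before PCIE_CFG_DATA is accessed.
 */
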
/* MediaTek specific configuration registers */
#define PCIE_FTS_NUM		0x70c
#define PCIE_FTS_NUM_MASK	GENMASK(15, 8)
#define PCIE_FTS_NUM_L0(x)	((x) & 0xff << 8)

#define PCIE_FC_CREDIT		0x73c
#define PCIE_FC_CREDIT_MASK	(GENMASK(31, 31) | GENMASK(28, 16))
#define PCIE_FC_CREDIT_VAL(x)	((x) << 16)

/* PCIe V2 shared registers */
#define PCIE_SYS_CFG_V2		0x0
#define PCIE_CSR_LTSSM_EN(x)	BIT(0 + (x) * 8)
#define PCIE_CSR_ASPM_L1_EN(x)	BIT(1 + (x) * 8)

/* PCIe V2 per-port registers */
#define PCIE_MSI_VECTOR		0x0c0

#define PCIE_CONF_VEND_ID	0x100
#define PCIE_CONF_CLASS_ID	0x106

#define PCIE_INT_MASK		0x420
#define INTX_MASK		GENMASK(19, 16)
#define INTX_SHIFT		16
#define PCIE_INT_STATUS		0x424
#define MSI_STATUS		BIT(23)
#define PCIE_IMSI_STATUS	0x42c
#define PCIE_IMSI_ADDR		0x430
#define MSI_MASK		BIT(23)
#define MTK_MSI_IRQS_NUM	32

#define PCIE_AHB_TRANS_BASE0_L	0x438
#define PCIE_AHB_TRANS_BASE0_H	0x43c
#define AHB2PCIE_SIZE(x)	((x) & GENMASK(4, 0))
#define PCIE_AXI_WINDOW0	0x448
#define WIN_ENABLE		BIT(7)

/* PCIe V2 configuration transaction header */
#define PCIE_CFG_HEADER0	0x460
#define PCIE_CFG_HEADER1	0x464
#define PCIE_CFG_HEADER2	0x468
#define PCIE_CFG_WDATA		0x470
#define PCIE_APP_TLP_REQ	0x488
#define PCIE_CFG_RDATA		0x48c
#define APP_CFG_REQ		BIT(0)
#define APP_CPL_STATUS		GENMASK(7, 5)

#define CFG_WRRD_TYPE_0		4
#define CFG_WR_FMT		2
#define CFG_RD_FMT		0

#define CFG_DW0_LENGTH(length)	((length) & GENMASK(9, 0))
#define CFG_DW0_TYPE(type)	(((type) << 24) & GENMASK(28, 24))
#define CFG_DW0_FMT(fmt)	(((fmt) << 29) & GENMASK(31, 29))
#define CFG_DW2_REGN(regn)	((regn) & GENMASK(11, 2))
#define CFG_DW2_FUN(fun)	(((fun) << 16) & GENMASK(18, 16))
#define CFG_DW2_DEV(dev)	(((dev) << 19) & GENMASK(23, 19))
#define CFG_DW2_BUS(bus)	(((bus) << 24) & GENMASK(31, 24))
#define CFG_HEADER_DW0(type, fmt) \
	(CFG_DW0_LENGTH(1) | CFG_DW0_TYPE(type) | CFG_DW0_FMT(fmt))
#define CFG_HEADER_DW1(where, size) \
	(GENMASK(((size) - 1), 0) << ((where) & 0x3))
#define CFG_HEADER_DW2(regn, fun, dev, bus) \
	(CFG_DW2_REGN(regn) | CFG_DW2_FUN(fun) | \
	CFG_DW2_DEV(dev) | CFG_DW2_BUS(bus))

#define PCIE_RST_CTRL		0x510
#define PCIE_PHY_RSTB		BIT(0)
#define PCIE_PIPE_SRSTB		BIT(1)
#define PCIE_MAC_SRSTB		BIT(2)
#define PCIE_CRSTB		BIT(3)
#define PCIE_PERSTB		BIT(8)
#define PCIE_LINKDOWN_RST_EN	GENMASK(15, 13)
#define PCIE_LINK_STATUS_V2	0x804
#define PCIE_PORT_LINKUP_V2	BIT(10)

struct mtk_pcie_port;

/**
 * struct mtk_pcie_soc - differentiate between host generations
 * @need_fix_class_id: whether this host's class ID needs to be fixed or not
 * @ops: pointer to configuration access functions
 * @startup: pointer to controller setting functions
 * @setup_irq: pointer to initialize IRQ functions
 */
struct mtk_pcie_soc {
	bool need_fix_class_id;
	struct pci_ops *ops;
	int (*startup)(struct mtk_pcie_port *port);
	int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node);
};

/**
 * struct mtk_pcie_port - PCIe port information
 * @base: IO mapped register base
 * @list: port list
 * @pcie: pointer to PCIe host info
 * @reset: pointer to port reset control
 * @sys_ck: pointer to transaction/data link layer clock
 * @ahb_ck: pointer to AHB slave interface operating clock for CSR access
 *	    and RC initiated MMIO access
 * @axi_ck: pointer to application layer MMIO channel operating clock
 * @aux_ck: pointer to pe2_mac_bridge and pe2_mac_core operating clock
 *	    when pcie_mac_ck/pcie_pipe_ck is turned off
 * @obff_ck: pointer to OBFF functional block operating clock
 * @pipe_ck: pointer to LTSSM and PHY/MAC layer operating clock
 * @phy: pointer to PHY control block
 * @lane: lane count
 * @slot: port slot
 * @irq_domain: legacy INTx IRQ domain
 * @inner_domain: inner IRQ domain
 * @msi_domain: MSI IRQ domain
 * @lock: protect the msi_irq_in_use bitmap
 * @msi_irq_in_use: bit map for assigned MSI IRQ
 */
struct mtk_pcie_port {
	void __iomem *base;
	struct list_head list;
	struct mtk_pcie *pcie;
	struct reset_control *reset;
	struct clk *sys_ck;
	struct clk *ahb_ck;
	struct clk *axi_ck;
	struct clk *aux_ck;
	struct clk *obff_ck;
	struct clk *pipe_ck;
	struct phy *phy;
	u32 lane;
	u32 slot;
	struct irq_domain *irq_domain;
	struct irq_domain *inner_domain;
	struct irq_domain *msi_domain;
	struct mutex lock;
	DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM);
};

/**
 * struct mtk_pcie - PCIe host information
 * @dev: pointer to PCIe device
 * @base: IO mapped register base
 * @free_ck: free-run reference clock
 * @io: IO resource
 * @pio: PIO resource
 * @mem: non-prefetchable memory resource
 * @busn: bus range
 * @offset: IO / Memory offset
 * @ports: list of PCIe port information
 * @soc: pointer to SoC-dependent operations
 */
struct mtk_pcie {
	struct device *dev;
	void __iomem *base;
	struct clk *free_ck;

	struct resource io;
	struct resource pio;
	struct resource mem;
	struct resource busn;
	struct {
		resource_size_t mem;
		resource_size_t io;
	} offset;
	struct list_head ports;
	const struct mtk_pcie_soc *soc;
};

static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie)
{
	struct device *dev = pcie->dev;

	clk_disable_unprepare(pcie->free_ck);

	if (dev->pm_domain) {
		pm_runtime_put_sync(dev);
		pm_runtime_disable(dev);
	}
}

static void mtk_pcie_port_free(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;

	devm_iounmap(dev, port->base);
	list_del(&port->list);
	devm_kfree(dev, port);
}

static void mtk_pcie_put_resources(struct mtk_pcie *pcie)
{
	struct mtk_pcie_port *port, *tmp;

	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		phy_power_off(port->phy);
		phy_exit(port->phy);
		clk_disable_unprepare(port->pipe_ck);
		clk_disable_unprepare(port->obff_ck);
		clk_disable_unprepare(port->axi_ck);
		clk_disable_unprepare(port->aux_ck);
		clk_disable_unprepare(port->ahb_ck);
		clk_disable_unprepare(port->sys_ck);
		mtk_pcie_port_free(port);
	}

	mtk_pcie_subsys_powerdown(pcie);
}

static int mtk_pcie_check_cfg_cpld(struct mtk_pcie_port *port)
{
	u32 val;
	int err;

	err = readl_poll_timeout_atomic(port->base + PCIE_APP_TLP_REQ, val,
					!(val & APP_CFG_REQ), 10,
					100 * USEC_PER_MSEC);
	if (err)
		return PCIBIOS_SET_FAILED;

	if (readl(port->base + PCIE_APP_TLP_REQ) & APP_CPL_STATUS)
		return PCIBIOS_SET_FAILED;

	return PCIBIOS_SUCCESSFUL;
}

static int mtk_pcie_hw_rd_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
			      int where, int size, u32 *val)
{
	u32 tmp;

	/* Write PCIe configuration transaction header for Cfgrd */
	writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_RD_FMT),
	       port->base + PCIE_CFG_HEADER0);
	writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
	writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
	       port->base + PCIE_CFG_HEADER2);

	/* Trigger h/w to transmit Cfgrd TLP */
	tmp = readl(port->base + PCIE_APP_TLP_REQ);
	tmp |= APP_CFG_REQ;
	writel(tmp, port->base + PCIE_APP_TLP_REQ);

	/* Check completion status */
	if (mtk_pcie_check_cfg_cpld(port))
		return PCIBIOS_SET_FAILED;

	/* Read cpld payload of Cfgrd */
	*val = readl(port->base + PCIE_CFG_RDATA);

	if (size == 1)
		*val = (*val >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (8 * (where & 3))) & 0xffff;

	return PCIBIOS_SUCCESSFUL;
}

static int mtk_pcie_hw_wr_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
			      int where, int size, u32 val)
{
	/* Write PCIe configuration transaction header for Cfgwr */
	writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_WR_FMT),
	       port->base + PCIE_CFG_HEADER0);
	writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
	writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
	       port->base + PCIE_CFG_HEADER2);

	/* Write Cfgwr data */
	val = val << 8 * (where & 3);
	writel(val, port->base + PCIE_CFG_WDATA);

	/* Trigger h/w to transmit Cfgwr TLP */
	val = readl(port->base + PCIE_APP_TLP_REQ);
	val |= APP_CFG_REQ;
	writel(val, port->base + PCIE_APP_TLP_REQ);

	/* Check completion status */
	return mtk_pcie_check_cfg_cpld(port);
}

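/*
 * A configuration access is dispatched to the port whose slot number
 * matches the device number encoded in devfn; each root port generates its
 * own configuration TLPs through the per-port registers above.
 */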
static struct mtk_pcie_port *mtk_pcie_find_port(struct pci_bus *bus,
						unsigned int devfn)
{
	struct mtk_pcie *pcie = bus->sysdata;
	struct mtk_pcie_port *port;

	list_for_each_entry(port, &pcie->ports, list)
		if (port->slot == PCI_SLOT(devfn))
			return port;

	return NULL;
}

static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 *val)
{
	struct mtk_pcie_port *port;
	u32 bn = bus->number;
	int ret;

	port = mtk_pcie_find_port(bus, devfn);
	if (!port) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	ret = mtk_pcie_hw_rd_cfg(port, bn, devfn, where, size, val);
	if (ret)
		*val = ~0;

	return ret;
}

static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	struct mtk_pcie_port *port;
	u32 bn = bus->number;

	port = mtk_pcie_find_port(bus, devfn);
	if (!port)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return mtk_pcie_hw_wr_cfg(port, bn, devfn, where, size, val);
}

static struct pci_ops mtk_pcie_ops_v2 = {
	.read = mtk_pcie_config_read,
	.write = mtk_pcie_config_write,
};

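/*
 * Bring up a v2-class port (MT2712/MT7622): enable LTSSM and ASPM L1 from
 * the shared subsys registers when present, run the reset sequence, fix up
 * the vendor and class IDs where required, wait for link training, unmask
 * INTx and program the AHB-to-PCIe and PCIe-to-AXI translation windows.
 */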
static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	struct resource *mem = &pcie->mem;
	const struct mtk_pcie_soc *soc = port->pcie->soc;
	u32 val;
	size_t size;
	int err;

	/* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
	if (pcie->base) {
		val = readl(pcie->base + PCIE_SYS_CFG_V2);
		val |= PCIE_CSR_LTSSM_EN(port->slot) |
		       PCIE_CSR_ASPM_L1_EN(port->slot);
		writel(val, pcie->base + PCIE_SYS_CFG_V2);
	}

	/* Assert all reset signals */
	writel(0, port->base + PCIE_RST_CTRL);

	/*
	 * Enable PCIe link down reset. If the link status changes from link
	 * up to link down, this will reset the MAC control registers and
	 * configuration space.
	 */
	writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);

	/* De-assert PHY, PE, PIPE, MAC and configuration reset */
	val = readl(port->base + PCIE_RST_CTRL);
	val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
	       PCIE_MAC_SRSTB | PCIE_CRSTB;
	writel(val, port->base + PCIE_RST_CTRL);

	/* Set up vendor ID and class code */
	if (soc->need_fix_class_id) {
		val = PCI_VENDOR_ID_MEDIATEK;
		writew(val, port->base + PCIE_CONF_VEND_ID);

		val = PCI_CLASS_BRIDGE_HOST;
		writew(val, port->base + PCIE_CONF_CLASS_ID);
	}

	/* 100ms timeout value should be enough for Gen1/2 training */
	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
				 !!(val & PCIE_PORT_LINKUP_V2), 20,
				 100 * USEC_PER_MSEC);
	if (err)
		return -ETIMEDOUT;

	/* Set INTx mask */
	val = readl(port->base + PCIE_INT_MASK);
	val &= ~INTX_MASK;
	writel(val, port->base + PCIE_INT_MASK);

	/* Set AHB to PCIe translation windows */
	size = mem->end - mem->start;
	val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size));
	writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);

	val = upper_32_bits(mem->start);
	writel(val, port->base + PCIE_AHB_TRANS_BASE0_H);

	/* Set PCIe to AXI translation memory space. */
	val = fls(0xffffffff) | WIN_ENABLE;
	writel(val, port->base + PCIE_AXI_WINDOW0);

	return 0;
}

static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
	phys_addr_t addr;

	/* MT2712/MT7622 only support 32-bit MSI addresses */
	addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
	msg->address_hi = 0;
	msg->address_lo = lower_32_bits(addr);

	msg->data = data->hwirq;

	dev_dbg(port->pcie->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)data->hwirq, msg->address_hi, msg->address_lo);
}

static int mtk_msi_set_affinity(struct irq_data *irq_data,
				const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static void mtk_msi_ack_irq(struct irq_data *data)
{
	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
	u32 hwirq = data->hwirq;

	writel(1 << hwirq, port->base + PCIE_IMSI_STATUS);
}

static struct irq_chip mtk_msi_bottom_irq_chip = {
	.name			= "MTK MSI",
	.irq_compose_msi_msg	= mtk_compose_msi_msg,
	.irq_set_affinity	= mtk_msi_set_affinity,
	.irq_ack		= mtk_msi_ack_irq,
};

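/*
 * MSI hwirqs are allocated from a per-port bitmap of MTK_MSI_IRQS_NUM
 * vectors; port->lock serializes allocation and release.
 */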
static int mtk_pcie_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				     unsigned int nr_irqs, void *args)
{
	struct mtk_pcie_port *port = domain->host_data;
	unsigned long bit;

	WARN_ON(nr_irqs != 1);
	mutex_lock(&port->lock);

	bit = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM);
	if (bit >= MTK_MSI_IRQS_NUM) {
		mutex_unlock(&port->lock);
		return -ENOSPC;
	}

	__set_bit(bit, port->msi_irq_in_use);

	mutex_unlock(&port->lock);

	irq_domain_set_info(domain, virq, bit, &mtk_msi_bottom_irq_chip,
			    domain->host_data, handle_edge_irq,
			    NULL, NULL);

	return 0;
}

static void mtk_pcie_irq_domain_free(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(d);

	mutex_lock(&port->lock);

	if (!test_bit(d->hwirq, port->msi_irq_in_use))
		dev_err(port->pcie->dev, "trying to free unused MSI#%lu\n",
			d->hwirq);
	else
		__clear_bit(d->hwirq, port->msi_irq_in_use);

	mutex_unlock(&port->lock);

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc = mtk_pcie_irq_domain_alloc,
	.free = mtk_pcie_irq_domain_free,
};

static struct irq_chip mtk_msi_irq_chip = {
	.name = "MTK PCIe MSI",
	.irq_ack = irq_chip_ack_parent,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

static struct msi_domain_info mtk_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX),
	.chip = &mtk_msi_irq_chip,
};

static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port)
{
	struct fwnode_handle *fwnode = of_node_to_fwnode(port->pcie->dev->of_node);

	mutex_init(&port->lock);

	port->inner_domain = irq_domain_create_linear(fwnode, MTK_MSI_IRQS_NUM,
						      &msi_domain_ops, port);
	if (!port->inner_domain) {
		dev_err(port->pcie->dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	port->msi_domain = pci_msi_create_irq_domain(fwnode, &mtk_msi_domain_info,
						     port->inner_domain);
	if (!port->msi_domain) {
		dev_err(port->pcie->dev, "failed to create MSI domain\n");
		irq_domain_remove(port->inner_domain);
		return -ENOMEM;
	}

	return 0;
}

static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
{
	u32 val;
	phys_addr_t msg_addr;

	msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
	val = lower_32_bits(msg_addr);
	writel(val, port->base + PCIE_IMSI_ADDR);

	val = readl(port->base + PCIE_INT_MASK);
	val &= ~MSI_MASK;
	writel(val, port->base + PCIE_INT_MASK);
}

static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			     irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = mtk_pcie_intx_map,
};

static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
				    struct device_node *node)
{
	struct device *dev = port->pcie->dev;
	struct device_node *pcie_intc_node;
	int ret;

	/* Setup INTx */
	pcie_intc_node = of_get_next_child(node, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "no PCIe Intc node found\n");
		return -ENODEV;
	}

	port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						 &intx_domain_ops, port);
	if (!port->irq_domain) {
		dev_err(dev, "failed to get INTx IRQ domain\n");
		return -ENODEV;
	}

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		ret = mtk_pcie_allocate_msi_domains(port);
		if (ret)
			return ret;

		mtk_pcie_enable_msi(port);
	}

	return 0;
}

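/*
 * Chained handler for the per-port interrupt: INTx sources are reported in
 * bits 16-19 of PCIE_INT_STATUS and MSIs via MSI_STATUS, with the
 * individual MSI vectors read back from PCIE_IMSI_STATUS.
 */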
static void mtk_pcie_intr_handler(struct irq_desc *desc)
{
	struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *irqchip = irq_desc_get_chip(desc);
	unsigned long status;
	u32 virq;
	u32 bit = INTX_SHIFT;

	chained_irq_enter(irqchip, desc);

	status = readl(port->base + PCIE_INT_STATUS);
	if (status & INTX_MASK) {
		for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) {
			/* Clear the INTx */
			writel(1 << bit, port->base + PCIE_INT_STATUS);
			virq = irq_find_mapping(port->irq_domain,
						bit - INTX_SHIFT);
			generic_handle_irq(virq);
		}
	}

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (status & MSI_STATUS) {
			unsigned long imsi_status;

			while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
				for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) {
					virq = irq_find_mapping(port->inner_domain, bit);
					generic_handle_irq(virq);
				}
			}
			/* Clear MSI interrupt status */
			writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
		}
	}

	chained_irq_exit(irqchip, desc);
}

static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
			      struct device_node *node)
{
	struct mtk_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int err, irq;

	err = mtk_pcie_init_irq_domain(port, node);
	if (err) {
		dev_err(dev, "failed to init PCIe IRQ domain\n");
		return err;
	}

	irq = platform_get_irq(pdev, port->slot);
	irq_set_chained_handler_and_data(irq, mtk_pcie_intr_handler, port);

	return 0;
}

static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus,
				      unsigned int devfn, int where)
{
	struct mtk_pcie *pcie = bus->sysdata;

	writel(PCIE_CONF_ADDR(where, PCI_FUNC(devfn), PCI_SLOT(devfn),
			      bus->number), pcie->base + PCIE_CFG_ADDR);

	return pcie->base + PCIE_CFG_DATA + (where & 3);
}

static struct pci_ops mtk_pcie_ops = {
	.map_bus = mtk_pcie_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

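/*
 * Bring up a v1-class port (MT2701/MT7623): toggle PERST#, wait for the
 * link to come up, enable the port interrupt, map BAR0 to the whole DDR
 * range, and program the class code, FC credits and FTS number through the
 * indirect PCIE_CFG_ADDR/PCIE_CFG_DATA window.
 */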
static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	u32 func = PCI_FUNC(port->slot << 3);
	u32 slot = PCI_SLOT(port->slot << 3);
	u32 val;
	int err;

	/* assert port PERST_N */
	val = readl(pcie->base + PCIE_SYS_CFG);
	val |= PCIE_PORT_PERST(port->slot);
	writel(val, pcie->base + PCIE_SYS_CFG);

	/* de-assert port PERST_N */
	val = readl(pcie->base + PCIE_SYS_CFG);
	val &= ~PCIE_PORT_PERST(port->slot);
	writel(val, pcie->base + PCIE_SYS_CFG);

	/* 100ms timeout value should be enough for Gen1/2 training */
	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS, val,
				 !!(val & PCIE_PORT_LINKUP), 20,
				 100 * USEC_PER_MSEC);
	if (err)
		return -ETIMEDOUT;

	/* enable interrupt */
	val = readl(pcie->base + PCIE_INT_ENABLE);
	val |= PCIE_PORT_INT_EN(port->slot);
	writel(val, pcie->base + PCIE_INT_ENABLE);

	/* map to all DDR region. We need to set it before cfg operation. */
	writel(PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE,
	       port->base + PCIE_BAR0_SETUP);

	/* configure class code and revision ID */
	writel(PCIE_CLASS_CODE | PCIE_REVISION_ID, port->base + PCIE_CLASS);

	/* configure FC credit */
	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	val = readl(pcie->base + PCIE_CFG_DATA);
	val &= ~PCIE_FC_CREDIT_MASK;
	val |= PCIE_FC_CREDIT_VAL(0x806c);
	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	writel(val, pcie->base + PCIE_CFG_DATA);

	/* configure RC FTS number to 250 when it leaves L0s */
	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	val = readl(pcie->base + PCIE_CFG_DATA);
	val &= ~PCIE_FTS_NUM_MASK;
	val |= PCIE_FTS_NUM_L0(0x50);
	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	writel(val, pcie->base + PCIE_CFG_DATA);

	return 0;
}

static void mtk_pcie_enable_port(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;
	int err;

	err = clk_prepare_enable(port->sys_ck);
	if (err) {
		dev_err(dev, "failed to enable sys_ck%d clock\n", port->slot);
		goto err_sys_clk;
	}

	err = clk_prepare_enable(port->ahb_ck);
	if (err) {
		dev_err(dev, "failed to enable ahb_ck%d\n", port->slot);
		goto err_ahb_clk;
	}

	err = clk_prepare_enable(port->aux_ck);
	if (err) {
		dev_err(dev, "failed to enable aux_ck%d\n", port->slot);
		goto err_aux_clk;
	}

	err = clk_prepare_enable(port->axi_ck);
	if (err) {
		dev_err(dev, "failed to enable axi_ck%d\n", port->slot);
		goto err_axi_clk;
	}

	err = clk_prepare_enable(port->obff_ck);
	if (err) {
		dev_err(dev, "failed to enable obff_ck%d\n", port->slot);
		goto err_obff_clk;
	}

	err = clk_prepare_enable(port->pipe_ck);
	if (err) {
		dev_err(dev, "failed to enable pipe_ck%d\n", port->slot);
		goto err_pipe_clk;
	}

	reset_control_assert(port->reset);
	reset_control_deassert(port->reset);

	err = phy_init(port->phy);
	if (err) {
		dev_err(dev, "failed to initialize port%d phy\n", port->slot);
		goto err_phy_init;
	}

	err = phy_power_on(port->phy);
	if (err) {
		dev_err(dev, "failed to power on port%d phy\n", port->slot);
		goto err_phy_on;
	}

	if (!pcie->soc->startup(port))
		return;

	dev_info(dev, "Port%d link down\n", port->slot);

	phy_power_off(port->phy);
err_phy_on:
	phy_exit(port->phy);
err_phy_init:
	clk_disable_unprepare(port->pipe_ck);
err_pipe_clk:
	clk_disable_unprepare(port->obff_ck);
err_obff_clk:
	clk_disable_unprepare(port->axi_ck);
err_axi_clk:
	clk_disable_unprepare(port->aux_ck);
err_aux_clk:
	clk_disable_unprepare(port->ahb_ck);
err_ahb_clk:
	clk_disable_unprepare(port->sys_ck);
err_sys_clk:
	mtk_pcie_port_free(port);
}

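/*
 * Parse one root port node: map the "port%d" register window, grab the
 * mandatory sys_ck%d clock and the optional split clocks
 * (ahb/axi/aux/obff/pipe), and look up the optional per-port reset and PHY.
 */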
static int mtk_pcie_parse_port(struct mtk_pcie *pcie,
			       struct device_node *node,
			       int slot)
{
	struct mtk_pcie_port *port;
	struct resource *regs;
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	char name[10];
	int err;

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	err = of_property_read_u32(node, "num-lanes", &port->lane);
	if (err) {
		dev_err(dev, "missing num-lanes property\n");
		return err;
	}

	snprintf(name, sizeof(name), "port%d", slot);
	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	port->base = devm_ioremap_resource(dev, regs);
	if (IS_ERR(port->base)) {
		dev_err(dev, "failed to map port%d base\n", slot);
		return PTR_ERR(port->base);
	}

	snprintf(name, sizeof(name), "sys_ck%d", slot);
	port->sys_ck = devm_clk_get(dev, name);
	if (IS_ERR(port->sys_ck)) {
		dev_err(dev, "failed to get sys_ck%d clock\n", slot);
		return PTR_ERR(port->sys_ck);
	}

	/* sys_ck might be divided into the following parts in some chips */
	snprintf(name, sizeof(name), "ahb_ck%d", slot);
	port->ahb_ck = devm_clk_get(dev, name);
	if (IS_ERR(port->ahb_ck)) {
		if (PTR_ERR(port->ahb_ck) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		port->ahb_ck = NULL;
	}

	snprintf(name, sizeof(name), "axi_ck%d", slot);
	port->axi_ck = devm_clk_get(dev, name);
	if (IS_ERR(port->axi_ck)) {
		if (PTR_ERR(port->axi_ck) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		port->axi_ck = NULL;
	}

	snprintf(name, sizeof(name), "aux_ck%d", slot);
	port->aux_ck = devm_clk_get(dev, name);
	if (IS_ERR(port->aux_ck)) {
		if (PTR_ERR(port->aux_ck) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		port->aux_ck = NULL;
	}

	snprintf(name, sizeof(name), "obff_ck%d", slot);
	port->obff_ck = devm_clk_get(dev, name);
	if (IS_ERR(port->obff_ck)) {
		if (PTR_ERR(port->obff_ck) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		port->obff_ck = NULL;
	}

	snprintf(name, sizeof(name), "pipe_ck%d", slot);
	port->pipe_ck = devm_clk_get(dev, name);
	if (IS_ERR(port->pipe_ck)) {
		if (PTR_ERR(port->pipe_ck) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		port->pipe_ck = NULL;
	}

	snprintf(name, sizeof(name), "pcie-rst%d", slot);
	port->reset = devm_reset_control_get_optional_exclusive(dev, name);
	if (PTR_ERR(port->reset) == -EPROBE_DEFER)
		return PTR_ERR(port->reset);

	/* some platforms may use default PHY setting */
	snprintf(name, sizeof(name), "pcie-phy%d", slot);
	port->phy = devm_phy_optional_get(dev, name);
	if (IS_ERR(port->phy))
		return PTR_ERR(port->phy);

	port->slot = slot;
	port->pcie = pcie;

	if (pcie->soc->setup_irq) {
		err = pcie->soc->setup_irq(port, node);
		if (err)
			return err;
	}

	INIT_LIST_HEAD(&port->list);
	list_add_tail(&port->list, &pcie->ports);

	return 0;
}

static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *regs;
	int err;

	/* get shared registers, which are optional */
	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "subsys");
	if (regs) {
		pcie->base = devm_ioremap_resource(dev, regs);
		if (IS_ERR(pcie->base)) {
			dev_err(dev, "failed to map shared register\n");
			return PTR_ERR(pcie->base);
		}
	}

	pcie->free_ck = devm_clk_get(dev, "free_ck");
	if (IS_ERR(pcie->free_ck)) {
		if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		pcie->free_ck = NULL;
	}

	if (dev->pm_domain) {
		pm_runtime_enable(dev);
		pm_runtime_get_sync(dev);
	}

	/* enable top level clock */
	err = clk_prepare_enable(pcie->free_ck);
	if (err) {
		dev_err(dev, "failed to enable free_ck\n");
		goto err_free_ck;
	}

	return 0;

err_free_ck:
	if (dev->pm_domain) {
		pm_runtime_put_sync(dev);
		pm_runtime_disable(dev);
	}

	return err;
}

static int mtk_pcie_setup(struct mtk_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct device_node *node = dev->of_node, *child;
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	struct resource res;
	struct mtk_pcie_port *port, *tmp;
	int err;

	if (of_pci_range_parser_init(&parser, node)) {
		dev_err(dev, "missing \"ranges\" property\n");
		return -EINVAL;
	}

	for_each_of_pci_range(&parser, &range) {
		err = of_pci_range_to_resource(&range, node, &res);
		if (err < 0)
			return err;

		switch (res.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			pcie->offset.io = res.start - range.pci_addr;

			memcpy(&pcie->pio, &res, sizeof(res));
			pcie->pio.name = node->full_name;

			pcie->io.start = range.cpu_addr;
			pcie->io.end = range.cpu_addr + range.size - 1;
			pcie->io.flags = IORESOURCE_MEM;
			pcie->io.name = "I/O";

			memcpy(&res, &pcie->io, sizeof(res));
			break;

		case IORESOURCE_MEM:
			pcie->offset.mem = res.start - range.pci_addr;

			memcpy(&pcie->mem, &res, sizeof(res));
			pcie->mem.name = "non-prefetchable";
			break;
		}
	}

	err = of_pci_parse_bus_range(node, &pcie->busn);
	if (err < 0) {
		dev_err(dev, "failed to parse bus ranges property: %d\n", err);
		pcie->busn.name = node->name;
		pcie->busn.start = 0;
		pcie->busn.end = 0xff;
		pcie->busn.flags = IORESOURCE_BUS;
	}

	for_each_available_child_of_node(node, child) {
		int slot;

		err = of_pci_get_devfn(child);
		if (err < 0) {
			dev_err(dev, "failed to parse devfn: %d\n", err);
			return err;
		}

		slot = PCI_SLOT(err);

		err = mtk_pcie_parse_port(pcie, child, slot);
		if (err)
			return err;
	}

	err = mtk_pcie_subsys_powerup(pcie);
	if (err)
		return err;

	/* enable each port, and then check link status */
	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
		mtk_pcie_enable_port(port);

	/* power down PCIe subsys if slots are all empty (link down) */
	if (list_empty(&pcie->ports))
		mtk_pcie_subsys_powerdown(pcie);

	return 0;
}

static int mtk_pcie_request_resources(struct mtk_pcie *pcie)
{
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct list_head *windows = &host->windows;
	struct device *dev = pcie->dev;
	int err;

	pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
	pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
	pci_add_resource(windows, &pcie->busn);

	err = devm_request_pci_bus_resources(dev, windows);
	if (err < 0)
		return err;

	devm_pci_remap_iospace(dev, &pcie->pio, pcie->io.start);

	return 0;
}

static int mtk_pcie_register_host(struct pci_host_bridge *host)
{
	struct mtk_pcie *pcie = pci_host_bridge_priv(host);
	struct pci_bus *child;
	int err;

	host->busnr = pcie->busn.start;
	host->dev.parent = pcie->dev;
	host->ops = pcie->soc->ops;
	host->map_irq = of_irq_parse_and_map_pci;
	host->swizzle_irq = pci_common_swizzle;
	host->sysdata = pcie;

	err = pci_scan_root_bus_bridge(host);
	if (err < 0)
		return err;

	pci_bus_size_bridges(host->bus);
	pci_bus_assign_resources(host->bus);

	list_for_each_entry(child, &host->bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(host->bus);

	return 0;
}

static int mtk_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mtk_pcie *pcie;
	struct pci_host_bridge *host;
	int err;

	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!host)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(host);

	pcie->dev = dev;
	pcie->soc = of_device_get_match_data(dev);
	platform_set_drvdata(pdev, pcie);
	INIT_LIST_HEAD(&pcie->ports);

	err = mtk_pcie_setup(pcie);
	if (err)
		return err;

	err = mtk_pcie_request_resources(pcie);
	if (err)
		goto put_resources;

	err = mtk_pcie_register_host(host);
	if (err)
		goto put_resources;

	return 0;

put_resources:
	if (!list_empty(&pcie->ports))
		mtk_pcie_put_resources(pcie);

	return err;
}

static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
	.ops = &mtk_pcie_ops,
	.startup = mtk_pcie_startup_port,
};

static const struct mtk_pcie_soc mtk_pcie_soc_mt2712 = {
	.ops = &mtk_pcie_ops_v2,
	.startup = mtk_pcie_startup_port_v2,
	.setup_irq = mtk_pcie_setup_irq,
};

static const struct mtk_pcie_soc mtk_pcie_soc_mt7622 = {
	.need_fix_class_id = true,
	.ops = &mtk_pcie_ops_v2,
	.startup = mtk_pcie_startup_port_v2,
	.setup_irq = mtk_pcie_setup_irq,
};

static const struct of_device_id mtk_pcie_ids[] = {
	{ .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 },
	{ .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 },
	{ .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 },
	{ .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_mt7622 },
	{},
};

static struct platform_driver mtk_pcie_driver = {
	.probe = mtk_pcie_probe,
	.driver = {
		.name = "mtk-pcie",
		.of_match_table = mtk_pcie_ids,
		.suppress_bind_attrs = true,
	},
};
builtin_platform_driver(mtk_pcie_driver);