// SPDX-License-Identifier: GPL-2.0
/*
 * MediaTek PCIe host controller driver.
 *
 * Copyright (c) 2017 MediaTek Inc.
 * Author: Ryder Lee <ryder.lee@mediatek.com>
 *	   Honghui Zhang <honghui.zhang@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include "../pci.h"

/* PCIe shared registers */
#define PCIE_SYS_CFG		0x00
#define PCIE_INT_ENABLE		0x0c
#define PCIE_CFG_ADDR		0x20
#define PCIE_CFG_DATA		0x24

/* PCIe per port registers */
#define PCIE_BAR0_SETUP		0x10
#define PCIE_CLASS		0x34
#define PCIE_LINK_STATUS	0x50

#define PCIE_PORT_INT_EN(x)	BIT(20 + (x))
#define PCIE_PORT_PERST(x)	BIT(1 + (x))
#define PCIE_PORT_LINKUP	BIT(0)
#define PCIE_BAR_MAP_MAX	GENMASK(31, 16)

#define PCIE_BAR_ENABLE		BIT(0)
#define PCIE_REVISION_ID	BIT(0)
#define PCIE_CLASS_CODE		(0x60400 << 8)
/* Encode a config-space register offset into the shared PCIE_CFG_ADDR layout */
#define PCIE_CONF_REG(regn)	(((regn) & GENMASK(7, 2)) | \
				((((regn) >> 8) & GENMASK(3, 0)) << 24))
#define PCIE_CONF_FUN(fun)	(((fun) << 8) & GENMASK(10, 8))
#define PCIE_CONF_DEV(dev)	(((dev) << 11) & GENMASK(15, 11))
#define PCIE_CONF_BUS(bus)	(((bus) << 16) & GENMASK(23, 16))
#define PCIE_CONF_ADDR(regn, fun, dev, bus) \
	(PCIE_CONF_REG(regn) | PCIE_CONF_FUN(fun) | \
	 PCIE_CONF_DEV(dev) | PCIE_CONF_BUS(bus))

/* MediaTek specific configuration registers */
#define PCIE_FTS_NUM		0x70c
#define PCIE_FTS_NUM_MASK	GENMASK(15, 8)
/*
 * NOTE(review): '<<' binds tighter than '&', so this expands to
 * ((x) & 0xff00); PCIE_FTS_NUM_L0(0x50) therefore evaluates to 0,
 * not 0x50 << 8. Confirm against the datasheet whether
 * (((x) & 0xff) << 8) was intended before changing behavior.
 */
#define PCIE_FTS_NUM_L0(x)	((x) & 0xff << 8)

#define PCIE_FC_CREDIT		0x73c
#define PCIE_FC_CREDIT_MASK	(GENMASK(31, 31) | GENMASK(28, 16))
#define PCIE_FC_CREDIT_VAL(x)	((x) << 16)

/* PCIe V2 share registers */
#define PCIE_SYS_CFG_V2		0x0
#define PCIE_CSR_LTSSM_EN(x)	BIT(0 + (x) * 8)
#define PCIE_CSR_ASPM_L1_EN(x)	BIT(1 + (x) * 8)

/* PCIe V2 per-port registers */
#define PCIE_MSI_VECTOR		0x0c0

#define PCIE_CONF_VEND_ID	0x100
#define PCIE_CONF_CLASS_ID	0x106

#define PCIE_INT_MASK		0x420
#define INTX_MASK		GENMASK(19, 16)
#define INTX_SHIFT		16
#define PCIE_INT_STATUS		0x424
#define MSI_STATUS		BIT(23)
#define PCIE_IMSI_STATUS	0x42c
#define PCIE_IMSI_ADDR		0x430
#define MSI_MASK		BIT(23)
#define MTK_MSI_IRQS_NUM	32

#define PCIE_AHB_TRANS_BASE0_L	0x438
#define PCIE_AHB_TRANS_BASE0_H	0x43c
#define AHB2PCIE_SIZE(x)	((x) & GENMASK(4, 0))
#define PCIE_AXI_WINDOW0	0x448
#define WIN_ENABLE		BIT(7)

/* PCIe V2 configuration transaction header */
#define PCIE_CFG_HEADER0	0x460
#define PCIE_CFG_HEADER1	0x464
#define PCIE_CFG_HEADER2	0x468
#define PCIE_CFG_WDATA		0x470
#define PCIE_APP_TLP_REQ	0x488
#define PCIE_CFG_RDATA		0x48c
#define APP_CFG_REQ		BIT(0)
#define APP_CPL_STATUS		GENMASK(7, 5)

#define CFG_WRRD_TYPE_0		4
#define CFG_WR_FMT		2
#define CFG_RD_FMT		0

/* TLP header dword fields for the V2 config transaction engine */
#define CFG_DW0_LENGTH(length)	((length) & GENMASK(9, 0))
#define CFG_DW0_TYPE(type)	(((type) << 24) & GENMASK(28, 24))
#define CFG_DW0_FMT(fmt)	(((fmt) << 29) & GENMASK(31, 29))
#define CFG_DW2_REGN(regn)	((regn) & GENMASK(11, 2))
#define CFG_DW2_FUN(fun)	(((fun) << 16) & GENMASK(18, 16))
#define CFG_DW2_DEV(dev)	(((dev) << 19) & GENMASK(23, 19))
#define CFG_DW2_BUS(bus)	(((bus) << 24) & GENMASK(31, 24))
#define CFG_HEADER_DW0(type, fmt) \
	(CFG_DW0_LENGTH(1) | CFG_DW0_TYPE(type) | CFG_DW0_FMT(fmt))
/* Byte-enable bits shifted by the byte offset within the dword */
#define CFG_HEADER_DW1(where, size) \
	(GENMASK(((size) - 1), 0) << ((where) & 0x3))
#define CFG_HEADER_DW2(regn, fun, dev, bus) \
	(CFG_DW2_REGN(regn) | CFG_DW2_FUN(fun) | \
	 CFG_DW2_DEV(dev) | CFG_DW2_BUS(bus))

/* Per-port reset control register and bits (V2 hosts) */
#define PCIE_RST_CTRL		0x510
#define PCIE_PHY_RSTB		BIT(0)
#define PCIE_PIPE_SRSTB		BIT(1)
#define PCIE_MAC_SRSTB		BIT(2)
#define PCIE_CRSTB		BIT(3)
#define PCIE_PERSTB		BIT(8)
#define PCIE_LINKDOWN_RST_EN	GENMASK(15, 13)
#define PCIE_LINK_STATUS_V2	0x804
#define PCIE_PORT_LINKUP_V2	BIT(10)

struct mtk_pcie_port;

/**
 * struct mtk_pcie_soc - differentiate between host generations
 * @need_fix_class_id: whether this host's class ID needed to be fixed or not
 * @ops: pointer to configuration access functions
 * @startup: pointer to controller setting functions
 * @setup_irq: pointer to initialize IRQ functions
 */
struct mtk_pcie_soc {
	bool need_fix_class_id;
	struct pci_ops *ops;
	int (*startup)(struct mtk_pcie_port *port);
	int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node);
};

/**
 * struct mtk_pcie_port - PCIe port information
 * @base: IO mapped register base
 * @list: port list
 * @pcie: pointer to PCIe host info
 * @reset: pointer to port reset control
 * @sys_ck: pointer to transaction/data link layer clock
 * @ahb_ck: pointer to AHB slave interface operating clock for CSR access
 *          and RC initiated MMIO access
 * @axi_ck: pointer to application layer MMIO channel operating clock
 * @aux_ck: pointer to pe2_mac_bridge and pe2_mac_core operating clock
 *          when pcie_mac_ck/pcie_pipe_ck is turned off
 * @obff_ck: pointer to OBFF functional block operating clock
 * @pipe_ck: pointer to LTSSM and PHY/MAC layer operating clock
 * @phy: pointer to PHY control block
 * @lane: lane count
 * @slot: port slot
 * @irq: GIC irq
 * @irq_domain: legacy INTx IRQ domain
 * @inner_domain: inner IRQ domain
 * @msi_domain: MSI IRQ domain
 * @lock: protect the msi_irq_in_use bitmap
 * @msi_irq_in_use: bit map for assigned MSI IRQ
 */
struct mtk_pcie_port {
	void __iomem *base;
	struct list_head list;
	struct mtk_pcie *pcie;
	struct reset_control *reset;
	struct clk *sys_ck;
	struct clk *ahb_ck;
	struct clk *axi_ck;
	struct clk *aux_ck;
	struct clk *obff_ck;
	struct clk *pipe_ck;
	struct phy *phy;
	u32 lane;
	u32 slot;
	int irq;
	struct irq_domain *irq_domain;
	struct irq_domain *inner_domain;
	struct irq_domain *msi_domain;
	struct mutex lock;
	DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM);
};

/**
 * struct mtk_pcie - PCIe host information
 * @dev: pointer to PCIe device
 * @base: IO mapped register base
 * @free_ck: free-run reference clock
 * @io: IO resource
 * @pio: PIO resource
 * @mem: non-prefetchable memory resource
 * @busn: bus range
 * @offset: IO / Memory offset
 * @ports: pointer to PCIe port information
 * @soc: pointer to SoC-dependent operations
 */
struct mtk_pcie {
	struct device *dev;
	void __iomem *base;
	struct clk *free_ck;

	struct resource io;
	struct resource pio;
	struct resource mem;
	struct resource busn;
	struct {
		resource_size_t mem;
		resource_size_t io;
	} offset;
	struct list_head ports;
	const struct mtk_pcie_soc *soc;
};

/* Undo mtk_pcie_subsys_powerup(): drop the top-level clock and runtime PM. */
static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie)
{
	struct device *dev = pcie->dev;

	clk_disable_unprepare(pcie->free_ck);

	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
}

/* Release one port's mapping/allocation and unlink it from the host list. */
static void mtk_pcie_port_free(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;

	devm_iounmap(dev, port->base);
	list_del(&port->list);
	devm_kfree(dev, port);
}

/* Power down PHY and clocks for every port, then the shared subsystem. */
static void mtk_pcie_put_resources(struct mtk_pcie *pcie)
{
	struct mtk_pcie_port *port, *tmp;

	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		phy_power_off(port->phy);
		phy_exit(port->phy);
		clk_disable_unprepare(port->pipe_ck);
		clk_disable_unprepare(port->obff_ck);
		clk_disable_unprepare(port->axi_ck);
		clk_disable_unprepare(port->aux_ck);
		clk_disable_unprepare(port->ahb_ck);
		clk_disable_unprepare(port->sys_ck);
		mtk_pcie_port_free(port);
	}

	mtk_pcie_subsys_powerdown(pcie);
}

/*
 * Wait for the pending config request to be accepted by hardware, then
 * check the completion status field for errors.
 */
static int mtk_pcie_check_cfg_cpld(struct mtk_pcie_port *port)
{
	u32 val;
	int err;

	err = readl_poll_timeout_atomic(port->base + PCIE_APP_TLP_REQ, val,
					!(val & APP_CFG_REQ), 10,
					100 * USEC_PER_MSEC);
	if (err)
		return PCIBIOS_SET_FAILED;

	if (readl(port->base + PCIE_APP_TLP_REQ) & APP_CPL_STATUS)
		return PCIBIOS_SET_FAILED;

	return PCIBIOS_SUCCESSFUL;
}

/* Issue a Cfgrd TLP through the V2 host's header/data registers. */
static int mtk_pcie_hw_rd_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
			      int where, int size, u32 *val)
{
	u32 tmp;

	/* Write PCIe configuration transaction header for Cfgrd */
	writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_RD_FMT),
	       port->base + PCIE_CFG_HEADER0);
	writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
	writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
	       port->base + PCIE_CFG_HEADER2);

	/* Trigger h/w to transmit Cfgrd TLP */
	tmp = readl(port->base + PCIE_APP_TLP_REQ);
	tmp |= APP_CFG_REQ;
	writel(tmp, port->base + PCIE_APP_TLP_REQ);

	/* Check completion status */
	if (mtk_pcie_check_cfg_cpld(port))
		return PCIBIOS_SET_FAILED;

	/* Read cpld payload of Cfgrd */
	*val = readl(port->base + PCIE_CFG_RDATA);

	/* Extract the addressed byte/halfword from the 32-bit data window */
	if (size == 1)
		*val = (*val >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (8 * (where & 3))) & 0xffff;

	return PCIBIOS_SUCCESSFUL;
}

/* Issue a Cfgwr TLP through the V2 host's header/data registers. */
static int mtk_pcie_hw_wr_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
			      int where, int size, u32 val)
{
	/* Write PCIe configuration transaction header for Cfgwr */
	writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_WR_FMT),
	       port->base + PCIE_CFG_HEADER0);
	writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
	writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
	       port->base + PCIE_CFG_HEADER2);

	/* Write Cfgwr data, shifted into the addressed byte lanes */
	val = val << 8 * (where & 3);
	writel(val, port->base + PCIE_CFG_WDATA);

	/* Trigger h/w to transmit Cfgwr TLP */
	val = readl(port->base + PCIE_APP_TLP_REQ);
	val |= APP_CFG_REQ;
	writel(val, port->base + PCIE_APP_TLP_REQ);

	/* Check completion status */
	return mtk_pcie_check_cfg_cpld(port);
}

/* Resolve which root port owns the config access for the given bus/devfn. */
static struct mtk_pcie_port *mtk_pcie_find_port(struct pci_bus *bus,
						unsigned int devfn)
{
	struct mtk_pcie *pcie = bus->sysdata;
	struct mtk_pcie_port *port;
	struct pci_dev *dev = NULL;

	/*
	 * Walk the bus hierarchy to get the devfn value
	 * of the port in the root bus.
	 */
	while (bus && bus->number) {
		dev = bus->self;
		bus = dev->bus;
		devfn = dev->devfn;
	}

	list_for_each_entry(port, &pcie->ports, list)
		if (port->slot == PCI_SLOT(devfn))
			return port;

	return NULL;
}

static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 *val)
{
	struct mtk_pcie_port *port;
	u32 bn = bus->number;
	int ret;

	port = mtk_pcie_find_port(bus, devfn);
	if (!port) {
		/* All-ones is the conventional "no device" config read value */
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	ret = mtk_pcie_hw_rd_cfg(port, bn, devfn, where, size, val);
	if (ret)
		*val = ~0;

	return ret;
}

static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	struct mtk_pcie_port *port;
	u32 bn = bus->number;

	port = mtk_pcie_find_port(bus, devfn);
	if (!port)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return mtk_pcie_hw_wr_cfg(port, bn, devfn, where, size,
				  val);
}

static struct pci_ops mtk_pcie_ops_v2 = {
	.read  = mtk_pcie_config_read,
	.write = mtk_pcie_config_write,
};

/* Fill in the MSI message pointing devices at the port's PCIE_MSI_VECTOR. */
static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
	phys_addr_t addr;

	/* MT2712/MT7622 only support 32-bit MSI addresses */
	addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
	msg->address_hi = 0;
	msg->address_lo = lower_32_bits(addr);

	msg->data = data->hwirq;

	dev_dbg(port->pcie->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)data->hwirq, msg->address_hi, msg->address_lo);
}

/* Affinity is not supported by this MSI implementation. */
static int mtk_msi_set_affinity(struct irq_data *irq_data,
				const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

/* Ack an MSI by clearing its bit in the per-port MSI status register. */
static void mtk_msi_ack_irq(struct irq_data *data)
{
	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
	u32 hwirq = data->hwirq;

	writel(1 << hwirq, port->base + PCIE_IMSI_STATUS);
}

static struct irq_chip mtk_msi_bottom_irq_chip = {
	.name			= "MTK MSI",
	.irq_compose_msi_msg	= mtk_compose_msi_msg,
	.irq_set_affinity	= mtk_msi_set_affinity,
	.irq_ack		= mtk_msi_ack_irq,
};

/* Allocate one MSI hwirq from the per-port bitmap (under port->lock). */
static int mtk_pcie_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				     unsigned int nr_irqs, void *args)
{
	struct mtk_pcie_port *port = domain->host_data;
	unsigned long bit;

	WARN_ON(nr_irqs != 1);
	mutex_lock(&port->lock);

	bit = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM);
	if (bit >= MTK_MSI_IRQS_NUM) {
		mutex_unlock(&port->lock);
		return -ENOSPC;
	}

	__set_bit(bit, port->msi_irq_in_use);

	mutex_unlock(&port->lock);

	irq_domain_set_info(domain, virq, bit, &mtk_msi_bottom_irq_chip,
			    domain->host_data, handle_edge_irq,
			    NULL, NULL);

	return 0;
}

/* Return an MSI hwirq to the per-port bitmap (under port->lock). */
static void mtk_pcie_irq_domain_free(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(d);

	mutex_lock(&port->lock);

	if (!test_bit(d->hwirq, port->msi_irq_in_use))
		dev_err(port->pcie->dev, "trying to free unused MSI#%lu\n",
			d->hwirq);
	else
		__clear_bit(d->hwirq, port->msi_irq_in_use);

	mutex_unlock(&port->lock);

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc	= mtk_pcie_irq_domain_alloc,
	.free	= mtk_pcie_irq_domain_free,
};

static struct irq_chip mtk_msi_irq_chip = {
	.name		= "MTK PCIe MSI",
	.irq_ack	= irq_chip_ack_parent,
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
};

static struct msi_domain_info mtk_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX),
	.chip	= &mtk_msi_irq_chip,
};

/* Create the inner (hwirq) and PCI MSI irq domains for one port. */
static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port)
{
	struct fwnode_handle *fwnode = of_node_to_fwnode(port->pcie->dev->of_node);

	mutex_init(&port->lock);

	port->inner_domain = irq_domain_create_linear(fwnode, MTK_MSI_IRQS_NUM,
						      &msi_domain_ops, port);
	if (!port->inner_domain) {
		dev_err(port->pcie->dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	port->msi_domain = pci_msi_create_irq_domain(fwnode, &mtk_msi_domain_info,
						     port->inner_domain);
	if (!port->msi_domain) {
		dev_err(port->pcie->dev, "failed to create MSI domain\n");
		irq_domain_remove(port->inner_domain);
		return -ENOMEM;
	}

	return 0;
}

/* Program the MSI capture address and unmask the MSI summary interrupt. */
static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
{
	u32 val;
	phys_addr_t msg_addr;

	msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
	val = lower_32_bits(msg_addr);
	writel(val,
port->base + PCIE_IMSI_ADDR); 530 531 val = readl(port->base + PCIE_INT_MASK); 532 val &= ~MSI_MASK; 533 writel(val, port->base + PCIE_INT_MASK); 534 } 535 536 static void mtk_pcie_irq_teardown(struct mtk_pcie *pcie) 537 { 538 struct mtk_pcie_port *port, *tmp; 539 540 list_for_each_entry_safe(port, tmp, &pcie->ports, list) { 541 irq_set_chained_handler_and_data(port->irq, NULL, NULL); 542 543 if (port->irq_domain) 544 irq_domain_remove(port->irq_domain); 545 546 if (IS_ENABLED(CONFIG_PCI_MSI)) { 547 if (port->msi_domain) 548 irq_domain_remove(port->msi_domain); 549 if (port->inner_domain) 550 irq_domain_remove(port->inner_domain); 551 } 552 553 irq_dispose_mapping(port->irq); 554 } 555 } 556 557 static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq, 558 irq_hw_number_t hwirq) 559 { 560 irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); 561 irq_set_chip_data(irq, domain->host_data); 562 563 return 0; 564 } 565 566 static const struct irq_domain_ops intx_domain_ops = { 567 .map = mtk_pcie_intx_map, 568 }; 569 570 static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port, 571 struct device_node *node) 572 { 573 struct device *dev = port->pcie->dev; 574 struct device_node *pcie_intc_node; 575 int ret; 576 577 /* Setup INTx */ 578 pcie_intc_node = of_get_next_child(node, NULL); 579 if (!pcie_intc_node) { 580 dev_err(dev, "no PCIe Intc node found\n"); 581 return -ENODEV; 582 } 583 584 port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, 585 &intx_domain_ops, port); 586 if (!port->irq_domain) { 587 dev_err(dev, "failed to get INTx IRQ domain\n"); 588 return -ENODEV; 589 } 590 591 if (IS_ENABLED(CONFIG_PCI_MSI)) { 592 ret = mtk_pcie_allocate_msi_domains(port); 593 if (ret) 594 return ret; 595 } 596 597 return 0; 598 } 599 600 static void mtk_pcie_intr_handler(struct irq_desc *desc) 601 { 602 struct mtk_pcie_port *port = irq_desc_get_handler_data(desc); 603 struct irq_chip *irqchip = irq_desc_get_chip(desc); 604 
unsigned long status; 605 u32 virq; 606 u32 bit = INTX_SHIFT; 607 608 chained_irq_enter(irqchip, desc); 609 610 status = readl(port->base + PCIE_INT_STATUS); 611 if (status & INTX_MASK) { 612 for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) { 613 /* Clear the INTx */ 614 writel(1 << bit, port->base + PCIE_INT_STATUS); 615 virq = irq_find_mapping(port->irq_domain, 616 bit - INTX_SHIFT); 617 generic_handle_irq(virq); 618 } 619 } 620 621 if (IS_ENABLED(CONFIG_PCI_MSI)) { 622 if (status & MSI_STATUS){ 623 unsigned long imsi_status; 624 625 while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) { 626 for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) { 627 virq = irq_find_mapping(port->inner_domain, bit); 628 generic_handle_irq(virq); 629 } 630 } 631 /* Clear MSI interrupt status */ 632 writel(MSI_STATUS, port->base + PCIE_INT_STATUS); 633 } 634 } 635 636 chained_irq_exit(irqchip, desc); 637 638 return; 639 } 640 641 static int mtk_pcie_setup_irq(struct mtk_pcie_port *port, 642 struct device_node *node) 643 { 644 struct mtk_pcie *pcie = port->pcie; 645 struct device *dev = pcie->dev; 646 struct platform_device *pdev = to_platform_device(dev); 647 int err; 648 649 err = mtk_pcie_init_irq_domain(port, node); 650 if (err) { 651 dev_err(dev, "failed to init PCIe IRQ domain\n"); 652 return err; 653 } 654 655 port->irq = platform_get_irq(pdev, port->slot); 656 irq_set_chained_handler_and_data(port->irq, 657 mtk_pcie_intr_handler, port); 658 659 return 0; 660 } 661 662 static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port) 663 { 664 struct mtk_pcie *pcie = port->pcie; 665 struct resource *mem = &pcie->mem; 666 const struct mtk_pcie_soc *soc = port->pcie->soc; 667 u32 val; 668 size_t size; 669 int err; 670 671 /* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */ 672 if (pcie->base) { 673 val = readl(pcie->base + PCIE_SYS_CFG_V2); 674 val |= PCIE_CSR_LTSSM_EN(port->slot) | 675 PCIE_CSR_ASPM_L1_EN(port->slot); 676 writel(val, 
		       pcie->base + PCIE_SYS_CFG_V2);
	}

	/* Assert all reset signals */
	writel(0, port->base + PCIE_RST_CTRL);

	/*
	 * Enable PCIe link down reset, if link status changed from link up to
	 * link down, this will reset MAC control registers and configuration
	 * space.
	 */
	writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);

	/* De-assert PHY, PE, PIPE, MAC and configuration reset	*/
	val = readl(port->base + PCIE_RST_CTRL);
	val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
	       PCIE_MAC_SRSTB | PCIE_CRSTB;
	writel(val, port->base + PCIE_RST_CTRL);

	/* Set up vendor ID and class code */
	if (soc->need_fix_class_id) {
		val = PCI_VENDOR_ID_MEDIATEK;
		writew(val, port->base + PCIE_CONF_VEND_ID);

		val = PCI_CLASS_BRIDGE_PCI;
		writew(val, port->base + PCIE_CONF_CLASS_ID);
	}

	/* 100ms timeout value should be enough for Gen1/2 training */
	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
				 !!(val & PCIE_PORT_LINKUP_V2), 20,
				 100 * USEC_PER_MSEC);
	if (err)
		return -ETIMEDOUT;

	/* Set INTx mask */
	val = readl(port->base + PCIE_INT_MASK);
	val &= ~INTX_MASK;
	writel(val, port->base + PCIE_INT_MASK);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		mtk_pcie_enable_msi(port);

	/*
	 * Set AHB to PCIe translation windows.
	 * NOTE(review): this is mem->end - mem->start, one byte short of
	 * resource_size(mem); fls() of either usually agrees — confirm the
	 * intended window size before changing.
	 */
	size = mem->end - mem->start;
	val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size));
	writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);

	val = upper_32_bits(mem->start);
	writel(val, port->base + PCIE_AHB_TRANS_BASE0_H);

	/* Set PCIe to AXI translation memory space.*/
	val = fls(0xffffffff) | WIN_ENABLE;
	writel(val, port->base + PCIE_AXI_WINDOW0);

	return 0;
}

/* Program the shared config-address register; return the data window. */
static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus,
				      unsigned int devfn, int where)
{
	struct mtk_pcie *pcie = bus->sysdata;

	writel(PCIE_CONF_ADDR(where, PCI_FUNC(devfn), PCI_SLOT(devfn),
			      bus->number), pcie->base + PCIE_CFG_ADDR);

	return pcie->base + PCIE_CFG_DATA + (where & 3);
}

static struct pci_ops mtk_pcie_ops = {
	.map_bus = mtk_pcie_map_bus,
	.read  = pci_generic_config_read,
	.write = pci_generic_config_write,
};

/* Bring up a V1 (shared-register) port: PERST cycle, link training, IRQs. */
static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	u32 func = PCI_FUNC(port->slot << 3);
	u32 slot = PCI_SLOT(port->slot << 3);
	u32 val;
	int err;

	/* assert port PERST_N */
	val = readl(pcie->base + PCIE_SYS_CFG);
	val |= PCIE_PORT_PERST(port->slot);
	writel(val, pcie->base + PCIE_SYS_CFG);

	/* de-assert port PERST_N */
	val = readl(pcie->base + PCIE_SYS_CFG);
	val &= ~PCIE_PORT_PERST(port->slot);
	writel(val, pcie->base + PCIE_SYS_CFG);

	/* 100ms timeout value should be enough for Gen1/2 training */
	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS, val,
				 !!(val & PCIE_PORT_LINKUP), 20,
				 100 * USEC_PER_MSEC);
	if (err)
		return -ETIMEDOUT;

	/* enable interrupt */
	val = readl(pcie->base + PCIE_INT_ENABLE);
	val |= PCIE_PORT_INT_EN(port->slot);
	writel(val, pcie->base + PCIE_INT_ENABLE);

	/* map to all DDR region. We need to set it before cfg operation.
	 */
	writel(PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE,
	       port->base + PCIE_BAR0_SETUP);

	/* configure class code and revision ID */
	writel(PCIE_CLASS_CODE | PCIE_REVISION_ID, port->base + PCIE_CLASS);

	/* configure FC credit */
	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	val = readl(pcie->base + PCIE_CFG_DATA);
	val &= ~PCIE_FC_CREDIT_MASK;
	val |= PCIE_FC_CREDIT_VAL(0x806c);
	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	writel(val, pcie->base + PCIE_CFG_DATA);

	/* configure RC FTS number to 250 when it leaves L0s */
	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	val = readl(pcie->base + PCIE_CFG_DATA);
	val &= ~PCIE_FTS_NUM_MASK;
	val |= PCIE_FTS_NUM_L0(0x50);
	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	writel(val, pcie->base + PCIE_CFG_DATA);

	return 0;
}

/*
 * Enable clocks, reset and PHY for one port, then run the SoC startup
 * hook. On any failure (including link-down) the port is torn down and
 * freed, so only live ports remain on pcie->ports.
 */
static void mtk_pcie_enable_port(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;
	int err;

	err = clk_prepare_enable(port->sys_ck);
	if (err) {
		dev_err(dev, "failed to enable sys_ck%d clock\n", port->slot);
		goto err_sys_clk;
	}

	err = clk_prepare_enable(port->ahb_ck);
	if (err) {
		dev_err(dev, "failed to enable ahb_ck%d\n", port->slot);
		goto err_ahb_clk;
	}

	err = clk_prepare_enable(port->aux_ck);
	if (err) {
		dev_err(dev, "failed to enable aux_ck%d\n", port->slot);
		goto err_aux_clk;
	}

	err = clk_prepare_enable(port->axi_ck);
	if (err) {
		dev_err(dev, "failed to enable axi_ck%d\n", port->slot);
		goto err_axi_clk;
	}

	err = clk_prepare_enable(port->obff_ck);
	if (err) {
		dev_err(dev, "failed to enable obff_ck%d\n", port->slot);
		goto err_obff_clk;
	}

	err = clk_prepare_enable(port->pipe_ck);
	if (err) {
		dev_err(dev, "failed to enable pipe_ck%d\n", port->slot);
		goto err_pipe_clk;
	}

	/* Pulse the port reset before touching the PHY */
	reset_control_assert(port->reset);
	reset_control_deassert(port->reset);

	err = phy_init(port->phy);
	if (err) {
		dev_err(dev, "failed to initialize port%d phy\n", port->slot);
		goto err_phy_init;
	}

	err = phy_power_on(port->phy);
	if (err) {
		dev_err(dev, "failed to power on port%d phy\n", port->slot);
		goto err_phy_on;
	}

	if (!pcie->soc->startup(port))
		return;

	/* Startup failed (e.g. no link) — unwind everything and drop the port */
	dev_info(dev, "Port%d link down\n", port->slot);

	phy_power_off(port->phy);
err_phy_on:
	phy_exit(port->phy);
err_phy_init:
	clk_disable_unprepare(port->pipe_ck);
err_pipe_clk:
	clk_disable_unprepare(port->obff_ck);
err_obff_clk:
	clk_disable_unprepare(port->axi_ck);
err_axi_clk:
	clk_disable_unprepare(port->aux_ck);
err_aux_clk:
	clk_disable_unprepare(port->ahb_ck);
err_ahb_clk:
	clk_disable_unprepare(port->sys_ck);
err_sys_clk:
	mtk_pcie_port_free(port);
}

/* Parse one port's DT node: registers, clocks, reset, PHY and IRQs. */
static int mtk_pcie_parse_port(struct mtk_pcie *pcie,
			       struct device_node *node,
			       int slot)
{
	struct mtk_pcie_port *port;
	struct resource *regs;
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	char name[10];
	int err;

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	err = of_property_read_u32(node, "num-lanes", &port->lane);
	if (err) {
		dev_err(dev, "missing num-lanes property\n");
		return err;
	}

	snprintf(name, sizeof(name), "port%d", slot);
	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	port->base = devm_ioremap_resource(dev, regs);
	if (IS_ERR(port->base)) {
		dev_err(dev, "failed to map port%d base\n", slot);
		return PTR_ERR(port->base);
	}

	snprintf(name, sizeof(name), "sys_ck%d", slot);
	port->sys_ck =
		       devm_clk_get(dev, name);
	if (IS_ERR(port->sys_ck)) {
		dev_err(dev, "failed to get sys_ck%d clock\n", slot);
		return PTR_ERR(port->sys_ck);
	}

	/* sys_ck might be divided into the following parts in some chips */
	snprintf(name, sizeof(name), "ahb_ck%d", slot);
	port->ahb_ck = devm_clk_get(dev, name);
	if (IS_ERR(port->ahb_ck)) {
		if (PTR_ERR(port->ahb_ck) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		/* Optional clock: treat "not present" as NULL (no-op) */
		port->ahb_ck = NULL;
	}

	snprintf(name, sizeof(name), "axi_ck%d", slot);
	port->axi_ck = devm_clk_get(dev, name);
	if (IS_ERR(port->axi_ck)) {
		if (PTR_ERR(port->axi_ck) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		port->axi_ck = NULL;
	}

	snprintf(name, sizeof(name), "aux_ck%d", slot);
	port->aux_ck = devm_clk_get(dev, name);
	if (IS_ERR(port->aux_ck)) {
		if (PTR_ERR(port->aux_ck) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		port->aux_ck = NULL;
	}

	snprintf(name, sizeof(name), "obff_ck%d", slot);
	port->obff_ck = devm_clk_get(dev, name);
	if (IS_ERR(port->obff_ck)) {
		if (PTR_ERR(port->obff_ck) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		port->obff_ck = NULL;
	}

	snprintf(name, sizeof(name), "pipe_ck%d", slot);
	port->pipe_ck = devm_clk_get(dev, name);
	if (IS_ERR(port->pipe_ck)) {
		if (PTR_ERR(port->pipe_ck) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		port->pipe_ck = NULL;
	}

	snprintf(name, sizeof(name), "pcie-rst%d", slot);
	port->reset = devm_reset_control_get_optional_exclusive(dev, name);
	if (PTR_ERR(port->reset) == -EPROBE_DEFER)
		return PTR_ERR(port->reset);

	/* some platforms may use default PHY setting */
	snprintf(name, sizeof(name), "pcie-phy%d", slot);
	port->phy = devm_phy_optional_get(dev, name);
	if (IS_ERR(port->phy))
		return PTR_ERR(port->phy);

	port->slot = slot;
	port->pcie = pcie;

	if (pcie->soc->setup_irq) {
		err = pcie->soc->setup_irq(port, node);
		if (err)
			return err;
	}

	INIT_LIST_HEAD(&port->list);
	list_add_tail(&port->list, &pcie->ports);

	return 0;
}

/* Map the optional shared register block, enable runtime PM and free_ck. */
static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *regs;
	int err;

	/* get shared registers, which are optional */
	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "subsys");
	if (regs) {
		pcie->base = devm_ioremap_resource(dev, regs);
		if (IS_ERR(pcie->base)) {
			dev_err(dev, "failed to map shared register\n");
			return PTR_ERR(pcie->base);
		}
	}

	pcie->free_ck = devm_clk_get(dev, "free_ck");
	if (IS_ERR(pcie->free_ck)) {
		if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		/* Optional clock: NULL makes clk_* calls no-ops */
		pcie->free_ck = NULL;
	}

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	/* enable top level clock */
	err = clk_prepare_enable(pcie->free_ck);
	if (err) {
		dev_err(dev, "failed to enable free_ck\n");
		goto err_free_ck;
	}

	return 0;

err_free_ck:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);

	return err;
}

/* Parse DT ranges/bus-range, create ports, power up and train links. */
static int mtk_pcie_setup(struct mtk_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct device_node *node = dev->of_node, *child;
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	struct resource res;
	struct mtk_pcie_port *port, *tmp;
	int err;

	if (of_pci_range_parser_init(&parser, node)) {
		dev_err(dev, "missing \"ranges\" property\n");
		return -EINVAL;
	}

	for_each_of_pci_range(&parser, &range) {
		err = of_pci_range_to_resource(&range, node, &res);
		if (err < 0)
			return err;

		switch (res.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			pcie->offset.io = res.start - range.pci_addr;

			memcpy(&pcie->pio, &res, sizeof(res));
			pcie->pio.name = node->full_name;

			/* Record the CPU-visible window for iospace remapping */
			pcie->io.start = range.cpu_addr;
			pcie->io.end = range.cpu_addr + range.size - 1;
			pcie->io.flags = IORESOURCE_MEM;
			pcie->io.name = "I/O";

			memcpy(&res, &pcie->io, sizeof(res));
			break;

		case IORESOURCE_MEM:
			pcie->offset.mem = res.start - range.pci_addr;

			memcpy(&pcie->mem, &res, sizeof(res));
			pcie->mem.name = "non-prefetchable";
			break;
		}
	}

	err = of_pci_parse_bus_range(node, &pcie->busn);
	if (err < 0) {
		/* Fall back to the full 0x00-0xff bus range */
		dev_err(dev, "failed to parse bus ranges property: %d\n", err);
		pcie->busn.name = node->name;
		pcie->busn.start = 0;
		pcie->busn.end = 0xff;
		pcie->busn.flags = IORESOURCE_BUS;
	}

	for_each_available_child_of_node(node, child) {
		int slot;

		err = of_pci_get_devfn(child);
		if (err < 0) {
			dev_err(dev, "failed to parse devfn: %d\n", err);
			return err;
		}

		slot = PCI_SLOT(err);

		err = mtk_pcie_parse_port(pcie, child, slot);
		if (err)
			return err;
	}

	err = mtk_pcie_subsys_powerup(pcie);
	if (err)
		return err;

	/* enable each port, and then check link status */
	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
		mtk_pcie_enable_port(port);

	/* power down PCIe subsys if slots are all empty (link down) */
	if (list_empty(&pcie->ports))
		mtk_pcie_subsys_powerdown(pcie);

	return 0;
}

/* Populate the host bridge windows and claim/remap the bus resources. */
static int mtk_pcie_request_resources(struct mtk_pcie *pcie)
{
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct list_head *windows = &host->windows;
	struct device *dev = pcie->dev;
	int err;

	pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
	pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
	pci_add_resource(windows, &pcie->busn);

	err = devm_request_pci_bus_resources(dev, windows);
	if (err < 0)
		return err;

	err = devm_pci_remap_iospace(dev, &pcie->pio, pcie->io.start);
	if (err)
		return err;

	return 0;
}

static int mtk_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mtk_pcie *pcie;
	struct pci_host_bridge *host;
	int err;

	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!host)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(host);

	pcie->dev = dev;
	pcie->soc = of_device_get_match_data(dev);
	platform_set_drvdata(pdev, pcie);
	INIT_LIST_HEAD(&pcie->ports);

	err = mtk_pcie_setup(pcie);
	if (err)
		return err;

	err = mtk_pcie_request_resources(pcie);
	if (err)
		goto put_resources;

	host->busnr = pcie->busn.start;
	host->dev.parent = pcie->dev;
	host->ops = pcie->soc->ops;
	host->map_irq = of_irq_parse_and_map_pci;
	host->swizzle_irq = pci_common_swizzle;
	host->sysdata = pcie;

	err = pci_host_probe(host);
	if (err)
		goto put_resources;

	return 0;

put_resources:
	if (!list_empty(&pcie->ports))
		mtk_pcie_put_resources(pcie);

	return err;
}


/* Drop the window list built in mtk_pcie_request_resources(). */
static void mtk_pcie_free_resources(struct mtk_pcie *pcie)
{
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct list_head *windows = &host->windows;

	pci_free_resource_list(windows);
}

static int mtk_pcie_remove(struct platform_device *pdev)
{
	struct mtk_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);

	pci_stop_root_bus(host->bus);
	pci_remove_root_bus(host->bus);
	mtk_pcie_free_resources(pcie);

	mtk_pcie_irq_teardown(pcie);

	mtk_pcie_put_resources(pcie);

	return 0;
}

static int
__maybe_unused mtk_pcie_suspend_noirq(struct device *dev) 1222 { 1223 struct mtk_pcie *pcie = dev_get_drvdata(dev); 1224 struct mtk_pcie_port *port; 1225 1226 if (list_empty(&pcie->ports)) 1227 return 0; 1228 1229 list_for_each_entry(port, &pcie->ports, list) { 1230 clk_disable_unprepare(port->pipe_ck); 1231 clk_disable_unprepare(port->obff_ck); 1232 clk_disable_unprepare(port->axi_ck); 1233 clk_disable_unprepare(port->aux_ck); 1234 clk_disable_unprepare(port->ahb_ck); 1235 clk_disable_unprepare(port->sys_ck); 1236 phy_power_off(port->phy); 1237 phy_exit(port->phy); 1238 } 1239 1240 clk_disable_unprepare(pcie->free_ck); 1241 1242 return 0; 1243 } 1244 1245 static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev) 1246 { 1247 struct mtk_pcie *pcie = dev_get_drvdata(dev); 1248 struct mtk_pcie_port *port, *tmp; 1249 1250 if (list_empty(&pcie->ports)) 1251 return 0; 1252 1253 clk_prepare_enable(pcie->free_ck); 1254 1255 list_for_each_entry_safe(port, tmp, &pcie->ports, list) 1256 mtk_pcie_enable_port(port); 1257 1258 /* In case of EP was removed while system suspend. 
*/ 1259 if (list_empty(&pcie->ports)) 1260 clk_disable_unprepare(pcie->free_ck); 1261 1262 return 0; 1263 } 1264 1265 static const struct dev_pm_ops mtk_pcie_pm_ops = { 1266 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq, 1267 mtk_pcie_resume_noirq) 1268 }; 1269 1270 static const struct mtk_pcie_soc mtk_pcie_soc_v1 = { 1271 .ops = &mtk_pcie_ops, 1272 .startup = mtk_pcie_startup_port, 1273 }; 1274 1275 static const struct mtk_pcie_soc mtk_pcie_soc_mt2712 = { 1276 .ops = &mtk_pcie_ops_v2, 1277 .startup = mtk_pcie_startup_port_v2, 1278 .setup_irq = mtk_pcie_setup_irq, 1279 }; 1280 1281 static const struct mtk_pcie_soc mtk_pcie_soc_mt7622 = { 1282 .need_fix_class_id = true, 1283 .ops = &mtk_pcie_ops_v2, 1284 .startup = mtk_pcie_startup_port_v2, 1285 .setup_irq = mtk_pcie_setup_irq, 1286 }; 1287 1288 static const struct of_device_id mtk_pcie_ids[] = { 1289 { .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 }, 1290 { .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 }, 1291 { .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 }, 1292 { .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_mt7622 }, 1293 {}, 1294 }; 1295 1296 static struct platform_driver mtk_pcie_driver = { 1297 .probe = mtk_pcie_probe, 1298 .remove = mtk_pcie_remove, 1299 .driver = { 1300 .name = "mtk-pcie", 1301 .of_match_table = mtk_pcie_ids, 1302 .suppress_bind_attrs = true, 1303 .pm = &mtk_pcie_pm_ops, 1304 }, 1305 }; 1306 module_platform_driver(mtk_pcie_driver); 1307 MODULE_LICENSE("GPL v2"); 1308