// SPDX-License-Identifier: GPL-2.0
/*
 * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs
 *
 * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/resource.h>
#include <linux/types.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

#include "../../pci.h"
#include "pcie-designware.h"

/* PCIe controller wrapper DRA7XX configuration registers */

#define	PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN		0x0024
#define	PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN	0x0028
#define	ERR_SYS						BIT(0)
#define	ERR_FATAL					BIT(1)
#define	ERR_NONFATAL					BIT(2)
#define	ERR_COR						BIT(3)
#define	ERR_AXI						BIT(4)
#define	ERR_ECRC					BIT(5)
#define	PME_TURN_OFF					BIT(8)
#define	PME_TO_ACK					BIT(9)
#define	PM_PME						BIT(10)
#define	LINK_REQ_RST					BIT(11)
#define	LINK_UP_EVT					BIT(12)
#define	CFG_BME_EVT					BIT(13)
#define	CFG_MSE_EVT					BIT(14)
#define	INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
			ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
			LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)

#define	PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI		0x0034
#define	PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI		0x0038
#define	INTA						BIT(0)
#define	INTB						BIT(1)
#define	INTC						BIT(2)
#define	INTD						BIT(3)
#define	MSI						BIT(4)
#define	LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)

#define	PCIECTRL_TI_CONF_DEVICE_TYPE		0x0100
#define	DEVICE_TYPE_EP				0x0
#define	DEVICE_TYPE_LEG_EP			0x1
#define	DEVICE_TYPE_RC				0x4

#define	PCIECTRL_DRA7XX_CONF_DEVICE_CMD		0x0104
#define	LTSSM_EN				0x1

#define	PCIECTRL_DRA7XX_CONF_PHY_CS		0x010C
#define	LINK_UP					BIT(16)
#define	DRA7XX_CPU_TO_BUS_ADDR			0x0FFFFFFF

#define EXP_CAP_ID_OFFSET		0x70

#define	PCIECTRL_TI_CONF_INTX_ASSERT		0x0124
#define	PCIECTRL_TI_CONF_INTX_DEASSERT		0x0128

#define	PCIECTRL_TI_CONF_MSI_XMT		0x012c
#define MSI_REQ_GRANT				BIT(0)
#define MSI_VECTOR_SHIFT			7

struct dra7xx_pcie {
	struct dw_pcie		*pci;
	void __iomem		*base;		/* DT ti_conf */
	int			phy_count;	/* DT phy-names count */
	struct phy		**phy;
	int			link_gen;
	struct irq_domain	*irq_domain;
	enum dw_pcie_device_mode mode;
};

struct dra7xx_pcie_of_data {
	enum dw_pcie_device_mode mode;
};

#define to_dra7xx_pcie(x)	dev_get_drvdata((x)->dev)

static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
{
	return readl(pcie->base + offset);
}

static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
				      u32 value)
{
	writel(value, pcie->base + offset);
}

static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
{
	return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
}

static int dra7xx_pcie_link_up(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);

	return !!(reg & LINK_UP);
}
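/*
 * Link control: the LTSSM_EN bit in the wrapper's DEVICE_CMD register gates
 * link training. dra7xx_pcie_establish_link() sets it (after optionally
 * capping the link to Gen1), and dra7xx_pcie_stop_link() clears it again.
 */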
static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg &= ~LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
}

static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	struct device *dev = pci->dev;
	u32 reg;
	u32 exp_cap_off = EXP_CAP_ID_OFFSET;

	if (dw_pcie_link_up(pci)) {
		dev_err(dev, "link is already up\n");
		return 0;
	}

	if (dra7xx->link_gen == 1) {
		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
			     4, &reg);
		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
			dw_pcie_write(pci->dbi_base + exp_cap_off +
				      PCI_EXP_LNKCAP, 4, reg);
		}

		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
			     2, &reg);
		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
			dw_pcie_write(pci->dbi_base + exp_cap_off +
				      PCI_EXP_LNKCTL2, 2, reg);
		}
	}

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg |= LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

	return 0;
}

static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
			   LEG_EP_INTERRUPTS | MSI);

	dra7xx_pcie_writel(dra7xx,
			   PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
			   MSI | LEG_EP_INTERRUPTS);
}

static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
			   INTERRUPTS);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
			   INTERRUPTS);
}

static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
	dra7xx_pcie_enable_msi_interrupts(dra7xx);
}

static int dra7xx_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

	dw_pcie_setup_rc(pp);

	dra7xx_pcie_establish_link(pci);
	dw_pcie_wait_for_link(pci);
	dw_pcie_msi_init(pp);
	dra7xx_pcie_enable_interrupts(dra7xx);

	return 0;
}

static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
	.host_init = dra7xx_pcie_host_init,
};

static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = dra7xx_pcie_intx_map,
	.xlate = pci_irqd_intx_xlate,
};
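/*
 * The legacy INTA-INTD interrupts are reported through the wrapper's MSI
 * IRQSTATUS register; a small linear IRQ domain (one hwirq per INTx line)
 * lets dra7xx_pcie_msi_irq_handler() forward them to the mapped virqs.
 */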
static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node = of_get_next_child(node, NULL);

	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -ENODEV;
	}

	dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						   &intx_domain_ops, pp);
	if (!dra7xx->irq_domain) {
		dev_err(dev, "Failed to get a INTx IRQ domain\n");
		return -ENODEV;
	}

	return 0;
}

static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
{
	struct dra7xx_pcie *dra7xx = arg;
	struct dw_pcie *pci = dra7xx->pci;
	struct pcie_port *pp = &pci->pp;
	unsigned long reg;
	u32 virq, bit;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);

	switch (reg) {
	case MSI:
		dw_handle_msi_irq(pp);
		break;
	case INTA:
	case INTB:
	case INTC:
	case INTD:
		for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
			virq = irq_find_mapping(dra7xx->irq_domain, bit);
			if (virq)
				generic_handle_irq(virq);
		}
		break;
	}

	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);

	return IRQ_HANDLED;
}

static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
{
	struct dra7xx_pcie *dra7xx = arg;
	struct dw_pcie *pci = dra7xx->pci;
	struct device *dev = pci->dev;
	struct dw_pcie_ep *ep = &pci->ep;
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);

	if (reg & ERR_SYS)
		dev_dbg(dev, "System Error\n");

	if (reg & ERR_FATAL)
		dev_dbg(dev, "Fatal Error\n");

	if (reg & ERR_NONFATAL)
		dev_dbg(dev, "Non Fatal Error\n");

	if (reg & ERR_COR)
		dev_dbg(dev, "Correctable Error\n");

	if (reg & ERR_AXI)
		dev_dbg(dev, "AXI tag lookup fatal Error\n");

	if (reg & ERR_ECRC)
		dev_dbg(dev, "ECRC Error\n");

	if (reg & PME_TURN_OFF)
		dev_dbg(dev,
			"Power Management Event Turn-Off message received\n");

	if (reg & PME_TO_ACK)
		dev_dbg(dev,
			"Power Management Turn-Off Ack message received\n");

	if (reg & PM_PME)
		dev_dbg(dev, "PM Power Management Event message received\n");

	if (reg & LINK_REQ_RST)
		dev_dbg(dev, "Link Request Reset\n");

	if (reg & LINK_UP_EVT) {
		if (dra7xx->mode == DW_PCIE_EP_TYPE)
			dw_pcie_ep_linkup(ep);
		dev_dbg(dev, "Link-up state change\n");
	}

	if (reg & CFG_BME_EVT)
		dev_dbg(dev, "CFG 'Bus Master Enable' change\n");

	if (reg & CFG_MSE_EVT)
		dev_dbg(dev, "CFG 'Memory Space Enable' change\n");

	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);

	return IRQ_HANDLED;
}

static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	enum pci_barno bar;

	for (bar = BAR_0; bar <= BAR_5; bar++)
		dw_pcie_ep_reset_bar(pci, bar);

	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
}

static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
	mdelay(1);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1);
}

static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
				      u8 interrupt_num)
{
	u32 reg;

	reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT;
	reg |= MSI_REQ_GRANT;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
}
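/*
 * Endpoint-mode doorbells: a legacy interrupt is raised by pulsing the
 * TI_CONF INTX assert/deassert registers, an MSI by writing the vector
 * number plus MSI_REQ_GRANT into PCIECTRL_TI_CONF_MSI_XMT.
 */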
static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				 enum pci_epc_irq_type type, u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		dra7xx_pcie_raise_legacy_irq(dra7xx);
		break;
	case PCI_EPC_IRQ_MSI:
		dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
		break;
	default:
		dev_err(pci->dev, "UNKNOWN IRQ type\n");
	}

	return 0;
}

static struct dw_pcie_ep_ops pcie_ep_ops = {
	.ep_init = dra7xx_pcie_ep_init,
	.raise_irq = dra7xx_pcie_raise_irq,
};

static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
				     struct platform_device *pdev)
{
	int ret;
	struct dw_pcie_ep *ep;
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci = dra7xx->pci;

	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics");
	pci->dbi_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2");
	pci->dbi_base2 = devm_ioremap_resource(dev, res);
	if (IS_ERR(pci->dbi_base2))
		return PTR_ERR(pci->dbi_base2);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
	if (!res)
		return -EINVAL;

	ep->phys_base = res->start;
	ep->addr_size = resource_size(res);

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "failed to initialize endpoint\n");
		return ret;
	}

	return 0;
}

static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
				       struct platform_device *pdev)
{
	int ret;
	struct dw_pcie *pci = dra7xx->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;
	struct resource *res;

	pp->irq = platform_get_irq(pdev, 1);
	if (pp->irq < 0) {
		dev_err(dev, "missing IRQ resource\n");
		return pp->irq;
	}

	ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler,
			       IRQF_SHARED | IRQF_NO_THREAD,
			       "dra7-pcie-msi", dra7xx);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		return ret;
	}

	ret = dra7xx_pcie_init_irq_domain(pp);
	if (ret < 0)
		return ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
	pci->dbi_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	pp->ops = &dra7xx_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}

static const struct dw_pcie_ops dw_pcie_ops = {
	.cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup,
	.start_link = dra7xx_pcie_establish_link,
	.stop_link = dra7xx_pcie_stop_link,
	.link_up = dra7xx_pcie_link_up,
};

static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx)
{
	int phy_count = dra7xx->phy_count;

	while (phy_count--) {
		phy_power_off(dra7xx->phy[phy_count]);
		phy_exit(dra7xx->phy[phy_count]);
	}
}

static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx)
{
	int phy_count = dra7xx->phy_count;
	int ret;
	int i;

	for (i = 0; i < phy_count; i++) {
		ret = phy_init(dra7xx->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_power_on(dra7xx->phy[i]);
		if (ret < 0) {
			phy_exit(dra7xx->phy[i]);
			goto err_phy;
		}
	}

	return 0;

err_phy:
	while (--i >= 0) {
		phy_power_off(dra7xx->phy[i]);
		phy_exit(dra7xx->phy[i]);
	}

	return ret;
}
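/*
 * Match data: the same driver handles both root-complex and endpoint
 * variants of the controller, selected by the compatible string below.
 */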
static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = {
	.mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
	.mode = DW_PCIE_EP_TYPE,
};

static const struct of_device_id of_dra7xx_pcie_match[] = {
	{
		.compatible = "ti,dra7-pcie",
		.data = &dra7xx_pcie_rc_of_data,
	},
	{
		.compatible = "ti,dra7-pcie-ep",
		.data = &dra7xx_pcie_ep_of_data,
	},
	{},
};

/*
 * dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
 * @dra7xx: the dra7xx device where the workaround should be applied
 *
 * Accesses to the PCIe slave port that are not 32-bit aligned will result
 * in incorrect mapping to TLP Address and Byte enable fields. Therefore,
 * byte and half-word accesses are not possible to byte offset 0x1, 0x2, or
 * 0x3.
 *
 * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
 */
static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
{
	int ret;
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	struct regmap *regmap;

	regmap = syscon_regmap_lookup_by_phandle(np,
						 "ti,syscon-unaligned-access");
	if (IS_ERR(regmap)) {
		dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
		return -EINVAL;
	}

	ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
					       2, 0, &args);
	if (ret) {
		dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
		return ret;
	}

	ret = regmap_update_bits(regmap, args.args[0], args.args[1],
				 args.args[1]);
	if (ret)
		dev_err(dev, "failed to enable unaligned access\n");

	of_node_put(args.np);

	return ret;
}

static int __init dra7xx_pcie_probe(struct platform_device *pdev)
{
	u32 reg;
	int ret;
	int irq;
	int i;
	int phy_count;
	struct phy **phy;
	struct device_link **link;
	void __iomem *base;
	struct resource *res;
	struct dw_pcie *pci;
	struct dra7xx_pcie *dra7xx;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	char name[10];
	struct gpio_desc *reset;
	const struct of_device_id *match;
	const struct dra7xx_pcie_of_data *data;
	enum dw_pcie_device_mode mode;

	match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev);
	if (!match)
		return -EINVAL;

	data = (struct dra7xx_pcie_of_data *)match->data;
	mode = (enum dw_pcie_device_mode)data->mode;

	dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
	if (!dra7xx)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "missing IRQ resource: %d\n", irq);
		return irq;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
	base = devm_ioremap_nocache(dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	phy_count = of_property_count_strings(np, "phy-names");
	if (phy_count < 0) {
		dev_err(dev, "unable to find the strings\n");
		return phy_count;
	}

	phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

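	/*
	 * Look up each "pcie-phy<n>" PHY named in DT and add a stateless
	 * device link to its provider, keeping supplier/consumer PM and
	 * shutdown ordering between the controller and its PHYs.
	 */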
	for (i = 0; i < phy_count; i++) {
		snprintf(name, sizeof(name), "pcie-phy%d", i);
		phy[i] = devm_phy_get(dev, name);
		if (IS_ERR(phy[i]))
			return PTR_ERR(phy[i]);

		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
		if (!link[i]) {
			ret = -EINVAL;
			goto err_link;
		}
	}

	dra7xx->base = base;
	dra7xx->phy = phy;
	dra7xx->pci = pci;
	dra7xx->phy_count = phy_count;

	ret = dra7xx_pcie_enable_phy(dra7xx);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		return ret;
	}

	platform_set_drvdata(pdev, dra7xx);

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync failed\n");
		goto err_get_sync;
	}

	reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
	if (IS_ERR(reset)) {
		ret = PTR_ERR(reset);
		dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
		goto err_gpio;
	}

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg &= ~LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

	dra7xx->link_gen = of_pci_get_max_link_speed(np);
	if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2)
		dra7xx->link_gen = 2;

	switch (mode) {
	case DW_PCIE_RC_TYPE:
		if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) {
			ret = -ENODEV;
			goto err_gpio;
		}

		dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
				   DEVICE_TYPE_RC);

		ret = dra7xx_pcie_unaligned_memaccess(dev);
		if (ret)
			dev_err(dev, "WA for Errata i870 not applied\n");

		ret = dra7xx_add_pcie_port(dra7xx, pdev);
		if (ret < 0)
			goto err_gpio;
		break;
	case DW_PCIE_EP_TYPE:
		if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP)) {
			ret = -ENODEV;
			goto err_gpio;
		}

		dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
				   DEVICE_TYPE_EP);

		ret = dra7xx_pcie_unaligned_memaccess(dev);
		if (ret)
			goto err_gpio;

		ret = dra7xx_add_pcie_ep(dra7xx, pdev);
		if (ret < 0)
			goto err_gpio;
		break;
	default:
		dev_err(dev, "INVALID device type %d\n", mode);
	}
	dra7xx->mode = mode;

	ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
			       IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		goto err_gpio;
	}

	return 0;

err_gpio:
	pm_runtime_put(dev);

err_get_sync:
	pm_runtime_disable(dev);
	dra7xx_pcie_disable_phy(dra7xx);

err_link:
	while (--i >= 0)
		device_link_del(link[i]);

	return ret;
}

#ifdef CONFIG_PM_SLEEP
static int dra7xx_pcie_suspend(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	struct dw_pcie *pci = dra7xx->pci;
	u32 val;

	if (dra7xx->mode != DW_PCIE_RC_TYPE)
		return 0;

	/* clear MSE */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= ~PCI_COMMAND_MEMORY;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	return 0;
}

static int dra7xx_pcie_resume(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	struct dw_pcie *pci = dra7xx->pci;
	u32 val;

	if (dra7xx->mode != DW_PCIE_RC_TYPE)
		return 0;

	/* set MSE */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val |= PCI_COMMAND_MEMORY;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	return 0;
}

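/*
 * The noirq callbacks power the PHYs down across suspend and bring them
 * back up on resume; the RC-only suspend/resume callbacks above merely
 * toggle Memory Space Enable in the command register.
 */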
static int dra7xx_pcie_suspend_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);

	dra7xx_pcie_disable_phy(dra7xx);

	return 0;
}

static int dra7xx_pcie_resume_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int ret;

	ret = dra7xx_pcie_enable_phy(dra7xx);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		return ret;
	}

	return 0;
}
#endif

static void dra7xx_pcie_shutdown(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int ret;

	dra7xx_pcie_stop_link(dra7xx->pci);

	ret = pm_runtime_put_sync(dev);
	if (ret < 0)
		dev_dbg(dev, "pm_runtime_put_sync failed\n");

	pm_runtime_disable(dev);
	dra7xx_pcie_disable_phy(dra7xx);
}

static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
				      dra7xx_pcie_resume_noirq)
};

static struct platform_driver dra7xx_pcie_driver = {
	.driver = {
		.name	= "dra7-pcie",
		.of_match_table = of_dra7xx_pcie_match,
		.suppress_bind_attrs = true,
		.pm	= &dra7xx_pcie_pm_ops,
	},
	.shutdown = dra7xx_pcie_shutdown,
};
builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);