// SPDX-License-Identifier: GPL-2.0
/*
 * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs
 *
 * Copyright (C) 2013-2014 Texas Instruments Incorporated - https://www.ti.com
 *
 * Authors: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/resource.h>
#include <linux/types.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/gpio/consumer.h>

#include "../../pci.h"
#include "pcie-designware.h"

/* PCIe controller wrapper DRA7XX configuration registers */

/* "Main" wrapper IRQ: error, PM and link events */
#define	PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN		0x0024
#define	PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN	0x0028
#define	ERR_SYS						BIT(0)
#define	ERR_FATAL					BIT(1)
#define	ERR_NONFATAL					BIT(2)
#define	ERR_COR						BIT(3)
#define	ERR_AXI						BIT(4)
#define	ERR_ECRC					BIT(5)
#define	PME_TURN_OFF					BIT(8)
#define	PME_TO_ACK					BIT(9)
#define	PM_PME						BIT(10)
#define	LINK_REQ_RST					BIT(11)
#define	LINK_UP_EVT					BIT(12)
#define	CFG_BME_EVT					BIT(13)
#define	CFG_MSE_EVT					BIT(14)
#define	INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
			ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
			LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)

/* "MSI" wrapper IRQ: muxed MSI + legacy INTA..INTD */
#define	PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI		0x0034
#define	PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI		0x0038
#define	INTA						BIT(0)
#define	INTB						BIT(1)
#define	INTC						BIT(2)
#define	INTD						BIT(3)
#define	MSI						BIT(4)
#define	LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)

/* Controller operating mode selection (RC / EP / legacy EP) */
#define	PCIECTRL_TI_CONF_DEVICE_TYPE			0x0100
#define	DEVICE_TYPE_EP					0x0
#define	DEVICE_TYPE_LEG_EP				0x1
#define	DEVICE_TYPE_RC					0x4

#define	PCIECTRL_DRA7XX_CONF_DEVICE_CMD			0x0104
#define	LTSSM_EN					0x1

#define	PCIECTRL_DRA7XX_CONF_PHY_CS			0x010C
#define	LINK_UP						BIT(16)
/* Mask applied to CPU addresses to produce PCIe bus addresses */
#define	DRA7XX_CPU_TO_BUS_ADDR				0x0FFFFFFF

/* EP mode: assert/deassert legacy INTx toward the RC */
#define PCIECTRL_TI_CONF_INTX_ASSERT			0x0124
#define PCIECTRL_TI_CONF_INTX_DEASSERT			0x0128

/* EP mode: transmit an MSI (vector in bits above MSI_VECTOR_SHIFT) */
#define	PCIECTRL_TI_CONF_MSI_XMT			0x012c
#define MSI_REQ_GRANT					BIT(0)
#define MSI_VECTOR_SHIFT				7

/* Lane-selection bits in the CTRL_CORE syscon (see configure_two_lane) */
#define PCIE_1LANE_2LANE_SELECTION			BIT(13)
#define PCIE_B1C0_MODE_SEL				BIT(2)
#define PCIE_B0_B1_TSYNCEN				BIT(0)

struct dra7xx_pcie {
	struct dw_pcie		*pci;
	void __iomem		*base;		/* DT ti_conf */
	int			phy_count;	/* DT phy-names count */
	struct phy		**phy;
	struct irq_domain	*irq_domain;	/* legacy INTx domain */
	enum dw_pcie_device_mode mode;
};

/* Per-compatible match data */
struct dra7xx_pcie_of_data {
	enum dw_pcie_device_mode mode;
	u32 b1co_mode_sel_mask;
};

#define to_dra7xx_pcie(x)	dev_get_drvdata((x)->dev)

/* Read a 32-bit register from the TI wrapper ("ti_conf") address space. */
static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
{
	return readl(pcie->base + offset);
}

/* Write a 32-bit register in the TI wrapper ("ti_conf") address space. */
static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
				      u32 value)
{
	writel(value, pcie->base + offset);
}

/* Mask off the high nibble: the wrapper only forwards the low 28 bits. */
static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
{
	return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
}

/* Link state is reported by the LINK_UP bit of the wrapper PHY_CS register. */
static int dra7xx_pcie_link_up(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);

	return !!(reg & LINK_UP);
}

/* Halt link training by clearing LTSSM_EN. */
static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg &= ~LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
}

/*
 * Start link training by setting LTSSM_EN.  If the link is already up this
 * is reported (loudly) but still treated as success, since the desired end
 * state has been reached.
 */
static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	struct device *dev = pci->dev;
	u32 reg;

	if (dw_pcie_link_up(pci)) {
		dev_err(dev, "link is already up\n");
		return 0;
	}

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg |= LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

	return 0;
}

/*
 * Clear any stale status, then unmask the muxed MSI + legacy INTx
 * interrupts in the wrapper.  (IRQSTATUS registers are write-1-to-clear.)
 */
static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
			   LEG_EP_INTERRUPTS | MSI);

	dra7xx_pcie_writel(dra7xx,
			   PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
			   MSI | LEG_EP_INTERRUPTS);
}

/* Clear, then unmask, the "main" (error/PM/link event) wrapper interrupts. */
static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
			   INTERRUPTS);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
			   INTERRUPTS);
}

/* RC mode: enable both the main and the MSI/INTx wrapper interrupts. */
static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
	dra7xx_pcie_enable_msi_interrupts(dra7xx);
}

/* DesignWare host-init hook: only wrapper IRQ setup is DRA7xx-specific. */
static int dra7xx_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

	dra7xx_pcie_enable_interrupts(dra7xx);

	return 0;
}

/*
 * Map one legacy INTx hwirq.  The dummy chip is sufficient because
 * ack/mask is done centrally in the chained handler via IRQSTATUS_MSI.
 */
static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = dra7xx_pcie_intx_map,
	.xlate = pci_irqd_intx_xlate,
};

/*
 * Dispatch all pending MSIs of one MSI controller block (32 vectors).
 * Returns 1 if at least one vector was handled, 0 if the status was clean.
 */
static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned long val;
	int pos, irq;

	val = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
				(index * MSI_REG_CTRL_BLOCK_SIZE));
	if (!val)
		return 0;

	pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, 0);
	while (pos != MAX_MSI_IRQS_PER_CTRL) {
		irq = irq_find_mapping(pp->irq_domain,
				       (index * MAX_MSI_IRQS_PER_CTRL) + pos);
		generic_handle_irq(irq);
		pos++;
		pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, pos);
	}

	return 1;
}

static void dra7xx_pcie_handle_msi_irq(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	int ret, i, count, num_ctrls;

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	/*
	 * Need to make sure all MSI status bits read 0 before exiting.
	 * Else, new MSI IRQs are not registered by the wrapper. Have an
	 * upperbound for the loop and exit the IRQ in case of IRQ flood
	 * to avoid locking up system in interrupt context.
	 */
	count = 0;
	do {
		ret = 0;

		for (i = 0; i < num_ctrls; i++)
			ret |= dra7xx_pcie_handle_msi(pp, i);
		count++;
	} while (ret && count <= 1000);

	if (count > 1000)
		dev_warn_ratelimited(pci->dev,
				     "Too many MSI IRQs to handle\n");
}

/*
 * Chained handler for the muxed MSI/INTx wrapper interrupt.  The wrapper
 * status is read and acked up front (write-1-to-clear), then dispatched.
 *
 * NOTE(review): the switch matches reg against single-cause values, so a
 * status word with several bits set at once (e.g. MSI | INTA) matches no
 * case even though it has already been acked — confirm this cannot occur
 * on this wrapper, or whether a bit-test dispatch is needed instead.
 */
static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct dra7xx_pcie *dra7xx;
	struct dw_pcie *pci;
	struct pcie_port *pp;
	unsigned long reg;
	u32 virq, bit;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	pci = to_dw_pcie_from_pp(pp);
	dra7xx = to_dra7xx_pcie(pci);

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);

	switch (reg) {
	case MSI:
		dra7xx_pcie_handle_msi_irq(pp);
		break;
	case INTA:
	case INTB:
	case INTC:
	case INTD:
		for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
			virq = irq_find_mapping(dra7xx->irq_domain, bit);
			if (virq)
				generic_handle_irq(virq);
		}
		break;
	}

	chained_irq_exit(chip, desc);
}

/*
 * Handler for the "main" wrapper interrupt: logs error/PM/link events,
 * notifies the EP core on link-up (EP mode only), and acks the status
 * last so no event observed above is lost.
 */
static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
{
	struct dra7xx_pcie *dra7xx = arg;
	struct dw_pcie *pci = dra7xx->pci;
	struct device *dev = pci->dev;
	struct dw_pcie_ep *ep = &pci->ep;
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);

	if (reg & ERR_SYS)
		dev_dbg(dev, "System Error\n");

	if (reg & ERR_FATAL)
		dev_dbg(dev, "Fatal Error\n");

	if (reg & ERR_NONFATAL)
		dev_dbg(dev, "Non Fatal Error\n");

	if (reg & ERR_COR)
		dev_dbg(dev, "Correctable Error\n");

	if (reg & ERR_AXI)
		dev_dbg(dev, "AXI tag lookup fatal Error\n");

	if (reg & ERR_ECRC)
		dev_dbg(dev, "ECRC Error\n");

	if (reg & PME_TURN_OFF)
		dev_dbg(dev,
			"Power Management Event Turn-Off message received\n");

	if (reg & PME_TO_ACK)
		dev_dbg(dev,
			"Power Management Turn-Off Ack message received\n");

	if (reg & PM_PME)
		dev_dbg(dev, "PM Power Management Event message received\n");

	if (reg & LINK_REQ_RST)
		dev_dbg(dev, "Link Request Reset\n");

	if (reg & LINK_UP_EVT) {
		if (dra7xx->mode == DW_PCIE_EP_TYPE)
			dw_pcie_ep_linkup(ep);
		dev_dbg(dev, "Link-up state change\n");
	}

	if (reg & CFG_BME_EVT)
		dev_dbg(dev, "CFG 'Bus Master Enable' change\n");

	if (reg & CFG_MSE_EVT)
		dev_dbg(dev, "CFG 'Memory Space Enable' change\n");

	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);

	return IRQ_HANDLED;
}

/*
 * Create the legacy INTx IRQ domain from the first DT child node
 * (the interrupt-controller subnode) and install the chained handler
 * for the muxed MSI/INTx wrapper interrupt.
 */
static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node = of_get_next_child(node, NULL);

	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -ENODEV;
	}

	irq_set_chained_handler_and_data(pp->irq, dra7xx_pcie_msi_irq_handler,
					 pp);
	dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						   &intx_domain_ops, pp);
	of_node_put(pcie_intc_node);
	if (!dra7xx->irq_domain) {
		dev_err(dev, "Failed to get a INTx IRQ domain\n");
		return -ENODEV;
	}

	return 0;
}

static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
	.host_init = dra7xx_pcie_host_init,
};

/* EP-mode init: clear all BARs and enable the main wrapper interrupts. */
static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	enum pci_barno bar;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
		dw_pcie_ep_reset_bar(pci, bar);

	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
}

/*
 * EP mode: pulse a legacy interrupt toward the RC.  The 1 ms delay keeps
 * the INTx asserted long enough for the RC to latch it.
 */
static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
	mdelay(1);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1);
}

/* EP mode: transmit MSI <interrupt_num> (1-based) via the wrapper. */
static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
				      u8 interrupt_num)
{
	u32 reg;

	reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT;
	reg |= MSI_REQ_GRANT;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
}

/* DesignWare EP raise_irq hook; MSI-X is not supported by this wrapper. */
static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				 enum pci_epc_irq_type type, u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		dra7xx_pcie_raise_legacy_irq(dra7xx);
		break;
	case PCI_EPC_IRQ_MSI:
		dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
		break;
	default:
		dev_err(pci->dev, "UNKNOWN IRQ type\n");
	}

	return 0;
}

static const struct pci_epc_features dra7xx_pcie_epc_features = {
	.linkup_notifier = true,
	.msi_capable = true,
	.msix_capable = false,
};

static const struct pci_epc_features*
dra7xx_pcie_get_features(struct dw_pcie_ep *ep)
{
	return &dra7xx_pcie_epc_features;
}

static const struct dw_pcie_ep_ops pcie_ep_ops = {
	.ep_init = dra7xx_pcie_ep_init,
	.raise_irq = dra7xx_pcie_raise_irq,
	.get_features = dra7xx_pcie_get_features,
};

/* Map the EP DBI spaces ("ep_dbics"/"ep_dbics2") and register the EP core. */
static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
				     struct platform_device *pdev)
{
	int ret;
	struct dw_pcie_ep *ep;
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci = dra7xx->pci;

	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "ep_dbics");
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	pci->dbi_base2 =
		devm_platform_ioremap_resource_byname(pdev, "ep_dbics2");
	if (IS_ERR(pci->dbi_base2))
		return PTR_ERR(pci->dbi_base2);

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "failed to initialize endpoint\n");
		return ret;
	}

	return 0;
}

/*
 * RC mode setup: take the second platform IRQ (the muxed MSI/INTx line),
 * build the INTx domain, map the RC DBI space ("rc_dbics") and register
 * the DesignWare host.
 */
static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
				       struct platform_device *pdev)
{
	int ret;
	struct dw_pcie *pci = dra7xx->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;

	pp->irq = platform_get_irq(pdev, 1);
	if (pp->irq < 0)
		return pp->irq;

	/* MSI IRQ is muxed */
	pp->msi_irq = -ENODEV;

	ret = dra7xx_pcie_init_irq_domain(pp);
	if (ret < 0)
		return ret;

	pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "rc_dbics");
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	pp->ops = &dra7xx_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}

static const struct dw_pcie_ops dw_pcie_ops = {
	.cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup,
	.start_link = dra7xx_pcie_establish_link,
	.stop_link = dra7xx_pcie_stop_link,
	.link_up = dra7xx_pcie_link_up,
};

/* Power off and exit all PHYs, in reverse order of dra7xx_pcie_enable_phy(). */
static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx)
{
	int phy_count = dra7xx->phy_count;

	while (phy_count--) {
		phy_power_off(dra7xx->phy[phy_count]);
		phy_exit(dra7xx->phy[phy_count]);
	}
}

/*
 * Set mode, init and power on each PHY; on failure, unwind the PHYs that
 * were already brought up and return the first error.
 */
static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx)
{
	int phy_count = dra7xx->phy_count;
	int ret;
	int i;

	for (i = 0; i < phy_count; i++) {
		ret = phy_set_mode(dra7xx->phy[i], PHY_MODE_PCIE);
		if (ret < 0)
			goto err_phy;

		ret = phy_init(dra7xx->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_power_on(dra7xx->phy[i]);
		if (ret < 0) {
			/* This PHY is inited but not powered; exit it here. */
			phy_exit(dra7xx->phy[i]);
			goto err_phy;
		}
	}

	return 0;

err_phy:
	while (--i >= 0) {
		phy_power_off(dra7xx->phy[i]);
		phy_exit(dra7xx->phy[i]);
	}

	return ret;
}

static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = {
	.mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
	.mode = DW_PCIE_EP_TYPE,
};

static const struct dra7xx_pcie_of_data dra746_pcie_rc_of_data = {
	.b1co_mode_sel_mask = BIT(2),
	.mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra726_pcie_rc_of_data = {
	.b1co_mode_sel_mask = GENMASK(3, 2),
	.mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra746_pcie_ep_of_data = {
	.b1co_mode_sel_mask = BIT(2),
	.mode = DW_PCIE_EP_TYPE,
};

static const struct dra7xx_pcie_of_data dra726_pcie_ep_of_data = {
	.b1co_mode_sel_mask = GENMASK(3, 2),
	.mode = DW_PCIE_EP_TYPE,
};

static const struct of_device_id of_dra7xx_pcie_match[] = {
	{
		.compatible = "ti,dra7-pcie",
		.data = &dra7xx_pcie_rc_of_data,
	},
	{
		.compatible = "ti,dra7-pcie-ep",
		.data = &dra7xx_pcie_ep_of_data,
	},
	{
		.compatible = "ti,dra746-pcie-rc",
		.data = &dra746_pcie_rc_of_data,
	},
	{
		.compatible = "ti,dra726-pcie-rc",
		.data = &dra726_pcie_rc_of_data,
	},
	{
		.compatible = "ti,dra746-pcie-ep",
		.data = &dra746_pcie_ep_of_data,
	},
	{
		.compatible = "ti,dra726-pcie-ep",
		.data = &dra726_pcie_ep_of_data,
	},
	{},
};

/*
 * dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
 * @dev: device whose DT node carries the "ti,syscon-unaligned-access" phandle
 *
 * Access to the PCIe slave port that are not 32-bit aligned will result
 * in incorrect mapping to TLP Address and Byte enable fields. Therefore,
 * byte and half-word accesses are not possible to byte offset 0x1, 0x2, or
 * 0x3.
 *
 * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
 */
static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
{
	int ret;
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	struct regmap *regmap;

	regmap = syscon_regmap_lookup_by_phandle(np,
						 "ti,syscon-unaligned-access");
	if (IS_ERR(regmap)) {
		dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
		return -EINVAL;
	}

	/* Phandle has two fixed args: register offset and bit mask. */
	ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
					       2, 0, &args);
	if (ret) {
		dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
		return ret;
	}

	ret = regmap_update_bits(regmap, args.args[0], args.args[1],
				 args.args[1]);
	if (ret)
		dev_err(dev, "failed to enable unaligned access\n");

	of_node_put(args.np);

	return ret;
}

/*
 * Configure x2 lane operation via the "ti,syscon-lane-sel" syscon:
 * select B1C0 mode and enable B0/B1 sync, using the SoC-specific
 * b1co_mode_sel_mask from the match data.
 */
static int dra7xx_pcie_configure_two_lane(struct device *dev,
					  u32 b1co_mode_sel_mask)
{
	struct device_node *np = dev->of_node;
	struct regmap *pcie_syscon;
	unsigned int pcie_reg;
	u32 mask;
	u32 val;

	pcie_syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel");
	if (IS_ERR(pcie_syscon)) {
		dev_err(dev, "unable to get ti,syscon-lane-sel\n");
		return -EINVAL;
	}

	if (of_property_read_u32_index(np, "ti,syscon-lane-sel", 1,
				       &pcie_reg)) {
		dev_err(dev, "couldn't get lane selection reg offset\n");
		return -EINVAL;
	}

	mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN;
	val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN;
	regmap_update_bits(pcie_syscon, pcie_reg, mask, val);

	return 0;
}

/*
 * Probe: resolve match data (RC vs EP, lane-select mask), map "ti_conf",
 * acquire and power the PHYs, enable runtime PM, program the device type,
 * apply the i870 workaround and register either the host or the endpoint,
 * then install the main wrapper IRQ handler.
 */
static int __init dra7xx_pcie_probe(struct platform_device *pdev)
{
	u32 reg;
	int ret;
	int irq;
	int i;
	int phy_count;
	struct phy **phy;
	struct device_link **link;
	void __iomem *base;
	struct dw_pcie *pci;
	struct dra7xx_pcie *dra7xx;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	char name[10];
	struct gpio_desc *reset;
	const struct of_device_id *match;
	const struct dra7xx_pcie_of_data *data;
	enum dw_pcie_device_mode mode;
	u32 b1co_mode_sel_mask;

	match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev);
	if (!match)
		return -EINVAL;

	data = (struct dra7xx_pcie_of_data *)match->data;
	mode = (enum dw_pcie_device_mode)data->mode;
	b1co_mode_sel_mask = data->b1co_mode_sel_mask;

	dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
	if (!dra7xx)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	/* First platform IRQ is the "main" (error/PM/link event) interrupt. */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	base = devm_platform_ioremap_resource_byname(pdev, "ti_conf");
	if (IS_ERR(base))
		return PTR_ERR(base);

	phy_count = of_property_count_strings(np, "phy-names");
	if (phy_count < 0) {
		dev_err(dev, "unable to find the strings\n");
		return phy_count;
	}

	phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	/* Grab "pcie-phy0".."pcie-phyN" and link them as PM suppliers. */
	for (i = 0; i < phy_count; i++) {
		snprintf(name, sizeof(name), "pcie-phy%d", i);
		phy[i] = devm_phy_get(dev, name);
		if (IS_ERR(phy[i]))
			return PTR_ERR(phy[i]);

		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
		if (!link[i]) {
			ret = -EINVAL;
			goto err_link;
		}
	}

	dra7xx->base = base;
	dra7xx->phy = phy;
	dra7xx->pci = pci;
	dra7xx->phy_count = phy_count;

	if (phy_count == 2) {
		ret = dra7xx_pcie_configure_two_lane(dev, b1co_mode_sel_mask);
		if (ret < 0)
			dra7xx->phy_count = 1; /* Fallback to x1 lane mode */
	}

	ret = dra7xx_pcie_enable_phy(dra7xx);
	if (ret) {
		/*
		 * NOTE(review): returning here leaves the stateless device
		 * links created above in place — confirm whether they should
		 * be torn down via err_link on this path as well.
		 */
		dev_err(dev, "failed to enable phy\n");
		return ret;
	}

	platform_set_drvdata(pdev, dra7xx);

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync failed\n");
		goto err_get_sync;
	}

	/* Optional reset GPIO; requested high so the device leaves reset. */
	reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
	if (IS_ERR(reset)) {
		ret = PTR_ERR(reset);
		dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
		goto err_gpio;
	}

	/* Keep link training disabled until the core starts it explicitly. */
	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg &= ~LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

	switch (mode) {
	case DW_PCIE_RC_TYPE:
		if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) {
			ret = -ENODEV;
			goto err_gpio;
		}

		dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
				   DEVICE_TYPE_RC);

		/* i870 workaround is best-effort in RC mode. */
		ret = dra7xx_pcie_unaligned_memaccess(dev);
		if (ret)
			dev_err(dev, "WA for Errata i870 not applied\n");

		ret = dra7xx_add_pcie_port(dra7xx, pdev);
		if (ret < 0)
			goto err_gpio;
		break;
	case DW_PCIE_EP_TYPE:
		if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP)) {
			ret = -ENODEV;
			goto err_gpio;
		}

		dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
				   DEVICE_TYPE_EP);

		/* ...but mandatory in EP mode. */
		ret = dra7xx_pcie_unaligned_memaccess(dev);
		if (ret)
			goto err_gpio;

		ret = dra7xx_add_pcie_ep(dra7xx, pdev);
		if (ret < 0)
			goto err_gpio;
		break;
	default:
		dev_err(dev, "INVALID device type %d\n", mode);
	}
	dra7xx->mode = mode;

	ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
			       IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		goto err_gpio;
	}

	return 0;

err_gpio:
err_get_sync:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	dra7xx_pcie_disable_phy(dra7xx);

err_link:
	while (--i >= 0)
		device_link_del(link[i]);

	return ret;
}

#ifdef CONFIG_PM_SLEEP
/* RC mode only: clear Memory Space Enable before suspending. */
static int dra7xx_pcie_suspend(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	struct dw_pcie *pci = dra7xx->pci;
	u32 val;

	if (dra7xx->mode != DW_PCIE_RC_TYPE)
		return 0;

	/* clear MSE */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= ~PCI_COMMAND_MEMORY;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	return 0;
}

/* RC mode only: restore Memory Space Enable on resume. */
static int dra7xx_pcie_resume(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	struct dw_pcie *pci = dra7xx->pci;
	u32 val;

	if (dra7xx->mode != DW_PCIE_RC_TYPE)
		return 0;

	/* set MSE */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val |= PCI_COMMAND_MEMORY;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	return 0;
}

/* Power the PHYs down in the noirq phase, after regular suspend callbacks. */
static int dra7xx_pcie_suspend_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);

	dra7xx_pcie_disable_phy(dra7xx);

	return 0;
}

/* Bring the PHYs back up early, before regular resume callbacks run. */
static int dra7xx_pcie_resume_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int ret;

	ret = dra7xx_pcie_enable_phy(dra7xx);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		return ret;
	}

	return 0;
}
#endif

/* Stop link training and power everything down on system shutdown. */
static void dra7xx_pcie_shutdown(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int ret;

	dra7xx_pcie_stop_link(dra7xx->pci);

	ret = pm_runtime_put_sync(dev);
	if (ret < 0)
		dev_dbg(dev, "pm_runtime_put_sync failed\n");

	pm_runtime_disable(dev);
	dra7xx_pcie_disable_phy(dra7xx);
}

static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
				      dra7xx_pcie_resume_noirq)
};

static struct platform_driver dra7xx_pcie_driver = {
	.driver = {
		.name	= "dra7-pcie",
		.of_match_table = of_dra7xx_pcie_match,
		.suppress_bind_attrs = true,
		.pm	= &dra7xx_pcie_pm_ops,
	},
	.shutdown = dra7xx_pcie_shutdown,
};
builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);