1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs 4 * 5 * Copyright (C) 2013-2014 Texas Instruments Incorporated - https://www.ti.com 6 * 7 * Authors: Kishon Vijay Abraham I <kishon@ti.com> 8 */ 9 10 #include <linux/delay.h> 11 #include <linux/device.h> 12 #include <linux/err.h> 13 #include <linux/interrupt.h> 14 #include <linux/irq.h> 15 #include <linux/irqdomain.h> 16 #include <linux/kernel.h> 17 #include <linux/init.h> 18 #include <linux/of_device.h> 19 #include <linux/of_gpio.h> 20 #include <linux/of_pci.h> 21 #include <linux/pci.h> 22 #include <linux/phy/phy.h> 23 #include <linux/platform_device.h> 24 #include <linux/pm_runtime.h> 25 #include <linux/resource.h> 26 #include <linux/types.h> 27 #include <linux/mfd/syscon.h> 28 #include <linux/regmap.h> 29 #include <linux/gpio/consumer.h> 30 31 #include "../../pci.h" 32 #include "pcie-designware.h" 33 34 /* PCIe controller wrapper DRA7XX configuration registers */ 35 36 #define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN 0x0024 37 #define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN 0x0028 38 #define ERR_SYS BIT(0) 39 #define ERR_FATAL BIT(1) 40 #define ERR_NONFATAL BIT(2) 41 #define ERR_COR BIT(3) 42 #define ERR_AXI BIT(4) 43 #define ERR_ECRC BIT(5) 44 #define PME_TURN_OFF BIT(8) 45 #define PME_TO_ACK BIT(9) 46 #define PM_PME BIT(10) 47 #define LINK_REQ_RST BIT(11) 48 #define LINK_UP_EVT BIT(12) 49 #define CFG_BME_EVT BIT(13) 50 #define CFG_MSE_EVT BIT(14) 51 #define INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \ 52 ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \ 53 LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT) 54 55 #define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI 0x0034 56 #define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI 0x0038 57 #define INTA BIT(0) 58 #define INTB BIT(1) 59 #define INTC BIT(2) 60 #define INTD BIT(3) 61 #define MSI BIT(4) 62 #define LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD) 63 64 #define PCIECTRL_TI_CONF_DEVICE_TYPE 0x0100 65 #define DEVICE_TYPE_EP 0x0 66 #define DEVICE_TYPE_LEG_EP 0x1 67 #define DEVICE_TYPE_RC 0x4 68 69 #define PCIECTRL_DRA7XX_CONF_DEVICE_CMD 0x0104 70 #define LTSSM_EN 0x1 71 72 #define PCIECTRL_DRA7XX_CONF_PHY_CS 0x010C 73 #define LINK_UP BIT(16) 74 #define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF 75 76 #define PCIECTRL_TI_CONF_INTX_ASSERT 0x0124 77 #define PCIECTRL_TI_CONF_INTX_DEASSERT 0x0128 78 79 #define PCIECTRL_TI_CONF_MSI_XMT 0x012c 80 #define MSI_REQ_GRANT BIT(0) 81 #define MSI_VECTOR_SHIFT 7 82 83 #define PCIE_1LANE_2LANE_SELECTION BIT(13) 84 #define PCIE_B1C0_MODE_SEL BIT(2) 85 #define PCIE_B0_B1_TSYNCEN BIT(0) 86 87 struct dra7xx_pcie { 88 struct dw_pcie *pci; 89 void __iomem *base; /* DT ti_conf */ 90 int phy_count; /* DT phy-names count */ 91 struct phy **phy; 92 struct irq_domain *irq_domain; 93 enum dw_pcie_device_mode mode; 94 }; 95 96 struct dra7xx_pcie_of_data { 97 enum dw_pcie_device_mode mode; 98 u32 b1co_mode_sel_mask; 99 }; 100 101 #define to_dra7xx_pcie(x) dev_get_drvdata((x)->dev) 102 103 static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset) 104 { 105 return readl(pcie->base + offset); 106 } 107 108 static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset, 109 u32 value) 110 { 111 writel(value, pcie->base + offset); 112 } 113 114 static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr) 115 { 116 return pci_addr & DRA7XX_CPU_TO_BUS_ADDR; 117 } 118 119 static int dra7xx_pcie_link_up(struct dw_pcie *pci) 120 { 121 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); 122 u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS); 123 124 return !!(reg & LINK_UP); 125 } 126 127 static void dra7xx_pcie_stop_link(struct dw_pcie *pci) 128 { 129 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); 130 u32 reg; 131 132 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); 133 reg &= ~LTSSM_EN; 134 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); 135 } 136 137 static int dra7xx_pcie_establish_link(struct dw_pcie *pci) 138 { 139 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); 140 struct device *dev = pci->dev; 141 u32 reg; 142 143 if (dw_pcie_link_up(pci)) { 144 dev_err(dev, "link is already up\n"); 145 return 0; 146 } 147 148 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); 149 reg |= LTSSM_EN; 150 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); 151 152 return 0; 153 } 154 155 static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx) 156 { 157 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, 158 LEG_EP_INTERRUPTS | MSI); 159 160 dra7xx_pcie_writel(dra7xx, 161 PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI, 162 MSI | LEG_EP_INTERRUPTS); 163 } 164 165 static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx) 166 { 167 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, 168 INTERRUPTS); 169 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, 170 INTERRUPTS); 171 } 172 173 static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx) 174 { 175 dra7xx_pcie_enable_wrapper_interrupts(dra7xx); 176 dra7xx_pcie_enable_msi_interrupts(dra7xx); 177 } 178 179 static int dra7xx_pcie_host_init(struct pcie_port *pp) 180 { 181 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 182 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); 183 184 dw_pcie_setup_rc(pp); 185 186 dra7xx_pcie_establish_link(pci); 187 dw_pcie_wait_for_link(pci); 188 dw_pcie_msi_init(pp); 189 dra7xx_pcie_enable_interrupts(dra7xx); 190 191 return 0; 192 } 193 194 static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq, 195 irq_hw_number_t hwirq) 196 { 197 irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); 198 irq_set_chip_data(irq, domain->host_data); 199 200 return 0; 201 } 202 203 static const struct irq_domain_ops intx_domain_ops = { 204 .map = dra7xx_pcie_intx_map, 205 .xlate = pci_irqd_intx_xlate, 206 }; 207 208 static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index) 209 { 210 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 211 unsigned long val; 212 int pos, irq; 213 214 val = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS + 215 (index * MSI_REG_CTRL_BLOCK_SIZE)); 216 if (!val) 217 return 0; 218 219 pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, 0); 220 while (pos != MAX_MSI_IRQS_PER_CTRL) { 221 irq = irq_find_mapping(pp->irq_domain, 222 (index * MAX_MSI_IRQS_PER_CTRL) + pos); 223 generic_handle_irq(irq); 224 pos++; 225 pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, pos); 226 } 227 228 return 1; 229 } 230 231 static void dra7xx_pcie_handle_msi_irq(struct pcie_port *pp) 232 { 233 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 234 int ret, i, count, num_ctrls; 235 236 num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; 237 238 /** 239 * Need to make sure all MSI status bits read 0 before exiting. 240 * Else, new MSI IRQs are not registered by the wrapper. Have an 241 * upperbound for the loop and exit the IRQ in case of IRQ flood 242 * to avoid locking up system in interrupt context. 243 */ 244 count = 0; 245 do { 246 ret = 0; 247 248 for (i = 0; i < num_ctrls; i++) 249 ret |= dra7xx_pcie_handle_msi(pp, i); 250 count++; 251 } while (ret && count <= 1000); 252 253 if (count > 1000) 254 dev_warn_ratelimited(pci->dev, 255 "Too many MSI IRQs to handle\n"); 256 } 257 258 static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc) 259 { 260 struct irq_chip *chip = irq_desc_get_chip(desc); 261 struct dra7xx_pcie *dra7xx; 262 struct dw_pcie *pci; 263 struct pcie_port *pp; 264 unsigned long reg; 265 u32 virq, bit; 266 267 chained_irq_enter(chip, desc); 268 269 pp = irq_desc_get_handler_data(desc); 270 pci = to_dw_pcie_from_pp(pp); 271 dra7xx = to_dra7xx_pcie(pci); 272 273 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI); 274 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg); 275 276 switch (reg) { 277 case MSI: 278 dra7xx_pcie_handle_msi_irq(pp); 279 break; 280 case INTA: 281 case INTB: 282 case INTC: 283 case INTD: 284 for_each_set_bit(bit, ®, PCI_NUM_INTX) { 285 virq = irq_find_mapping(dra7xx->irq_domain, bit); 286 if (virq) 287 generic_handle_irq(virq); 288 } 289 break; 290 } 291 292 chained_irq_exit(chip, desc); 293 } 294 295 static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg) 296 { 297 struct dra7xx_pcie *dra7xx = arg; 298 struct dw_pcie *pci = dra7xx->pci; 299 struct device *dev = pci->dev; 300 struct dw_pcie_ep *ep = &pci->ep; 301 u32 reg; 302 303 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN); 304 305 if (reg & ERR_SYS) 306 dev_dbg(dev, "System Error\n"); 307 308 if (reg & ERR_FATAL) 309 dev_dbg(dev, "Fatal Error\n"); 310 311 if (reg & ERR_NONFATAL) 312 dev_dbg(dev, "Non Fatal Error\n"); 313 314 if (reg & ERR_COR) 315 dev_dbg(dev, "Correctable Error\n"); 316 317 if (reg & ERR_AXI) 318 dev_dbg(dev, "AXI tag lookup fatal Error\n"); 319 320 if (reg & ERR_ECRC) 321 dev_dbg(dev, "ECRC Error\n"); 322 323 if (reg & PME_TURN_OFF) 324 dev_dbg(dev, 325 "Power Management Event Turn-Off message received\n"); 326 327 if (reg & PME_TO_ACK) 328 dev_dbg(dev, 329 "Power Management Turn-Off Ack message received\n"); 330 331 if (reg & PM_PME) 332 dev_dbg(dev, "PM Power Management Event message received\n"); 333 334 if (reg & LINK_REQ_RST) 335 dev_dbg(dev, "Link Request Reset\n"); 336 337 if (reg & LINK_UP_EVT) { 338 if (dra7xx->mode == DW_PCIE_EP_TYPE) 339 dw_pcie_ep_linkup(ep); 340 dev_dbg(dev, "Link-up state change\n"); 341 } 342 343 if (reg & CFG_BME_EVT) 344 dev_dbg(dev, "CFG 'Bus Master Enable' change\n"); 345 346 if (reg & CFG_MSE_EVT) 347 dev_dbg(dev, "CFG 'Memory Space Enable' change\n"); 348 349 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg); 350 351 return IRQ_HANDLED; 352 } 353 354 static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp) 355 { 356 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 357 struct device *dev = pci->dev; 358 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); 359 struct device_node *node = dev->of_node; 360 struct device_node *pcie_intc_node = of_get_next_child(node, NULL); 361 362 if (!pcie_intc_node) { 363 dev_err(dev, "No PCIe Intc node found\n"); 364 return -ENODEV; 365 } 366 367 irq_set_chained_handler_and_data(pp->irq, dra7xx_pcie_msi_irq_handler, 368 pp); 369 dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, 370 &intx_domain_ops, pp); 371 of_node_put(pcie_intc_node); 372 if (!dra7xx->irq_domain) { 373 dev_err(dev, "Failed to get a INTx IRQ domain\n"); 374 return -ENODEV; 375 } 376 377 return 0; 378 } 379 380 static void dra7xx_pcie_setup_msi_msg(struct irq_data *d, struct msi_msg *msg) 381 { 382 struct pcie_port *pp = irq_data_get_irq_chip_data(d); 383 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 384 u64 msi_target; 385 386 msi_target = (u64)pp->msi_data; 387 388 msg->address_lo = lower_32_bits(msi_target); 389 msg->address_hi = upper_32_bits(msi_target); 390 391 msg->data = d->hwirq; 392 393 dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n", 394 (int)d->hwirq, msg->address_hi, msg->address_lo); 395 } 396 397 static int dra7xx_pcie_msi_set_affinity(struct irq_data *d, 398 const struct cpumask *mask, 399 bool force) 400 { 401 return -EINVAL; 402 } 403 404 static void dra7xx_pcie_bottom_mask(struct irq_data *d) 405 { 406 struct pcie_port *pp = irq_data_get_irq_chip_data(d); 407 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 408 unsigned int res, bit, ctrl; 409 unsigned long flags; 410 411 raw_spin_lock_irqsave(&pp->lock, flags); 412 413 ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; 414 res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; 415 bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; 416 417 pp->irq_mask[ctrl] |= BIT(bit); 418 dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, 419 pp->irq_mask[ctrl]); 420 421 raw_spin_unlock_irqrestore(&pp->lock, flags); 422 } 423 424 static void dra7xx_pcie_bottom_unmask(struct irq_data *d) 425 { 426 struct pcie_port *pp = irq_data_get_irq_chip_data(d); 427 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 428 unsigned int res, bit, ctrl; 429 unsigned long flags; 430 431 raw_spin_lock_irqsave(&pp->lock, flags); 432 433 ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; 434 res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; 435 bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; 436 437 pp->irq_mask[ctrl] &= ~BIT(bit); 438 dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, 439 pp->irq_mask[ctrl]); 440 441 raw_spin_unlock_irqrestore(&pp->lock, flags); 442 } 443 444 static void dra7xx_pcie_bottom_ack(struct irq_data *d) 445 { 446 struct pcie_port *pp = irq_data_get_irq_chip_data(d); 447 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 448 unsigned int res, bit, ctrl; 449 450 ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; 451 res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; 452 bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; 453 454 dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit)); 455 } 456 457 static struct irq_chip dra7xx_pci_msi_bottom_irq_chip = { 458 .name = "DRA7XX-PCI-MSI", 459 .irq_ack = dra7xx_pcie_bottom_ack, 460 .irq_compose_msi_msg = dra7xx_pcie_setup_msi_msg, 461 .irq_set_affinity = dra7xx_pcie_msi_set_affinity, 462 .irq_mask = dra7xx_pcie_bottom_mask, 463 .irq_unmask = dra7xx_pcie_bottom_unmask, 464 }; 465 466 static int dra7xx_pcie_msi_host_init(struct pcie_port *pp) 467 { 468 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 469 struct device *dev = pci->dev; 470 u32 ctrl, num_ctrls; 471 int ret; 472 473 pp->msi_irq_chip = &dra7xx_pci_msi_bottom_irq_chip; 474 475 num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; 476 /* Initialize IRQ Status array */ 477 for (ctrl = 0; ctrl < num_ctrls; ctrl++) { 478 pp->irq_mask[ctrl] = ~0; 479 dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + 480 (ctrl * MSI_REG_CTRL_BLOCK_SIZE), 481 pp->irq_mask[ctrl]); 482 dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE + 483 (ctrl * MSI_REG_CTRL_BLOCK_SIZE), 484 ~0); 485 } 486 487 ret = dw_pcie_allocate_domains(pp); 488 if (ret) 489 return ret; 490 491 pp->msi_data = dma_map_single_attrs(dev, &pp->msi_msg, 492 sizeof(pp->msi_msg), 493 DMA_FROM_DEVICE, 494 DMA_ATTR_SKIP_CPU_SYNC); 495 ret = dma_mapping_error(dev, pp->msi_data); 496 if (ret) { 497 dev_err(dev, "Failed to map MSI data\n"); 498 pp->msi_data = 0; 499 dw_pcie_free_msi(pp); 500 } 501 return ret; 502 } 503 504 static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = { 505 .host_init = dra7xx_pcie_host_init, 506 .msi_host_init = dra7xx_pcie_msi_host_init, 507 }; 508 509 static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep) 510 { 511 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 512 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); 513 enum pci_barno bar; 514 515 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) 516 dw_pcie_ep_reset_bar(pci, bar); 517 518 dra7xx_pcie_enable_wrapper_interrupts(dra7xx); 519 } 520 521 static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx) 522 { 523 dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1); 524 mdelay(1); 525 dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1); 526 } 527 528 static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx, 529 u8 interrupt_num) 530 { 531 u32 reg; 532 533 reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT; 534 reg |= MSI_REQ_GRANT; 535 dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg); 536 } 537 538 static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, 539 enum pci_epc_irq_type type, u16 interrupt_num) 540 { 541 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 542 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); 543 544 switch (type) { 545 case PCI_EPC_IRQ_LEGACY: 546 dra7xx_pcie_raise_legacy_irq(dra7xx); 547 break; 548 case PCI_EPC_IRQ_MSI: 549 dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num); 550 break; 551 default: 552 dev_err(pci->dev, "UNKNOWN IRQ type\n"); 553 } 554 555 return 0; 556 } 557 558 static const struct pci_epc_features dra7xx_pcie_epc_features = { 559 .linkup_notifier = true, 560 .msi_capable = true, 561 .msix_capable = false, 562 }; 563 564 static const struct pci_epc_features* 565 dra7xx_pcie_get_features(struct dw_pcie_ep *ep) 566 { 567 return &dra7xx_pcie_epc_features; 568 } 569 570 static const struct dw_pcie_ep_ops pcie_ep_ops = { 571 .ep_init = dra7xx_pcie_ep_init, 572 .raise_irq = dra7xx_pcie_raise_irq, 573 .get_features = dra7xx_pcie_get_features, 574 }; 575 576 static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx, 577 struct platform_device *pdev) 578 { 579 int ret; 580 struct dw_pcie_ep *ep; 581 struct resource *res; 582 struct device *dev = &pdev->dev; 583 struct dw_pcie *pci = dra7xx->pci; 584 585 ep = &pci->ep; 586 ep->ops = &pcie_ep_ops; 587 588 pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "ep_dbics"); 589 if (IS_ERR(pci->dbi_base)) 590 return PTR_ERR(pci->dbi_base); 591 592 pci->dbi_base2 = 593 devm_platform_ioremap_resource_byname(pdev, "ep_dbics2"); 594 if (IS_ERR(pci->dbi_base2)) 595 return PTR_ERR(pci->dbi_base2); 596 597 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); 598 if (!res) 599 return -EINVAL; 600 601 ep->phys_base = res->start; 602 ep->addr_size = resource_size(res); 603 604 ret = dw_pcie_ep_init(ep); 605 if (ret) { 606 dev_err(dev, "failed to initialize endpoint\n"); 607 return ret; 608 } 609 610 return 0; 611 } 612 613 static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx, 614 struct platform_device *pdev) 615 { 616 int ret; 617 struct dw_pcie *pci = dra7xx->pci; 618 struct pcie_port *pp = &pci->pp; 619 struct device *dev = pci->dev; 620 621 pp->irq = platform_get_irq(pdev, 1); 622 if (pp->irq < 0) 623 return pp->irq; 624 625 ret = dra7xx_pcie_init_irq_domain(pp); 626 if (ret < 0) 627 return ret; 628 629 pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "rc_dbics"); 630 if (IS_ERR(pci->dbi_base)) 631 return PTR_ERR(pci->dbi_base); 632 633 pp->ops = &dra7xx_pcie_host_ops; 634 635 ret = dw_pcie_host_init(pp); 636 if (ret) { 637 dev_err(dev, "failed to initialize host\n"); 638 return ret; 639 } 640 641 return 0; 642 } 643 644 static const struct dw_pcie_ops dw_pcie_ops = { 645 .cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup, 646 .start_link = dra7xx_pcie_establish_link, 647 .stop_link = dra7xx_pcie_stop_link, 648 .link_up = dra7xx_pcie_link_up, 649 }; 650 651 static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx) 652 { 653 int phy_count = dra7xx->phy_count; 654 655 while (phy_count--) { 656 phy_power_off(dra7xx->phy[phy_count]); 657 phy_exit(dra7xx->phy[phy_count]); 658 } 659 } 660 661 static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx) 662 { 663 int phy_count = dra7xx->phy_count; 664 int ret; 665 int i; 666 667 for (i = 0; i < phy_count; i++) { 668 ret = phy_set_mode(dra7xx->phy[i], PHY_MODE_PCIE); 669 if (ret < 0) 670 goto err_phy; 671 672 ret = phy_init(dra7xx->phy[i]); 673 if (ret < 0) 674 goto err_phy; 675 676 ret = phy_power_on(dra7xx->phy[i]); 677 if (ret < 0) { 678 phy_exit(dra7xx->phy[i]); 679 goto err_phy; 680 } 681 } 682 683 return 0; 684 685 err_phy: 686 while (--i >= 0) { 687 phy_power_off(dra7xx->phy[i]); 688 phy_exit(dra7xx->phy[i]); 689 } 690 691 return ret; 692 } 693 694 static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = { 695 .mode = DW_PCIE_RC_TYPE, 696 }; 697 698 static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = { 699 .mode = DW_PCIE_EP_TYPE, 700 }; 701 702 static const struct dra7xx_pcie_of_data dra746_pcie_rc_of_data = { 703 .b1co_mode_sel_mask = BIT(2), 704 .mode = DW_PCIE_RC_TYPE, 705 }; 706 707 static const struct dra7xx_pcie_of_data dra726_pcie_rc_of_data = { 708 .b1co_mode_sel_mask = GENMASK(3, 2), 709 .mode = DW_PCIE_RC_TYPE, 710 }; 711 712 static const struct dra7xx_pcie_of_data dra746_pcie_ep_of_data = { 713 .b1co_mode_sel_mask = BIT(2), 714 .mode = DW_PCIE_EP_TYPE, 715 }; 716 717 static const struct dra7xx_pcie_of_data dra726_pcie_ep_of_data = { 718 .b1co_mode_sel_mask = GENMASK(3, 2), 719 .mode = DW_PCIE_EP_TYPE, 720 }; 721 722 static const struct of_device_id of_dra7xx_pcie_match[] = { 723 { 724 .compatible = "ti,dra7-pcie", 725 .data = &dra7xx_pcie_rc_of_data, 726 }, 727 { 728 .compatible = "ti,dra7-pcie-ep", 729 .data = &dra7xx_pcie_ep_of_data, 730 }, 731 { 732 .compatible = "ti,dra746-pcie-rc", 733 .data = &dra746_pcie_rc_of_data, 734 }, 735 { 736 .compatible = "ti,dra726-pcie-rc", 737 .data = &dra726_pcie_rc_of_data, 738 }, 739 { 740 .compatible = "ti,dra746-pcie-ep", 741 .data = &dra746_pcie_ep_of_data, 742 }, 743 { 744 .compatible = "ti,dra726-pcie-ep", 745 .data = &dra726_pcie_ep_of_data, 746 }, 747 {}, 748 }; 749 750 /* 751 * dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870 752 * @dra7xx: the dra7xx device where the workaround should be applied 753 * 754 * Access to the PCIe slave port that are not 32-bit aligned will result 755 * in incorrect mapping to TLP Address and Byte enable fields. Therefore, 756 * byte and half-word accesses are not possible to byte offset 0x1, 0x2, or 757 * 0x3. 758 * 759 * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1. 760 */ 761 static int dra7xx_pcie_unaligned_memaccess(struct device *dev) 762 { 763 int ret; 764 struct device_node *np = dev->of_node; 765 struct of_phandle_args args; 766 struct regmap *regmap; 767 768 regmap = syscon_regmap_lookup_by_phandle(np, 769 "ti,syscon-unaligned-access"); 770 if (IS_ERR(regmap)) { 771 dev_dbg(dev, "can't get ti,syscon-unaligned-access\n"); 772 return -EINVAL; 773 } 774 775 ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access", 776 2, 0, &args); 777 if (ret) { 778 dev_err(dev, "failed to parse ti,syscon-unaligned-access\n"); 779 return ret; 780 } 781 782 ret = regmap_update_bits(regmap, args.args[0], args.args[1], 783 args.args[1]); 784 if (ret) 785 dev_err(dev, "failed to enable unaligned access\n"); 786 787 of_node_put(args.np); 788 789 return ret; 790 } 791 792 static int dra7xx_pcie_configure_two_lane(struct device *dev, 793 u32 b1co_mode_sel_mask) 794 { 795 struct device_node *np = dev->of_node; 796 struct regmap *pcie_syscon; 797 unsigned int pcie_reg; 798 u32 mask; 799 u32 val; 800 801 pcie_syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel"); 802 if (IS_ERR(pcie_syscon)) { 803 dev_err(dev, "unable to get ti,syscon-lane-sel\n"); 804 return -EINVAL; 805 } 806 807 if (of_property_read_u32_index(np, "ti,syscon-lane-sel", 1, 808 &pcie_reg)) { 809 dev_err(dev, "couldn't get lane selection reg offset\n"); 810 return -EINVAL; 811 } 812 813 mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN; 814 val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN; 815 regmap_update_bits(pcie_syscon, pcie_reg, mask, val); 816 817 return 0; 818 } 819 820 static int __init dra7xx_pcie_probe(struct platform_device *pdev) 821 { 822 u32 reg; 823 int ret; 824 int irq; 825 int i; 826 int phy_count; 827 struct phy **phy; 828 struct device_link **link; 829 void __iomem *base; 830 struct dw_pcie *pci; 831 struct dra7xx_pcie *dra7xx; 832 struct device *dev = &pdev->dev; 833 struct device_node *np = dev->of_node; 834 char name[10]; 835 struct gpio_desc *reset; 836 const struct of_device_id *match; 837 const struct dra7xx_pcie_of_data *data; 838 enum dw_pcie_device_mode mode; 839 u32 b1co_mode_sel_mask; 840 841 match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev); 842 if (!match) 843 return -EINVAL; 844 845 data = (struct dra7xx_pcie_of_data *)match->data; 846 mode = (enum dw_pcie_device_mode)data->mode; 847 b1co_mode_sel_mask = data->b1co_mode_sel_mask; 848 849 dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL); 850 if (!dra7xx) 851 return -ENOMEM; 852 853 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); 854 if (!pci) 855 return -ENOMEM; 856 857 pci->dev = dev; 858 pci->ops = &dw_pcie_ops; 859 860 irq = platform_get_irq(pdev, 0); 861 if (irq < 0) 862 return irq; 863 864 base = devm_platform_ioremap_resource_byname(pdev, "ti_conf"); 865 if (IS_ERR(base)) 866 return PTR_ERR(base); 867 868 phy_count = of_property_count_strings(np, "phy-names"); 869 if (phy_count < 0) { 870 dev_err(dev, "unable to find the strings\n"); 871 return phy_count; 872 } 873 874 phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL); 875 if (!phy) 876 return -ENOMEM; 877 878 link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL); 879 if (!link) 880 return -ENOMEM; 881 882 for (i = 0; i < phy_count; i++) { 883 snprintf(name, sizeof(name), "pcie-phy%d", i); 884 phy[i] = devm_phy_get(dev, name); 885 if (IS_ERR(phy[i])) 886 return PTR_ERR(phy[i]); 887 888 link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS); 889 if (!link[i]) { 890 ret = -EINVAL; 891 goto err_link; 892 } 893 } 894 895 dra7xx->base = base; 896 dra7xx->phy = phy; 897 dra7xx->pci = pci; 898 dra7xx->phy_count = phy_count; 899 900 if (phy_count == 2) { 901 ret = dra7xx_pcie_configure_two_lane(dev, b1co_mode_sel_mask); 902 if (ret < 0) 903 dra7xx->phy_count = 1; /* Fallback to x1 lane mode */ 904 } 905 906 ret = dra7xx_pcie_enable_phy(dra7xx); 907 if (ret) { 908 dev_err(dev, "failed to enable phy\n"); 909 return ret; 910 } 911 912 platform_set_drvdata(pdev, dra7xx); 913 914 pm_runtime_enable(dev); 915 ret = pm_runtime_get_sync(dev); 916 if (ret < 0) { 917 dev_err(dev, "pm_runtime_get_sync failed\n"); 918 goto err_get_sync; 919 } 920 921 reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH); 922 if (IS_ERR(reset)) { 923 ret = PTR_ERR(reset); 924 dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret); 925 goto err_gpio; 926 } 927 928 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); 929 reg &= ~LTSSM_EN; 930 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); 931 932 switch (mode) { 933 case DW_PCIE_RC_TYPE: 934 if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) { 935 ret = -ENODEV; 936 goto err_gpio; 937 } 938 939 dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, 940 DEVICE_TYPE_RC); 941 942 ret = dra7xx_pcie_unaligned_memaccess(dev); 943 if (ret) 944 dev_err(dev, "WA for Errata i870 not applied\n"); 945 946 ret = dra7xx_add_pcie_port(dra7xx, pdev); 947 if (ret < 0) 948 goto err_gpio; 949 break; 950 case DW_PCIE_EP_TYPE: 951 if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP)) { 952 ret = -ENODEV; 953 goto err_gpio; 954 } 955 956 dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, 957 DEVICE_TYPE_EP); 958 959 ret = dra7xx_pcie_unaligned_memaccess(dev); 960 if (ret) 961 goto err_gpio; 962 963 ret = dra7xx_add_pcie_ep(dra7xx, pdev); 964 if (ret < 0) 965 goto err_gpio; 966 break; 967 default: 968 dev_err(dev, "INVALID device type %d\n", mode); 969 } 970 dra7xx->mode = mode; 971 972 ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler, 973 IRQF_SHARED, "dra7xx-pcie-main", dra7xx); 974 if (ret) { 975 dev_err(dev, "failed to request irq\n"); 976 goto err_gpio; 977 } 978 979 return 0; 980 981 err_gpio: 982 err_get_sync: 983 pm_runtime_put(dev); 984 pm_runtime_disable(dev); 985 dra7xx_pcie_disable_phy(dra7xx); 986 987 err_link: 988 while (--i >= 0) 989 device_link_del(link[i]); 990 991 return ret; 992 } 993 994 #ifdef CONFIG_PM_SLEEP 995 static int dra7xx_pcie_suspend(struct device *dev) 996 { 997 struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); 998 struct dw_pcie *pci = dra7xx->pci; 999 u32 val; 1000 1001 if (dra7xx->mode != DW_PCIE_RC_TYPE) 1002 return 0; 1003 1004 /* clear MSE */ 1005 val = dw_pcie_readl_dbi(pci, PCI_COMMAND); 1006 val &= ~PCI_COMMAND_MEMORY; 1007 dw_pcie_writel_dbi(pci, PCI_COMMAND, val); 1008 1009 return 0; 1010 } 1011 1012 static int dra7xx_pcie_resume(struct device *dev) 1013 { 1014 struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); 1015 struct dw_pcie *pci = dra7xx->pci; 1016 u32 val; 1017 1018 if (dra7xx->mode != DW_PCIE_RC_TYPE) 1019 return 0; 1020 1021 /* set MSE */ 1022 val = dw_pcie_readl_dbi(pci, PCI_COMMAND); 1023 val |= PCI_COMMAND_MEMORY; 1024 dw_pcie_writel_dbi(pci, PCI_COMMAND, val); 1025 1026 return 0; 1027 } 1028 1029 static int dra7xx_pcie_suspend_noirq(struct device *dev) 1030 { 1031 struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); 1032 1033 dra7xx_pcie_disable_phy(dra7xx); 1034 1035 return 0; 1036 } 1037 1038 static int dra7xx_pcie_resume_noirq(struct device *dev) 1039 { 1040 struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); 1041 int ret; 1042 1043 ret = dra7xx_pcie_enable_phy(dra7xx); 1044 if (ret) { 1045 dev_err(dev, "failed to enable phy\n"); 1046 return ret; 1047 } 1048 1049 return 0; 1050 } 1051 #endif 1052 1053 static void dra7xx_pcie_shutdown(struct platform_device *pdev) 1054 { 1055 struct device *dev = &pdev->dev; 1056 struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); 1057 int ret; 1058 1059 dra7xx_pcie_stop_link(dra7xx->pci); 1060 1061 ret = pm_runtime_put_sync(dev); 1062 if (ret < 0) 1063 dev_dbg(dev, "pm_runtime_put_sync failed\n"); 1064 1065 pm_runtime_disable(dev); 1066 dra7xx_pcie_disable_phy(dra7xx); 1067 } 1068 1069 static const struct dev_pm_ops dra7xx_pcie_pm_ops = { 1070 SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume) 1071 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq, 1072 dra7xx_pcie_resume_noirq) 1073 }; 1074 1075 static struct platform_driver dra7xx_pcie_driver = { 1076 .driver = { 1077 .name = "dra7-pcie", 1078 .of_match_table = of_dra7xx_pcie_match, 1079 .suppress_bind_attrs = true, 1080 .pm = &dra7xx_pcie_pm_ops, 1081 }, 1082 .shutdown = dra7xx_pcie_shutdown, 1083 }; 1084 builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe); 1085