// SPDX-License-Identifier: GPL-2.0
/*
 * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs
 *
 * Copyright (C) 2013-2014 Texas Instruments Incorporated - https://www.ti.com
 *
 * Authors: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/resource.h>
#include <linux/types.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/gpio/consumer.h>

#include "../../pci.h"
#include "pcie-designware.h"

/* PCIe controller wrapper DRA7XX configuration registers */

#define	PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN		0x0024
#define	PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN		0x0028
#define	ERR_SYS						BIT(0)
#define	ERR_FATAL					BIT(1)
#define	ERR_NONFATAL					BIT(2)
#define	ERR_COR						BIT(3)
#define	ERR_AXI						BIT(4)
#define	ERR_ECRC					BIT(5)
#define	PME_TURN_OFF					BIT(8)
#define	PME_TO_ACK					BIT(9)
#define	PM_PME						BIT(10)
#define	LINK_REQ_RST					BIT(11)
#define	LINK_UP_EVT					BIT(12)
#define	CFG_BME_EVT					BIT(13)
#define	CFG_MSE_EVT					BIT(14)
#define	INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
			ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
			LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)

#define	PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI		0x0034
#define	PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI		0x0038
#define	INTA						BIT(0)
#define	INTB						BIT(1)
#define	INTC						BIT(2)
#define	INTD						BIT(3)
#define	MSI						BIT(4)
#define	LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)

#define	PCIECTRL_TI_CONF_DEVICE_TYPE			0x0100
#define	DEVICE_TYPE_EP					0x0
#define	DEVICE_TYPE_LEG_EP				0x1
#define	DEVICE_TYPE_RC					0x4

#define	PCIECTRL_DRA7XX_CONF_DEVICE_CMD			0x0104
#define	LTSSM_EN					0x1

#define	PCIECTRL_DRA7XX_CONF_PHY_CS			0x010C
#define	LINK_UP						BIT(16)
#define	DRA7XX_CPU_TO_BUS_ADDR				0x0FFFFFFF

#define EXP_CAP_ID_OFFSET				0x70

#define	PCIECTRL_TI_CONF_INTX_ASSERT			0x0124
#define	PCIECTRL_TI_CONF_INTX_DEASSERT			0x0128

#define	PCIECTRL_TI_CONF_MSI_XMT			0x012c
#define MSI_REQ_GRANT					BIT(0)
#define MSI_VECTOR_SHIFT				7

#define PCIE_1LANE_2LANE_SELECTION			BIT(13)
#define PCIE_B1C0_MODE_SEL				BIT(2)
#define PCIE_B0_B1_TSYNCEN				BIT(0)

struct dra7xx_pcie {
	struct dw_pcie		*pci;
	void __iomem		*base;		/* DT ti_conf */
	int			phy_count;	/* DT phy-names count */
	struct phy		**phy;
	int			link_gen;
	struct irq_domain	*irq_domain;
	enum dw_pcie_device_mode mode;
};

struct dra7xx_pcie_of_data {
	enum dw_pcie_device_mode mode;
	u32 b1co_mode_sel_mask;
};

#define to_dra7xx_pcie(x)	dev_get_drvdata((x)->dev)

static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
{
	return readl(pcie->base + offset);
}

static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
				      u32 value)
{
	writel(value, pcie->base + offset);
}

static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
{
	return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
}
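/*
 * Worked example for the fixup above (the address is illustrative, not
 * tied to any one board): the wrapper drives only the low 28 address
 * bits onto the bus, so an outbound CPU address such as 0x20001000
 * becomes bus address 0x20001000 & DRA7XX_CPU_TO_BUS_ADDR = 0x00001000.
 */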
static int dra7xx_pcie_link_up(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);

	return !!(reg & LINK_UP);
}

static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg &= ~LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
}

static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	struct device *dev = pci->dev;
	u32 reg;
	u32 exp_cap_off = EXP_CAP_ID_OFFSET;

	if (dw_pcie_link_up(pci)) {
		dev_err(dev, "link is already up\n");
		return 0;
	}

	if (dra7xx->link_gen == 1) {
		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
			     4, &reg);
		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
			dw_pcie_write(pci->dbi_base + exp_cap_off +
				      PCI_EXP_LNKCAP, 4, reg);
		}

		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
			     2, &reg);
		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
			dw_pcie_write(pci->dbi_base + exp_cap_off +
				      PCI_EXP_LNKCTL2, 2, reg);
		}
	}

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg |= LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

	return 0;
}

static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
			   LEG_EP_INTERRUPTS | MSI);

	dra7xx_pcie_writel(dra7xx,
			   PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
			   MSI | LEG_EP_INTERRUPTS);
}

static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
			   INTERRUPTS);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
			   INTERRUPTS);
}

static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
	dra7xx_pcie_enable_msi_interrupts(dra7xx);
}

static int dra7xx_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

	dw_pcie_setup_rc(pp);

	dra7xx_pcie_establish_link(pci);
	dw_pcie_wait_for_link(pci);
	dw_pcie_msi_init(pp);
	dra7xx_pcie_enable_interrupts(dra7xx);

	return 0;
}

static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = dra7xx_pcie_intx_map,
	.xlate = pci_irqd_intx_xlate,
};
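/*
 * Vector layout assumed by the handler below, following the DesignWare
 * core defaults (MAX_MSI_IRQS_PER_CTRL = 32 vectors per controller
 * block): status bit 'pos' of block 'index' corresponds to
 * hwirq = (index * 32) + pos, e.g. bit 3 of block 1 is hwirq 35, which
 * is then translated to a Linux virq via the MSI IRQ domain.
 */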
static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned long val;
	int pos, irq;

	val = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
				(index * MSI_REG_CTRL_BLOCK_SIZE));
	if (!val)
		return 0;

	pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, 0);
	while (pos != MAX_MSI_IRQS_PER_CTRL) {
		irq = irq_find_mapping(pp->irq_domain,
				       (index * MAX_MSI_IRQS_PER_CTRL) + pos);
		generic_handle_irq(irq);
		pos++;
		pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, pos);
	}

	return 1;
}

static void dra7xx_pcie_handle_msi_irq(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	int ret, i, count, num_ctrls;

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	/*
	 * Need to make sure all MSI status bits read 0 before exiting.
	 * Else, new MSI IRQs are not registered by the wrapper. Have an
	 * upper bound for the loop and exit the IRQ in case of an IRQ
	 * flood to avoid locking up the system in interrupt context.
	 */
	count = 0;
	do {
		ret = 0;

		for (i = 0; i < num_ctrls; i++)
			ret |= dra7xx_pcie_handle_msi(pp, i);
		count++;
	} while (ret && count <= 1000);

	if (count > 1000)
		dev_warn_ratelimited(pci->dev,
				     "Too many MSI IRQs to handle\n");
}

static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct dra7xx_pcie *dra7xx;
	struct dw_pcie *pci;
	struct pcie_port *pp;
	unsigned long reg;
	u32 virq, bit;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	pci = to_dw_pcie_from_pp(pp);
	dra7xx = to_dra7xx_pcie(pci);

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);

	switch (reg) {
	case MSI:
		dra7xx_pcie_handle_msi_irq(pp);
		break;
	case INTA:
	case INTB:
	case INTC:
	case INTD:
		for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
			virq = irq_find_mapping(dra7xx->irq_domain, bit);
			if (virq)
				generic_handle_irq(virq);
		}
		break;
	}

	chained_irq_exit(chip, desc);
}

static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
{
	struct dra7xx_pcie *dra7xx = arg;
	struct dw_pcie *pci = dra7xx->pci;
	struct device *dev = pci->dev;
	struct dw_pcie_ep *ep = &pci->ep;
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);

	if (reg & ERR_SYS)
		dev_dbg(dev, "System Error\n");

	if (reg & ERR_FATAL)
		dev_dbg(dev, "Fatal Error\n");

	if (reg & ERR_NONFATAL)
		dev_dbg(dev, "Non Fatal Error\n");

	if (reg & ERR_COR)
		dev_dbg(dev, "Correctable Error\n");

	if (reg & ERR_AXI)
		dev_dbg(dev, "AXI tag lookup fatal Error\n");

	if (reg & ERR_ECRC)
		dev_dbg(dev, "ECRC Error\n");

	if (reg & PME_TURN_OFF)
		dev_dbg(dev,
			"Power Management Event Turn-Off message received\n");

	if (reg & PME_TO_ACK)
		dev_dbg(dev,
			"Power Management Turn-Off Ack message received\n");

	if (reg & PM_PME)
		dev_dbg(dev, "PM Power Management Event message received\n");

	if (reg & LINK_REQ_RST)
		dev_dbg(dev, "Link Request Reset\n");

	if (reg & LINK_UP_EVT) {
		if (dra7xx->mode == DW_PCIE_EP_TYPE)
			dw_pcie_ep_linkup(ep);
		dev_dbg(dev, "Link-up state change\n");
	}

	if (reg & CFG_BME_EVT)
		dev_dbg(dev, "CFG 'Bus Master Enable' change\n");

	if (reg & CFG_MSE_EVT)
		dev_dbg(dev, "CFG 'Memory Space Enable' change\n");

	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);

	return IRQ_HANDLED;
}
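/*
 * dra7xx_pcie_init_irq_domain() below takes the first child of the
 * controller node as the INTx interrupt controller. A minimal DT sketch
 * (node names illustrative; see the ti,dra7-pcie binding for the
 * authoritative layout):
 *
 *	pcie@51000000 {
 *		compatible = "ti,dra7-pcie";
 *		...
 *		pcie_intc: interrupt-controller {
 *			interrupt-controller;
 *			#interrupt-cells = <1>;
 *		};
 *	};
 */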
static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node = of_get_next_child(node, NULL);

	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -ENODEV;
	}

	irq_set_chained_handler_and_data(pp->irq, dra7xx_pcie_msi_irq_handler,
					 pp);
	dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						   &intx_domain_ops, pp);
	of_node_put(pcie_intc_node);
	if (!dra7xx->irq_domain) {
		dev_err(dev, "Failed to get an INTx IRQ domain\n");
		return -ENODEV;
	}

	return 0;
}

static void dra7xx_pcie_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target;

	msi_target = (u64)pp->msi_data;

	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);

	msg->data = d->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)d->hwirq, msg->address_hi, msg->address_lo);
}

static int dra7xx_pcie_msi_set_affinity(struct irq_data *d,
					const struct cpumask *mask,
					bool force)
{
	return -EINVAL;
}

static void dra7xx_pcie_bottom_mask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] |= BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res,
			   pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dra7xx_pcie_bottom_unmask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] &= ~BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res,
			   pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dra7xx_pcie_bottom_ack(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
}

static struct irq_chip dra7xx_pci_msi_bottom_irq_chip = {
	.name = "DRA7XX-PCI-MSI",
	.irq_ack = dra7xx_pcie_bottom_ack,
	.irq_compose_msi_msg = dra7xx_pcie_setup_msi_msg,
	.irq_set_affinity = dra7xx_pcie_msi_set_affinity,
	.irq_mask = dra7xx_pcie_bottom_mask,
	.irq_unmask = dra7xx_pcie_bottom_unmask,
};
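/*
 * Offset arithmetic shared by the mask/unmask/ack callbacks above,
 * assuming the usual DesignWare constants (MAX_MSI_IRQS_PER_CTRL = 32,
 * MSI_REG_CTRL_BLOCK_SIZE = 12): hwirq 37 decodes to ctrl = 37 / 32 = 1,
 * register offset res = 1 * 12 = 12 from PCIE_MSI_INTR0_MASK (or
 * PCIE_MSI_INTR0_STATUS for the ack), and bit = 37 % 32 = 5.
 */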
static int dra7xx_pcie_msi_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u32 ctrl, num_ctrls;

	pp->msi_irq_chip = &dra7xx_pci_msi_bottom_irq_chip;

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	/* Initialize IRQ Status array */
	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
		pp->irq_mask[ctrl] = ~0;
		dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
				   (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
				   pp->irq_mask[ctrl]);
		dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
				   (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
				   ~0);
	}

	return dw_pcie_allocate_domains(pp);
}

static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
	.host_init = dra7xx_pcie_host_init,
	.msi_host_init = dra7xx_pcie_msi_host_init,
};

static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	enum pci_barno bar;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
		dw_pcie_ep_reset_bar(pci, bar);

	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
}

static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
	mdelay(1);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1);
}

static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
				      u8 interrupt_num)
{
	u32 reg;

	reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT;
	reg |= MSI_REQ_GRANT;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
}

static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				 enum pci_epc_irq_type type, u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		dra7xx_pcie_raise_legacy_irq(dra7xx);
		break;
	case PCI_EPC_IRQ_MSI:
		dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
		break;
	default:
		dev_err(pci->dev, "UNKNOWN IRQ type\n");
	}

	return 0;
}

static const struct pci_epc_features dra7xx_pcie_epc_features = {
	.linkup_notifier = true,
	.msi_capable = true,
	.msix_capable = false,
};

static const struct pci_epc_features*
dra7xx_pcie_get_features(struct dw_pcie_ep *ep)
{
	return &dra7xx_pcie_epc_features;
}

static const struct dw_pcie_ep_ops pcie_ep_ops = {
	.ep_init = dra7xx_pcie_ep_init,
	.raise_irq = dra7xx_pcie_raise_irq,
	.get_features = dra7xx_pcie_get_features,
};

static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
				     struct platform_device *pdev)
{
	int ret;
	struct dw_pcie_ep *ep;
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci = dra7xx->pci;

	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "ep_dbics");
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	pci->dbi_base2 =
		devm_platform_ioremap_resource_byname(pdev, "ep_dbics2");
	if (IS_ERR(pci->dbi_base2))
		return PTR_ERR(pci->dbi_base2);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
	if (!res)
		return -EINVAL;

	ep->phys_base = res->start;
	ep->addr_size = resource_size(res);

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "failed to initialize endpoint\n");
		return ret;
	}

	return 0;
}
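/*
 * Note on IRQ resources: index 0 is the wrapper's main error/event
 * interrupt, requested in probe with dra7xx_pcie_irq_handler(); index 1,
 * fetched below, is the MSI/INTx interrupt that feeds the chained
 * dra7xx_pcie_msi_irq_handler().
 */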
static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
				       struct platform_device *pdev)
{
	int ret;
	struct dw_pcie *pci = dra7xx->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;

	pp->irq = platform_get_irq(pdev, 1);
	if (pp->irq < 0)
		return pp->irq;

	ret = dra7xx_pcie_init_irq_domain(pp);
	if (ret < 0)
		return ret;

	pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "rc_dbics");
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	pp->ops = &dra7xx_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}

static const struct dw_pcie_ops dw_pcie_ops = {
	.cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup,
	.start_link = dra7xx_pcie_establish_link,
	.stop_link = dra7xx_pcie_stop_link,
	.link_up = dra7xx_pcie_link_up,
};

static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx)
{
	int phy_count = dra7xx->phy_count;

	while (phy_count--) {
		phy_power_off(dra7xx->phy[phy_count]);
		phy_exit(dra7xx->phy[phy_count]);
	}
}

static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx)
{
	int phy_count = dra7xx->phy_count;
	int ret;
	int i;

	for (i = 0; i < phy_count; i++) {
		ret = phy_set_mode(dra7xx->phy[i], PHY_MODE_PCIE);
		if (ret < 0)
			goto err_phy;

		ret = phy_init(dra7xx->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_power_on(dra7xx->phy[i]);
		if (ret < 0) {
			phy_exit(dra7xx->phy[i]);
			goto err_phy;
		}
	}

	return 0;

err_phy:
	while (--i >= 0) {
		phy_power_off(dra7xx->phy[i]);
		phy_exit(dra7xx->phy[i]);
	}

	return ret;
}

static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = {
	.mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
	.mode = DW_PCIE_EP_TYPE,
};

static const struct dra7xx_pcie_of_data dra746_pcie_rc_of_data = {
	.b1co_mode_sel_mask = BIT(2),
	.mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra726_pcie_rc_of_data = {
	.b1co_mode_sel_mask = GENMASK(3, 2),
	.mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra746_pcie_ep_of_data = {
	.b1co_mode_sel_mask = BIT(2),
	.mode = DW_PCIE_EP_TYPE,
};

static const struct dra7xx_pcie_of_data dra726_pcie_ep_of_data = {
	.b1co_mode_sel_mask = GENMASK(3, 2),
	.mode = DW_PCIE_EP_TYPE,
};

static const struct of_device_id of_dra7xx_pcie_match[] = {
	{
		.compatible = "ti,dra7-pcie",
		.data = &dra7xx_pcie_rc_of_data,
	},
	{
		.compatible = "ti,dra7-pcie-ep",
		.data = &dra7xx_pcie_ep_of_data,
	},
	{
		.compatible = "ti,dra746-pcie-rc",
		.data = &dra746_pcie_rc_of_data,
	},
	{
		.compatible = "ti,dra726-pcie-rc",
		.data = &dra726_pcie_rc_of_data,
	},
	{
		.compatible = "ti,dra746-pcie-ep",
		.data = &dra746_pcie_ep_of_data,
	},
	{
		.compatible = "ti,dra726-pcie-ep",
		.data = &dra726_pcie_ep_of_data,
	},
	{},
};

/*
 * dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
 * @dev: device on which the workaround should be applied
 *
 * Accesses to the PCIe slave port that are not 32-bit aligned result in an
 * incorrect mapping to the TLP Address and Byte Enable fields. Therefore,
 * byte and half-word accesses to byte offsets 0x1, 0x2, and 0x3 are not
 * possible.
 *
 * To avoid this issue, set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
 */
static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
{
	int ret;
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	struct regmap *regmap;

	regmap = syscon_regmap_lookup_by_phandle(np,
						 "ti,syscon-unaligned-access");
	if (IS_ERR(regmap)) {
		dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
		return -EINVAL;
	}

	ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
					       2, 0, &args);
	if (ret) {
		dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
		return ret;
	}

	ret = regmap_update_bits(regmap, args.args[0], args.args[1],
				 args.args[1]);
	if (ret)
		dev_err(dev, "failed to enable unaligned access\n");

	of_node_put(args.np);

	return ret;
}

static int dra7xx_pcie_configure_two_lane(struct device *dev,
					  u32 b1co_mode_sel_mask)
{
	struct device_node *np = dev->of_node;
	struct regmap *pcie_syscon;
	unsigned int pcie_reg;
	u32 mask;
	u32 val;

	pcie_syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel");
	if (IS_ERR(pcie_syscon)) {
		dev_err(dev, "unable to get ti,syscon-lane-sel\n");
		return -EINVAL;
	}

	if (of_property_read_u32_index(np, "ti,syscon-lane-sel", 1,
				       &pcie_reg)) {
		dev_err(dev, "couldn't get lane selection reg offset\n");
		return -EINVAL;
	}

	mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN;
	val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN;
	regmap_update_bits(pcie_syscon, pcie_reg, mask, val);

	return 0;
}

static int __init dra7xx_pcie_probe(struct platform_device *pdev)
{
	u32 reg;
	int ret;
	int irq;
	int i;
	int phy_count;
	struct phy **phy;
	struct device_link **link;
	void __iomem *base;
	struct dw_pcie *pci;
	struct dra7xx_pcie *dra7xx;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	char name[10];
	struct gpio_desc *reset;
	const struct of_device_id *match;
	const struct dra7xx_pcie_of_data *data;
	enum dw_pcie_device_mode mode;
	u32 b1co_mode_sel_mask;

	match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev);
	if (!match)
		return -EINVAL;

	data = (struct dra7xx_pcie_of_data *)match->data;
	mode = (enum dw_pcie_device_mode)data->mode;
	b1co_mode_sel_mask = data->b1co_mode_sel_mask;

	dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
	if (!dra7xx)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	base = devm_platform_ioremap_resource_byname(pdev, "ti_conf");
	if (IS_ERR(base))
		return PTR_ERR(base);

	phy_count = of_property_count_strings(np, "phy-names");
	if (phy_count < 0) {
		dev_err(dev, "unable to count 'phy-names' strings\n");
		return phy_count;
	}

	phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	/*
	 * Look up one PHY per "phy-names" entry ("pcie-phy0", "pcie-phy1",
	 * ...) and add a stateless device link to each, so the PHY suspends
	 * after and resumes before the controller.
	 */
	for (i = 0; i < phy_count; i++) {
		snprintf(name, sizeof(name), "pcie-phy%d", i);
		phy[i] = devm_phy_get(dev, name);
		if (IS_ERR(phy[i]))
			return PTR_ERR(phy[i]);

		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
		if (!link[i]) {
			ret = -EINVAL;
			goto err_link;
		}
	}

	dra7xx->base = base;
	dra7xx->phy = phy;
	dra7xx->pci = pci;
	dra7xx->phy_count = phy_count;

	if (phy_count == 2) {
		ret = dra7xx_pcie_configure_two_lane(dev, b1co_mode_sel_mask);
		if (ret < 0)
			dra7xx->phy_count = 1; /* Fall back to x1 lane mode */
	}

	ret = dra7xx_pcie_enable_phy(dra7xx);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		return ret;
	}

	platform_set_drvdata(pdev, dra7xx);

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync failed\n");
		goto err_get_sync;
	}

	reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
	if (IS_ERR(reset)) {
		ret = PTR_ERR(reset);
		dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
		goto err_gpio;
	}

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg &= ~LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

	dra7xx->link_gen = of_pci_get_max_link_speed(np);
	if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2)
		dra7xx->link_gen = 2;

	switch (mode) {
	case DW_PCIE_RC_TYPE:
		if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) {
			ret = -ENODEV;
			goto err_gpio;
		}

		dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
				   DEVICE_TYPE_RC);

		ret = dra7xx_pcie_unaligned_memaccess(dev);
		if (ret)
			dev_err(dev, "WA for Errata i870 not applied\n");

		ret = dra7xx_add_pcie_port(dra7xx, pdev);
		if (ret < 0)
			goto err_gpio;
		break;
	case DW_PCIE_EP_TYPE:
		if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP)) {
			ret = -ENODEV;
			goto err_gpio;
		}

		dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
				   DEVICE_TYPE_EP);

		ret = dra7xx_pcie_unaligned_memaccess(dev);
		if (ret)
			goto err_gpio;

		ret = dra7xx_add_pcie_ep(dra7xx, pdev);
		if (ret < 0)
			goto err_gpio;
		break;
	default:
		dev_err(dev, "INVALID device type %d\n", mode);
	}
	dra7xx->mode = mode;

	ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
			       IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		goto err_gpio;
	}

	return 0;

err_gpio:
err_get_sync:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	dra7xx_pcie_disable_phy(dra7xx);

err_link:
	while (--i >= 0)
		device_link_del(link[i]);

	return ret;
}

#ifdef CONFIG_PM_SLEEP
static int dra7xx_pcie_suspend(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	struct dw_pcie *pci = dra7xx->pci;
	u32 val;

	if (dra7xx->mode != DW_PCIE_RC_TYPE)
		return 0;

	/* clear MSE */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= ~PCI_COMMAND_MEMORY;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	return 0;
}

static int dra7xx_pcie_resume(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	struct dw_pcie *pci = dra7xx->pci;
	u32 val;

	if (dra7xx->mode != DW_PCIE_RC_TYPE)
		return 0;

	/* set MSE */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val |= PCI_COMMAND_MEMORY;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	return 0;
}
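/*
 * MSE (Memory Space Enable) is cleared during suspend above so that no
 * inbound memory transactions reach the fabric while the link is
 * quiescing, and set again on resume before normal traffic restarts.
 * The noirq callbacks below handle the PHYs, so in the standard PM
 * phase ordering the PHYs go down only after MSE has been cleared and
 * come back up before it is set again.
 */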
static int dra7xx_pcie_suspend_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);

	dra7xx_pcie_disable_phy(dra7xx);

	return 0;
}

static int dra7xx_pcie_resume_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int ret;

	ret = dra7xx_pcie_enable_phy(dra7xx);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		return ret;
	}

	return 0;
}
#endif

static void dra7xx_pcie_shutdown(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int ret;

	dra7xx_pcie_stop_link(dra7xx->pci);

	ret = pm_runtime_put_sync(dev);
	if (ret < 0)
		dev_dbg(dev, "pm_runtime_put_sync failed\n");

	pm_runtime_disable(dev);
	dra7xx_pcie_disable_phy(dra7xx);
}

static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
				      dra7xx_pcie_resume_noirq)
};

static struct platform_driver dra7xx_pcie_driver = {
	.driver = {
		.name	= "dra7-pcie",
		.of_match_table = of_dra7xx_pcie_match,
		.suppress_bind_attrs = true,
		.pm	= &dra7xx_pcie_pm_ops,
	},
	.shutdown = dra7xx_pcie_shutdown,
};
builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);