// SPDX-License-Identifier: GPL-2.0
/*
 * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs
 *
 * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/resource.h>
#include <linux/types.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/gpio/consumer.h>

#include "../../pci.h"
#include "pcie-designware.h"

/* PCIe controller wrapper DRA7XX configuration registers */

#define	PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN		0x0024
#define	PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN		0x0028
#define	ERR_SYS						BIT(0)
#define	ERR_FATAL					BIT(1)
#define	ERR_NONFATAL					BIT(2)
#define	ERR_COR						BIT(3)
#define	ERR_AXI						BIT(4)
#define	ERR_ECRC					BIT(5)
#define	PME_TURN_OFF					BIT(8)
#define	PME_TO_ACK					BIT(9)
#define	PM_PME						BIT(10)
#define	LINK_REQ_RST					BIT(11)
#define	LINK_UP_EVT					BIT(12)
#define	CFG_BME_EVT					BIT(13)
#define	CFG_MSE_EVT					BIT(14)
#define	INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
			ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
			LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)

#define	PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI		0x0034
#define	PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI		0x0038
#define	INTA						BIT(0)
#define	INTB						BIT(1)
#define	INTC						BIT(2)
#define	INTD						BIT(3)
#define	MSI						BIT(4)
#define	LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)

#define	PCIECTRL_TI_CONF_DEVICE_TYPE			0x0100
#define	DEVICE_TYPE_EP					0x0
#define	DEVICE_TYPE_LEG_EP				0x1
#define	DEVICE_TYPE_RC					0x4

#define	PCIECTRL_DRA7XX_CONF_DEVICE_CMD			0x0104
#define	LTSSM_EN					0x1

#define	PCIECTRL_DRA7XX_CONF_PHY_CS			0x010C
#define	LINK_UP						BIT(16)
#define	DRA7XX_CPU_TO_BUS_ADDR				0x0FFFFFFF

#define	EXP_CAP_ID_OFFSET				0x70

#define	PCIECTRL_TI_CONF_INTX_ASSERT			0x0124
#define	PCIECTRL_TI_CONF_INTX_DEASSERT			0x0128

#define	PCIECTRL_TI_CONF_MSI_XMT			0x012c
#define	MSI_REQ_GRANT					BIT(0)
#define	MSI_VECTOR_SHIFT				7

#define	PCIE_1LANE_2LANE_SELECTION			BIT(13)
#define	PCIE_B1C0_MODE_SEL				BIT(2)
#define	PCIE_B0_B1_TSYNCEN				BIT(0)

struct dra7xx_pcie {
	struct dw_pcie		*pci;
	void __iomem		*base;		/* DT ti_conf */
	int			phy_count;	/* DT phy-names count */
	struct phy		**phy;
	int			link_gen;
	struct irq_domain	*irq_domain;
	enum dw_pcie_device_mode mode;
};

struct dra7xx_pcie_of_data {
	enum dw_pcie_device_mode mode;
	u32 b1co_mode_sel_mask;
};

#define to_dra7xx_pcie(x)	dev_get_drvdata((x)->dev)

static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
{
	return readl(pcie->base + offset);
}

static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
				      u32 value)
{
	writel(value, pcie->base + offset);
}

static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
{
	return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
}
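/*
 * Link state is reported by the LINK_UP bit in the TI wrapper's
 * PCIECTRL_DRA7XX_CONF_PHY_CS register, not by the DesignWare core
 * itself.
 */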
static int dra7xx_pcie_link_up(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);

	return !!(reg & LINK_UP);
}

static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg &= ~LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
}

static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	struct device *dev = pci->dev;
	u32 reg;
	u32 exp_cap_off = EXP_CAP_ID_OFFSET;

	if (dw_pcie_link_up(pci)) {
		dev_err(dev, "link is already up\n");
		return 0;
	}

	if (dra7xx->link_gen == 1) {
		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
			     4, &reg);
		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
			dw_pcie_write(pci->dbi_base + exp_cap_off +
				      PCI_EXP_LNKCAP, 4, reg);
		}

		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
			     2, &reg);
		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
			dw_pcie_write(pci->dbi_base + exp_cap_off +
				      PCI_EXP_LNKCTL2, 2, reg);
		}
	}

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg |= LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

	return 0;
}

static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
			   LEG_EP_INTERRUPTS | MSI);

	dra7xx_pcie_writel(dra7xx,
			   PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
			   MSI | LEG_EP_INTERRUPTS);
}

static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
			   INTERRUPTS);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
			   INTERRUPTS);
}

static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
	dra7xx_pcie_enable_msi_interrupts(dra7xx);
}

static int dra7xx_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

	dw_pcie_setup_rc(pp);

	dra7xx_pcie_establish_link(pci);
	dw_pcie_wait_for_link(pci);
	dw_pcie_msi_init(pp);
	dra7xx_pcie_enable_interrupts(dra7xx);

	return 0;
}

static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = dra7xx_pcie_intx_map,
	.xlate = pci_irqd_intx_xlate,
};
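/*
 * Handle the pending vectors of one MSI controller and dispatch each
 * one through the MSI IRQ domain. Returns 1 if at least one status bit
 * was set, so the caller knows to rescan until the status register
 * reads 0.
 */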
static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned long val;
	int pos, irq;

	val = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
				(index * MSI_REG_CTRL_BLOCK_SIZE));
	if (!val)
		return 0;

	pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, 0);
	while (pos != MAX_MSI_IRQS_PER_CTRL) {
		irq = irq_find_mapping(pp->irq_domain,
				       (index * MAX_MSI_IRQS_PER_CTRL) + pos);
		generic_handle_irq(irq);
		pos++;
		pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, pos);
	}

	return 1;
}

static void dra7xx_pcie_handle_msi_irq(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	int ret, i, count, num_ctrls;

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	/*
	 * Need to make sure all MSI status bits read 0 before exiting.
	 * Else, new MSI IRQs are not registered by the wrapper. Have an
	 * upper bound on the loop and exit the handler in case of an IRQ
	 * flood to avoid locking up the system in interrupt context.
	 */
	count = 0;
	do {
		ret = 0;

		for (i = 0; i < num_ctrls; i++)
			ret |= dra7xx_pcie_handle_msi(pp, i);
		count++;
	} while (ret && count <= 1000);

	if (count > 1000)
		dev_warn_ratelimited(pci->dev,
				     "Too many MSI IRQs to handle\n");
}

static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct dra7xx_pcie *dra7xx;
	struct dw_pcie *pci;
	struct pcie_port *pp;
	unsigned long reg;
	u32 virq, bit;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	pci = to_dw_pcie_from_pp(pp);
	dra7xx = to_dra7xx_pcie(pci);

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);

	switch (reg) {
	case MSI:
		dra7xx_pcie_handle_msi_irq(pp);
		break;
	case INTA:
	case INTB:
	case INTC:
	case INTD:
		for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
			virq = irq_find_mapping(dra7xx->irq_domain, bit);
			if (virq)
				generic_handle_irq(virq);
		}
		break;
	}

	chained_irq_exit(chip, desc);
}

static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
{
	struct dra7xx_pcie *dra7xx = arg;
	struct dw_pcie *pci = dra7xx->pci;
	struct device *dev = pci->dev;
	struct dw_pcie_ep *ep = &pci->ep;
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);

	if (reg & ERR_SYS)
		dev_dbg(dev, "System Error\n");

	if (reg & ERR_FATAL)
		dev_dbg(dev, "Fatal Error\n");

	if (reg & ERR_NONFATAL)
		dev_dbg(dev, "Non Fatal Error\n");

	if (reg & ERR_COR)
		dev_dbg(dev, "Correctable Error\n");

	if (reg & ERR_AXI)
		dev_dbg(dev, "AXI tag lookup fatal Error\n");

	if (reg & ERR_ECRC)
		dev_dbg(dev, "ECRC Error\n");

	if (reg & PME_TURN_OFF)
		dev_dbg(dev,
			"Power Management Event Turn-Off message received\n");

	if (reg & PME_TO_ACK)
		dev_dbg(dev,
			"Power Management Turn-Off Ack message received\n");

	if (reg & PM_PME)
		dev_dbg(dev, "PM Power Management Event message received\n");

	if (reg & LINK_REQ_RST)
		dev_dbg(dev, "Link Request Reset\n");

	if (reg & LINK_UP_EVT) {
		if (dra7xx->mode == DW_PCIE_EP_TYPE)
			dw_pcie_ep_linkup(ep);
		dev_dbg(dev, "Link-up state change\n");
	}

	if (reg & CFG_BME_EVT)
		dev_dbg(dev, "CFG 'Bus Master Enable' change\n");

	if (reg & CFG_MSE_EVT)
		dev_dbg(dev, "CFG 'Memory Space Enable' change\n");

	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);

	return IRQ_HANDLED;
}
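/*
 * INTA..INTD share the wrapper's MSI interrupt line. A linear IRQ
 * domain over the interrupt-controller child node (PCI_NUM_INTX
 * entries) gives each INTx line its own virtual IRQ.
 */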
static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node = of_get_next_child(node, NULL);

	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -ENODEV;
	}

	irq_set_chained_handler_and_data(pp->irq, dra7xx_pcie_msi_irq_handler,
					 pp);
	dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						   &intx_domain_ops, pp);
	of_node_put(pcie_intc_node);
	if (!dra7xx->irq_domain) {
		dev_err(dev, "Failed to get an INTx IRQ domain\n");
		return -ENODEV;
	}

	return 0;
}

static void dra7xx_pcie_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target;

	msi_target = (u64)pp->msi_data;

	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);

	msg->data = d->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)d->hwirq, msg->address_hi, msg->address_lo);
}

static int dra7xx_pcie_msi_set_affinity(struct irq_data *d,
					const struct cpumask *mask,
					bool force)
{
	return -EINVAL;
}

static void dra7xx_pcie_bottom_mask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] |= BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res,
			   pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dra7xx_pcie_bottom_unmask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] &= ~BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res,
			   pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dra7xx_pcie_bottom_ack(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
}

static struct irq_chip dra7xx_pci_msi_bottom_irq_chip = {
	.name = "DRA7XX-PCI-MSI",
	.irq_ack = dra7xx_pcie_bottom_ack,
	.irq_compose_msi_msg = dra7xx_pcie_setup_msi_msg,
	.irq_set_affinity = dra7xx_pcie_msi_set_affinity,
	.irq_mask = dra7xx_pcie_bottom_mask,
	.irq_unmask = dra7xx_pcie_bottom_unmask,
};
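/*
 * Mask all vectors and enable every controller up front, then install
 * the bottom irq_chip defined above; MSIs are subsequently acked and
 * dispatched by the chained handler in this file (see
 * dra7xx_pcie_handle_msi_irq()).
 */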
static int dra7xx_pcie_msi_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u32 ctrl, num_ctrls;

	pp->msi_irq_chip = &dra7xx_pci_msi_bottom_irq_chip;

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	/* Initialize IRQ Status array */
	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
		pp->irq_mask[ctrl] = ~0;
		dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
				   (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
				   pp->irq_mask[ctrl]);
		dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
				   (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
				   ~0);
	}

	return dw_pcie_allocate_domains(pp);
}

static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
	.host_init = dra7xx_pcie_host_init,
	.msi_host_init = dra7xx_pcie_msi_host_init,
};

static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	enum pci_barno bar;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
		dw_pcie_ep_reset_bar(pci, bar);

	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
}

static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
	mdelay(1);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1);
}

static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
				      u8 interrupt_num)
{
	u32 reg;

	reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT;
	reg |= MSI_REQ_GRANT;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
}

static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				 enum pci_epc_irq_type type, u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		dra7xx_pcie_raise_legacy_irq(dra7xx);
		break;
	case PCI_EPC_IRQ_MSI:
		dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
		break;
	default:
		dev_err(pci->dev, "UNKNOWN IRQ type\n");
	}

	return 0;
}

static const struct pci_epc_features dra7xx_pcie_epc_features = {
	.linkup_notifier = true,
	.msi_capable = true,
	.msix_capable = false,
};

static const struct pci_epc_features*
dra7xx_pcie_get_features(struct dw_pcie_ep *ep)
{
	return &dra7xx_pcie_epc_features;
}

static const struct dw_pcie_ep_ops pcie_ep_ops = {
	.ep_init = dra7xx_pcie_ep_init,
	.raise_irq = dra7xx_pcie_raise_irq,
	.get_features = dra7xx_pcie_get_features,
};

static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
				     struct platform_device *pdev)
{
	int ret;
	struct dw_pcie_ep *ep;
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci = dra7xx->pci;

	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics");
	pci->dbi_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2");
	pci->dbi_base2 = devm_ioremap_resource(dev, res);
	if (IS_ERR(pci->dbi_base2))
		return PTR_ERR(pci->dbi_base2);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
	if (!res)
		return -EINVAL;

	ep->phys_base = res->start;
	ep->addr_size = resource_size(res);

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "failed to initialize endpoint\n");
		return ret;
	}

	return 0;
}
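/*
 * Root-complex setup. Note that the MSI/INTx interrupt is platform
 * IRQ index 1; index 0 is the main wrapper interrupt requested in
 * dra7xx_pcie_probe().
 */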
static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
				       struct platform_device *pdev)
{
	int ret;
	struct dw_pcie *pci = dra7xx->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;
	struct resource *res;

	pp->irq = platform_get_irq(pdev, 1);
	if (pp->irq < 0) {
		dev_err(dev, "missing IRQ resource\n");
		return pp->irq;
	}

	ret = dra7xx_pcie_init_irq_domain(pp);
	if (ret < 0)
		return ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
	pci->dbi_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	pp->ops = &dra7xx_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}

static const struct dw_pcie_ops dw_pcie_ops = {
	.cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup,
	.start_link = dra7xx_pcie_establish_link,
	.stop_link = dra7xx_pcie_stop_link,
	.link_up = dra7xx_pcie_link_up,
};

static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx)
{
	int phy_count = dra7xx->phy_count;

	while (phy_count--) {
		phy_power_off(dra7xx->phy[phy_count]);
		phy_exit(dra7xx->phy[phy_count]);
	}
}

static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx)
{
	int phy_count = dra7xx->phy_count;
	int ret;
	int i;

	for (i = 0; i < phy_count; i++) {
		ret = phy_set_mode(dra7xx->phy[i], PHY_MODE_PCIE);
		if (ret < 0)
			goto err_phy;

		ret = phy_init(dra7xx->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_power_on(dra7xx->phy[i]);
		if (ret < 0) {
			phy_exit(dra7xx->phy[i]);
			goto err_phy;
		}
	}

	return 0;

err_phy:
	while (--i >= 0) {
		phy_power_off(dra7xx->phy[i]);
		phy_exit(dra7xx->phy[i]);
	}

	return ret;
}

static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = {
	.mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
	.mode = DW_PCIE_EP_TYPE,
};

static const struct dra7xx_pcie_of_data dra746_pcie_rc_of_data = {
	.b1co_mode_sel_mask = BIT(2),
	.mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra726_pcie_rc_of_data = {
	.b1co_mode_sel_mask = GENMASK(3, 2),
	.mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra746_pcie_ep_of_data = {
	.b1co_mode_sel_mask = BIT(2),
	.mode = DW_PCIE_EP_TYPE,
};

static const struct dra7xx_pcie_of_data dra726_pcie_ep_of_data = {
	.b1co_mode_sel_mask = GENMASK(3, 2),
	.mode = DW_PCIE_EP_TYPE,
};

static const struct of_device_id of_dra7xx_pcie_match[] = {
	{
		.compatible = "ti,dra7-pcie",
		.data = &dra7xx_pcie_rc_of_data,
	},
	{
		.compatible = "ti,dra7-pcie-ep",
		.data = &dra7xx_pcie_ep_of_data,
	},
	{
		.compatible = "ti,dra746-pcie-rc",
		.data = &dra746_pcie_rc_of_data,
	},
	{
		.compatible = "ti,dra726-pcie-rc",
		.data = &dra726_pcie_rc_of_data,
	},
	{
		.compatible = "ti,dra746-pcie-ep",
		.data = &dra746_pcie_ep_of_data,
	},
	{
		.compatible = "ti,dra726-pcie-ep",
		.data = &dra726_pcie_ep_of_data,
	},
	{},
};
/*
 * dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
 * @dev: device where the workaround should be applied
 *
 * Accesses to the PCIe slave port that are not 32-bit aligned will result
 * in incorrect mapping to TLP Address and Byte enable fields. Therefore,
 * byte and half-word accesses are not possible at byte offsets 0x1, 0x2,
 * or 0x3.
 *
 * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
 */
static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
{
	int ret;
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	struct regmap *regmap;

	regmap = syscon_regmap_lookup_by_phandle(np,
						 "ti,syscon-unaligned-access");
	if (IS_ERR(regmap)) {
		dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
		return -EINVAL;
	}

	ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
					       2, 0, &args);
	if (ret) {
		dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
		return ret;
	}

	ret = regmap_update_bits(regmap, args.args[0], args.args[1],
				 args.args[1]);
	if (ret)
		dev_err(dev, "failed to enable unaligned access\n");

	of_node_put(args.np);

	return ret;
}
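/*
 * Two-lane operation: program the control-module lane-selection
 * register (referenced by the "ti,syscon-lane-sel" phandle plus a
 * register offset) to select B1C0 mode and enable tsync for both PHYs.
 */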
static int dra7xx_pcie_configure_two_lane(struct device *dev,
					  u32 b1co_mode_sel_mask)
{
	struct device_node *np = dev->of_node;
	struct regmap *pcie_syscon;
	unsigned int pcie_reg;
	u32 mask;
	u32 val;

	pcie_syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel");
	if (IS_ERR(pcie_syscon)) {
		dev_err(dev, "unable to get ti,syscon-lane-sel\n");
		return -EINVAL;
	}

	if (of_property_read_u32_index(np, "ti,syscon-lane-sel", 1,
				       &pcie_reg)) {
		dev_err(dev, "couldn't get lane selection reg offset\n");
		return -EINVAL;
	}

	mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN;
	val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN;
	regmap_update_bits(pcie_syscon, pcie_reg, mask, val);

	return 0;
}

static int __init dra7xx_pcie_probe(struct platform_device *pdev)
{
	u32 reg;
	int ret;
	int irq;
	int i;
	int phy_count;
	struct phy **phy;
	struct device_link **link;
	void __iomem *base;
	struct resource *res;
	struct dw_pcie *pci;
	struct dra7xx_pcie *dra7xx;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	char name[10];
	struct gpio_desc *reset;
	const struct of_device_id *match;
	const struct dra7xx_pcie_of_data *data;
	enum dw_pcie_device_mode mode;
	u32 b1co_mode_sel_mask;

	match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev);
	if (!match)
		return -EINVAL;

	data = (struct dra7xx_pcie_of_data *)match->data;
	mode = (enum dw_pcie_device_mode)data->mode;
	b1co_mode_sel_mask = data->b1co_mode_sel_mask;

	dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
	if (!dra7xx)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "missing IRQ resource: %d\n", irq);
		return irq;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
	base = devm_ioremap(dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	phy_count = of_property_count_strings(np, "phy-names");
	if (phy_count < 0) {
		dev_err(dev, "unable to find the strings\n");
		return phy_count;
	}

	phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	for (i = 0; i < phy_count; i++) {
		snprintf(name, sizeof(name), "pcie-phy%d", i);
		phy[i] = devm_phy_get(dev, name);
		if (IS_ERR(phy[i]))
			return PTR_ERR(phy[i]);

		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
		if (!link[i]) {
			ret = -EINVAL;
			goto err_link;
		}
	}

	dra7xx->base = base;
	dra7xx->phy = phy;
	dra7xx->pci = pci;
	dra7xx->phy_count = phy_count;

	if (phy_count == 2) {
		ret = dra7xx_pcie_configure_two_lane(dev, b1co_mode_sel_mask);
		if (ret < 0)
			dra7xx->phy_count = 1; /* Fallback to x1 lane mode */
	}

	ret = dra7xx_pcie_enable_phy(dra7xx);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		return ret;
	}

	platform_set_drvdata(pdev, dra7xx);

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync failed\n");
		goto err_get_sync;
	}

	reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
	if (IS_ERR(reset)) {
		ret = PTR_ERR(reset);
		dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
		goto err_gpio;
	}

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg &= ~LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

	dra7xx->link_gen = of_pci_get_max_link_speed(np);
	if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2)
		dra7xx->link_gen = 2;

	switch (mode) {
	case DW_PCIE_RC_TYPE:
		if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) {
			ret = -ENODEV;
			goto err_gpio;
		}

		dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
				   DEVICE_TYPE_RC);

		ret = dra7xx_pcie_unaligned_memaccess(dev);
		if (ret)
			dev_err(dev, "WA for Errata i870 not applied\n");

		ret = dra7xx_add_pcie_port(dra7xx, pdev);
		if (ret < 0)
			goto err_gpio;
		break;
	case DW_PCIE_EP_TYPE:
		if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP)) {
			ret = -ENODEV;
			goto err_gpio;
		}

		dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
				   DEVICE_TYPE_EP);

		ret = dra7xx_pcie_unaligned_memaccess(dev);
		if (ret)
			goto err_gpio;

		ret = dra7xx_add_pcie_ep(dra7xx, pdev);
		if (ret < 0)
			goto err_gpio;
		break;
	default:
		dev_err(dev, "INVALID device type %d\n", mode);
	}
	dra7xx->mode = mode;

	ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
			       IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		goto err_gpio;
	}

	return 0;

err_gpio:
	pm_runtime_put(dev);

err_get_sync:
	pm_runtime_disable(dev);
	dra7xx_pcie_disable_phy(dra7xx);

err_link:
	while (--i >= 0)
		device_link_del(link[i]);

	return ret;
}
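/*
 * System sleep: clear the Memory Space Enable bit on suspend and
 * restore it on resume (RC mode only); the noirq phase powers the PHYs
 * off last and back on first.
 */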
#ifdef CONFIG_PM_SLEEP
static int dra7xx_pcie_suspend(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	struct dw_pcie *pci = dra7xx->pci;
	u32 val;

	if (dra7xx->mode != DW_PCIE_RC_TYPE)
		return 0;

	/* clear MSE */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= ~PCI_COMMAND_MEMORY;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	return 0;
}

static int dra7xx_pcie_resume(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	struct dw_pcie *pci = dra7xx->pci;
	u32 val;

	if (dra7xx->mode != DW_PCIE_RC_TYPE)
		return 0;

	/* set MSE */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val |= PCI_COMMAND_MEMORY;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	return 0;
}

static int dra7xx_pcie_suspend_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);

	dra7xx_pcie_disable_phy(dra7xx);

	return 0;
}

static int dra7xx_pcie_resume_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int ret;

	ret = dra7xx_pcie_enable_phy(dra7xx);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		return ret;
	}

	return 0;
}
#endif

static void dra7xx_pcie_shutdown(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int ret;

	dra7xx_pcie_stop_link(dra7xx->pci);

	ret = pm_runtime_put_sync(dev);
	if (ret < 0)
		dev_dbg(dev, "pm_runtime_put_sync failed\n");

	pm_runtime_disable(dev);
	dra7xx_pcie_disable_phy(dra7xx);
}

static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
				      dra7xx_pcie_resume_noirq)
};

static struct platform_driver dra7xx_pcie_driver = {
	.driver = {
		.name	= "dra7-pcie",
		.of_match_table = of_dra7xx_pcie_match,
		.suppress_bind_attrs = true,
		.pm	= &dra7xx_pcie_pm_ops,
	},
	.shutdown = dra7xx_pcie_shutdown,
};
builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);