// SPDX-License-Identifier: GPL-2.0
/*
 * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs
 *
 * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/resource.h>
#include <linux/types.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/gpio/consumer.h>

#include "../../pci.h"
#include "pcie-designware.h"

/* PCIe controller wrapper DRA7XX configuration registers */

#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN             0x0024
#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN         0x0028
#define ERR_SYS                                         BIT(0)
#define ERR_FATAL                                       BIT(1)
#define ERR_NONFATAL                                    BIT(2)
#define ERR_COR                                         BIT(3)
#define ERR_AXI                                         BIT(4)
#define ERR_ECRC                                        BIT(5)
#define PME_TURN_OFF                                    BIT(8)
#define PME_TO_ACK                                      BIT(9)
#define PM_PME                                          BIT(10)
#define LINK_REQ_RST                                    BIT(11)
#define LINK_UP_EVT                                     BIT(12)
#define CFG_BME_EVT                                     BIT(13)
#define CFG_MSE_EVT                                     BIT(14)
#define INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
                    ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
                    LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)

#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI              0x0034
#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI          0x0038
#define INTA                                            BIT(0)
#define INTB                                            BIT(1)
#define INTC                                            BIT(2)
#define INTD                                            BIT(3)
#define MSI                                             BIT(4)
#define LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)

#define PCIECTRL_TI_CONF_DEVICE_TYPE                    0x0100
#define DEVICE_TYPE_EP                                  0x0
#define DEVICE_TYPE_LEG_EP                              0x1
#define DEVICE_TYPE_RC                                  0x4

#define PCIECTRL_DRA7XX_CONF_DEVICE_CMD                 0x0104
#define LTSSM_EN                                        0x1

#define PCIECTRL_DRA7XX_CONF_PHY_CS                     0x010C
#define LINK_UP                                         BIT(16)
#define DRA7XX_CPU_TO_BUS_ADDR                          0x0FFFFFFF

#define EXP_CAP_ID_OFFSET                               0x70

#define PCIECTRL_TI_CONF_INTX_ASSERT                    0x0124
#define PCIECTRL_TI_CONF_INTX_DEASSERT                  0x0128

#define PCIECTRL_TI_CONF_MSI_XMT                        0x012c
#define MSI_REQ_GRANT                                   BIT(0)
#define MSI_VECTOR_SHIFT                                7

#define PCIE_1LANE_2LANE_SELECTION                      BIT(13)
#define PCIE_B1C0_MODE_SEL                              BIT(2)
#define PCIE_B0_B1_TSYNCEN                              BIT(0)

struct dra7xx_pcie {
        struct dw_pcie          *pci;
        void __iomem            *base;          /* DT ti_conf */
        int                     phy_count;      /* DT phy-names count */
        struct phy              **phy;
        int                     link_gen;
        struct irq_domain       *irq_domain;
        enum dw_pcie_device_mode mode;
};

struct dra7xx_pcie_of_data {
        enum dw_pcie_device_mode mode;
        u32 b1co_mode_sel_mask;
};

#define to_dra7xx_pcie(x)       dev_get_drvdata((x)->dev)

static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
{
        return readl(pcie->base + offset);
}

static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
                                      u32 value)
{
        writel(value, pcie->base + offset);
}

static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
{
        return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
}

static int dra7xx_pcie_link_up(struct dw_pcie *pci)
{
        struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
        u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);

        return !!(reg & LINK_UP);
}

static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
{
        struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
        u32 reg;

        reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
        reg &= ~LTSSM_EN;
        dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
}

static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
{
        struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
        struct device *dev = pci->dev;
        u32 reg;
        u32 exp_cap_off = EXP_CAP_ID_OFFSET;

        if (dw_pcie_link_up(pci)) {
                dev_err(dev, "link is already up\n");
                return 0;
        }

        if (dra7xx->link_gen == 1) {
                dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
                             4, &reg);
                if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
                        reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
                        reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
                        dw_pcie_write(pci->dbi_base + exp_cap_off +
                                      PCI_EXP_LNKCAP, 4, reg);
                }

                dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
                             2, &reg);
                if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
                        reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
                        reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
                        dw_pcie_write(pci->dbi_base + exp_cap_off +
                                      PCI_EXP_LNKCTL2, 2, reg);
                }
        }

        reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
        reg |= LTSSM_EN;
        dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

        return 0;
}

static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
{
        dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
                           LEG_EP_INTERRUPTS | MSI);

        dra7xx_pcie_writel(dra7xx,
                           PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
                           MSI | LEG_EP_INTERRUPTS);
}

static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
{
        dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
                           INTERRUPTS);
        dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
                           INTERRUPTS);
}

static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
{
        dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
        dra7xx_pcie_enable_msi_interrupts(dra7xx);
}

static int dra7xx_pcie_host_init(struct pcie_port *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

        dw_pcie_setup_rc(pp);

        dra7xx_pcie_establish_link(pci);
        dw_pcie_wait_for_link(pci);
        dw_pcie_msi_init(pp);
        dra7xx_pcie_enable_interrupts(dra7xx);

        return 0;
}

static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
                                irq_hw_number_t hwirq)
{
        irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
        irq_set_chip_data(irq, domain->host_data);

        return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
        .map = dra7xx_pcie_intx_map,
        .xlate = pci_irqd_intx_xlate,
};

static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        unsigned long val;
        int pos, irq;

        val = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
                                (index * MSI_REG_CTRL_BLOCK_SIZE));
        if (!val)
                return 0;

        pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, 0);
        while (pos != MAX_MSI_IRQS_PER_CTRL) {
                irq = irq_find_mapping(pp->irq_domain,
                                       (index * MAX_MSI_IRQS_PER_CTRL) + pos);
                generic_handle_irq(irq);
                pos++;
                pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, pos);
        }

        return 1;
}

static void dra7xx_pcie_handle_msi_irq(struct pcie_port *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        int ret, i, count, num_ctrls;

        num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

        /*
         * Need to make sure all MSI status bits read 0 before exiting.
         * Otherwise, new MSI IRQs are not registered by the wrapper. Have an
         * upper bound for the loop and exit the handler in case of an IRQ
         * flood to avoid locking up the system in interrupt context.
         */
        count = 0;
        do {
                ret = 0;

                for (i = 0; i < num_ctrls; i++)
                        ret |= dra7xx_pcie_handle_msi(pp, i);
                count++;
        } while (ret && count <= 1000);

        if (count > 1000)
                dev_warn_ratelimited(pci->dev,
                                     "Too many MSI IRQs to handle\n");
}

static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct dra7xx_pcie *dra7xx;
        struct dw_pcie *pci;
        struct pcie_port *pp;
        unsigned long reg;
        u32 virq, bit;

        chained_irq_enter(chip, desc);

        pp = irq_desc_get_handler_data(desc);
        pci = to_dw_pcie_from_pp(pp);
        dra7xx = to_dra7xx_pcie(pci);

        reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
        dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);

        switch (reg) {
        case MSI:
                dra7xx_pcie_handle_msi_irq(pp);
                break;
        case INTA:
        case INTB:
        case INTC:
        case INTD:
                for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
                        virq = irq_find_mapping(dra7xx->irq_domain, bit);
                        if (virq)
                                generic_handle_irq(virq);
                }
                break;
        }

        chained_irq_exit(chip, desc);
}

static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
{
        struct dra7xx_pcie *dra7xx = arg;
        struct dw_pcie *pci = dra7xx->pci;
        struct device *dev = pci->dev;
        struct dw_pcie_ep *ep = &pci->ep;
        u32 reg;

        reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);

        if (reg & ERR_SYS)
                dev_dbg(dev, "System Error\n");

        if (reg & ERR_FATAL)
                dev_dbg(dev, "Fatal Error\n");

        if (reg & ERR_NONFATAL)
                dev_dbg(dev, "Non Fatal Error\n");

        if (reg & ERR_COR)
                dev_dbg(dev, "Correctable Error\n");

        if (reg & ERR_AXI)
                dev_dbg(dev, "AXI tag lookup fatal Error\n");

        if (reg & ERR_ECRC)
                dev_dbg(dev, "ECRC Error\n");

        if (reg & PME_TURN_OFF)
                dev_dbg(dev,
                        "Power Management Event Turn-Off message received\n");

        if (reg & PME_TO_ACK)
                dev_dbg(dev,
                        "Power Management Turn-Off Ack message received\n");

        if (reg & PM_PME)
                dev_dbg(dev, "PM Power Management Event message received\n");

        if (reg & LINK_REQ_RST)
                dev_dbg(dev, "Link Request Reset\n");

        if (reg & LINK_UP_EVT) {
                if (dra7xx->mode == DW_PCIE_EP_TYPE)
                        dw_pcie_ep_linkup(ep);
                dev_dbg(dev, "Link-up state change\n");
        }

        if (reg & CFG_BME_EVT)
                dev_dbg(dev, "CFG 'Bus Master Enable' change\n");

        if (reg & CFG_MSE_EVT)
                dev_dbg(dev, "CFG 'Memory Space Enable' change\n");

        dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);

        return IRQ_HANDLED;
}

static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct device *dev = pci->dev;
        struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
        struct device_node *node = dev->of_node;
        struct device_node *pcie_intc_node = of_get_next_child(node, NULL);

        if (!pcie_intc_node) {
                dev_err(dev, "No PCIe Intc node found\n");
                return -ENODEV;
        }

        irq_set_chained_handler_and_data(pp->irq, dra7xx_pcie_msi_irq_handler,
                                         pp);
        dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
                                                   &intx_domain_ops, pp);
        of_node_put(pcie_intc_node);
        if (!dra7xx->irq_domain) {
                dev_err(dev, "Failed to get an INTx IRQ domain\n");
                return -ENODEV;
        }

        return 0;
}

static void dra7xx_pcie_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
        struct pcie_port *pp = irq_data_get_irq_chip_data(d);
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        u64 msi_target;

        msi_target = (u64)pp->msi_data;

        msg->address_lo = lower_32_bits(msi_target);
        msg->address_hi = upper_32_bits(msi_target);

        msg->data = d->hwirq;

        dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
                (int)d->hwirq, msg->address_hi, msg->address_lo);
}

static int dra7xx_pcie_msi_set_affinity(struct irq_data *d,
                                        const struct cpumask *mask,
                                        bool force)
{
        return -EINVAL;
}

static void dra7xx_pcie_bottom_mask(struct irq_data *d)
{
        struct pcie_port *pp = irq_data_get_irq_chip_data(d);
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        unsigned int res, bit, ctrl;
        unsigned long flags;

        raw_spin_lock_irqsave(&pp->lock, flags);

        ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
        res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
        bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

        pp->irq_mask[ctrl] |= BIT(bit);
        dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res,
                           pp->irq_mask[ctrl]);

        raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dra7xx_pcie_bottom_unmask(struct irq_data *d)
{
        struct pcie_port *pp = irq_data_get_irq_chip_data(d);
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        unsigned int res, bit, ctrl;
        unsigned long flags;

        raw_spin_lock_irqsave(&pp->lock, flags);

        ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
        res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
        bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

        pp->irq_mask[ctrl] &= ~BIT(bit);
        dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res,
                           pp->irq_mask[ctrl]);

        raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dra7xx_pcie_bottom_ack(struct irq_data *d)
{
        struct pcie_port *pp = irq_data_get_irq_chip_data(d);
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        unsigned int res, bit, ctrl;

        ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
        res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
        bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

        dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
}

static struct irq_chip dra7xx_pci_msi_bottom_irq_chip = {
        .name = "DRA7XX-PCI-MSI",
        .irq_ack = dra7xx_pcie_bottom_ack,
        .irq_compose_msi_msg = dra7xx_pcie_setup_msi_msg,
        .irq_set_affinity = dra7xx_pcie_msi_set_affinity,
        .irq_mask = dra7xx_pcie_bottom_mask,
        .irq_unmask = dra7xx_pcie_bottom_unmask,
};

static int dra7xx_pcie_msi_host_init(struct pcie_port *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        u32 ctrl, num_ctrls;

        pp->msi_irq_chip = &dra7xx_pci_msi_bottom_irq_chip;

        num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
        /* Initialize IRQ Status array */
        for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
                pp->irq_mask[ctrl] = ~0;
                dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
                                   (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
                                   pp->irq_mask[ctrl]);
                dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
                                   (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
                                   ~0);
        }

        return dw_pcie_allocate_domains(pp);
}

static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
        .host_init = dra7xx_pcie_host_init,
        .msi_host_init = dra7xx_pcie_msi_host_init,
};

static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
{
        struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
        struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
        enum pci_barno bar;

        for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
                dw_pcie_ep_reset_bar(pci, bar);

        dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
}

static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
{
        /* Generate a 1 ms INTx pulse: assert, wait, then deassert */
        dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
        mdelay(1);
        dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1);
}

static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
                                      u8 interrupt_num)
{
        u32 reg;

        reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT;
        reg |= MSI_REQ_GRANT;
        dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
}

static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
                                 enum pci_epc_irq_type type, u16 interrupt_num)
{
        struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
        struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

        switch (type) {
        case PCI_EPC_IRQ_LEGACY:
                dra7xx_pcie_raise_legacy_irq(dra7xx);
                break;
        case PCI_EPC_IRQ_MSI:
                dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
                break;
        default:
                dev_err(pci->dev, "UNKNOWN IRQ type\n");
        }

        return 0;
}

static const struct pci_epc_features dra7xx_pcie_epc_features = {
        .linkup_notifier = true,
        .msi_capable = true,
        .msix_capable = false,
};

static const struct pci_epc_features*
dra7xx_pcie_get_features(struct dw_pcie_ep *ep)
{
        return &dra7xx_pcie_epc_features;
}

static const struct dw_pcie_ep_ops pcie_ep_ops = {
        .ep_init = dra7xx_pcie_ep_init,
        .raise_irq = dra7xx_pcie_raise_irq,
        .get_features = dra7xx_pcie_get_features,
};

static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
                                     struct platform_device *pdev)
{
        int ret;
        struct dw_pcie_ep *ep;
        struct resource *res;
        struct device *dev = &pdev->dev;
        struct dw_pcie *pci = dra7xx->pci;

        ep = &pci->ep;
        ep->ops = &pcie_ep_ops;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics");
        pci->dbi_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(pci->dbi_base))
                return PTR_ERR(pci->dbi_base);

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2");
        pci->dbi_base2 = devm_ioremap_resource(dev, res);
        if (IS_ERR(pci->dbi_base2))
                return PTR_ERR(pci->dbi_base2);

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
        if (!res)
                return -EINVAL;

        ep->phys_base = res->start;
        ep->addr_size = resource_size(res);

        ret = dw_pcie_ep_init(ep);
        if (ret) {
                dev_err(dev, "failed to initialize endpoint\n");
                return ret;
        }

        return 0;
}

static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
                                       struct platform_device *pdev)
{
        int ret;
        struct dw_pcie *pci = dra7xx->pci;
        struct pcie_port *pp = &pci->pp;
        struct device *dev = pci->dev;
        struct resource *res;

        pp->irq = platform_get_irq(pdev, 1);
        if (pp->irq < 0) {
                dev_err(dev, "missing IRQ resource\n");
                return pp->irq;
        }

        ret = dra7xx_pcie_init_irq_domain(pp);
        if (ret < 0)
                return ret;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
        pci->dbi_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(pci->dbi_base))
                return PTR_ERR(pci->dbi_base);

        pp->ops = &dra7xx_pcie_host_ops;

        ret = dw_pcie_host_init(pp);
        if (ret) {
                dev_err(dev, "failed to initialize host\n");
                return ret;
        }

        return 0;
}

static const struct dw_pcie_ops dw_pcie_ops = {
        .cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup,
        .start_link = dra7xx_pcie_establish_link,
        .stop_link = dra7xx_pcie_stop_link,
        .link_up = dra7xx_pcie_link_up,
};

static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx)
{
        int phy_count = dra7xx->phy_count;

        while (phy_count--) {
                phy_power_off(dra7xx->phy[phy_count]);
                phy_exit(dra7xx->phy[phy_count]);
        }
}

static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx)
{
        int phy_count = dra7xx->phy_count;
        int ret;
        int i;

        for (i = 0; i < phy_count; i++) {
                ret = phy_set_mode(dra7xx->phy[i], PHY_MODE_PCIE);
                if (ret < 0)
                        goto err_phy;

                ret = phy_init(dra7xx->phy[i]);
                if (ret < 0)
                        goto err_phy;

                ret = phy_power_on(dra7xx->phy[i]);
                if (ret < 0) {
                        phy_exit(dra7xx->phy[i]);
                        goto err_phy;
                }
        }

        return 0;

err_phy:
        while (--i >= 0) {
                phy_power_off(dra7xx->phy[i]);
                phy_exit(dra7xx->phy[i]);
        }

        return ret;
}

static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = {
        .mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
        .mode = DW_PCIE_EP_TYPE,
};

static const struct dra7xx_pcie_of_data dra746_pcie_rc_of_data = {
        .b1co_mode_sel_mask = BIT(2),
        .mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra726_pcie_rc_of_data = {
        .b1co_mode_sel_mask = GENMASK(3, 2),
        .mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra746_pcie_ep_of_data = {
        .b1co_mode_sel_mask = BIT(2),
        .mode = DW_PCIE_EP_TYPE,
};

static const struct dra7xx_pcie_of_data dra726_pcie_ep_of_data = {
        .b1co_mode_sel_mask = GENMASK(3, 2),
        .mode = DW_PCIE_EP_TYPE,
};

static const struct of_device_id of_dra7xx_pcie_match[] = {
        {
                .compatible = "ti,dra7-pcie",
                .data = &dra7xx_pcie_rc_of_data,
        },
        {
                .compatible = "ti,dra7-pcie-ep",
                .data = &dra7xx_pcie_ep_of_data,
        },
        {
                .compatible = "ti,dra746-pcie-rc",
                .data = &dra746_pcie_rc_of_data,
        },
        {
                .compatible = "ti,dra726-pcie-rc",
                .data = &dra726_pcie_rc_of_data,
        },
        {
                .compatible = "ti,dra746-pcie-ep",
                .data = &dra746_pcie_ep_of_data,
        },
        {
                .compatible = "ti,dra726-pcie-ep",
                .data = &dra726_pcie_ep_of_data,
        },
        {},
};
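
/*
 * Illustrative device-tree fragment (the phandle name and cell values are
 * an assumption for this sketch, not taken from a specific board file) for
 * the property consumed by dra7xx_pcie_unaligned_memaccess() below: a
 * syscon phandle followed by the register offset and the bit mask to set,
 * e.g.
 *
 *      ti,syscon-unaligned-access = <&scm_conf1 0x14 0x2>;
 */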

/*
 * dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
 * @dev: device for the PCIe controller instance the workaround is applied to
 *
 * Accesses to the PCIe slave port that are not 32-bit aligned result in an
 * incorrect mapping to the TLP Address and Byte enable fields. Therefore,
 * byte and half-word accesses are not possible at byte offsets 0x1, 0x2,
 * or 0x3.
 *
 * To avoid this issue, set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
 */
static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
{
        int ret;
        struct device_node *np = dev->of_node;
        struct of_phandle_args args;
        struct regmap *regmap;

        regmap = syscon_regmap_lookup_by_phandle(np,
                                                 "ti,syscon-unaligned-access");
        if (IS_ERR(regmap)) {
                dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
                return -EINVAL;
        }

        ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
                                               2, 0, &args);
        if (ret) {
                dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
                return ret;
        }

        ret = regmap_update_bits(regmap, args.args[0], args.args[1],
                                 args.args[1]);
        if (ret)
                dev_err(dev, "failed to enable unaligned access\n");

        of_node_put(args.np);

        return ret;
}
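
/*
 * Illustrative device-tree fragment (phandle name and offset are assumptions
 * for this sketch) for the property consumed by
 * dra7xx_pcie_configure_two_lane() below: a syscon phandle followed by the
 * offset of the lane-selection register, e.g.
 *
 *      ti,syscon-lane-sel = <&scm_conf_pcie 0x18>;
 */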

static int dra7xx_pcie_configure_two_lane(struct device *dev,
                                          u32 b1co_mode_sel_mask)
{
        struct device_node *np = dev->of_node;
        struct regmap *pcie_syscon;
        unsigned int pcie_reg;
        u32 mask;
        u32 val;

        pcie_syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel");
        if (IS_ERR(pcie_syscon)) {
                dev_err(dev, "unable to get ti,syscon-lane-sel\n");
                return -EINVAL;
        }

        if (of_property_read_u32_index(np, "ti,syscon-lane-sel", 1,
                                       &pcie_reg)) {
                dev_err(dev, "couldn't get lane selection reg offset\n");
                return -EINVAL;
        }

        mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN;
        val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN;
        regmap_update_bits(pcie_syscon, pcie_reg, mask, val);

        return 0;
}

static int __init dra7xx_pcie_probe(struct platform_device *pdev)
{
        u32 reg;
        int ret;
        int irq;
        int i;
        int phy_count;
        struct phy **phy;
        struct device_link **link;
        void __iomem *base;
        struct dw_pcie *pci;
        struct dra7xx_pcie *dra7xx;
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
        char name[10];
        struct gpio_desc *reset;
        const struct of_device_id *match;
        const struct dra7xx_pcie_of_data *data;
        enum dw_pcie_device_mode mode;
        u32 b1co_mode_sel_mask;

        match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev);
        if (!match)
                return -EINVAL;

        data = (struct dra7xx_pcie_of_data *)match->data;
        mode = (enum dw_pcie_device_mode)data->mode;
        b1co_mode_sel_mask = data->b1co_mode_sel_mask;

        dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
        if (!dra7xx)
                return -ENOMEM;

        pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
        if (!pci)
                return -ENOMEM;

        pci->dev = dev;
        pci->ops = &dw_pcie_ops;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(dev, "missing IRQ resource: %d\n", irq);
                return irq;
        }

        base = devm_platform_ioremap_resource_byname(pdev, "ti_conf");
        if (IS_ERR(base))
                return PTR_ERR(base);

        phy_count = of_property_count_strings(np, "phy-names");
        if (phy_count < 0) {
                dev_err(dev, "unable to find the strings\n");
                return phy_count;
        }

        phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
        if (!phy)
                return -ENOMEM;

        link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
        if (!link)
                return -ENOMEM;

        for (i = 0; i < phy_count; i++) {
                snprintf(name, sizeof(name), "pcie-phy%d", i);
                phy[i] = devm_phy_get(dev, name);
                if (IS_ERR(phy[i]))
                        return PTR_ERR(phy[i]);

                link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
                if (!link[i]) {
                        ret = -EINVAL;
                        goto err_link;
                }
        }

        dra7xx->base = base;
        dra7xx->phy = phy;
        dra7xx->pci = pci;
        dra7xx->phy_count = phy_count;

        if (phy_count == 2) {
                ret = dra7xx_pcie_configure_two_lane(dev, b1co_mode_sel_mask);
                if (ret < 0)
                        dra7xx->phy_count = 1; /* Fallback to x1 lane mode */
        }

        ret = dra7xx_pcie_enable_phy(dra7xx);
        if (ret) {
                dev_err(dev, "failed to enable phy\n");
                return ret;
        }

        platform_set_drvdata(pdev, dra7xx);

        pm_runtime_enable(dev);
        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                dev_err(dev, "pm_runtime_get_sync failed\n");
                goto err_get_sync;
        }

        reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
        if (IS_ERR(reset)) {
                ret = PTR_ERR(reset);
                dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
                goto err_gpio;
        }

        reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
        reg &= ~LTSSM_EN;
        dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

        dra7xx->link_gen = of_pci_get_max_link_speed(np);
        if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2)
                dra7xx->link_gen = 2;

        switch (mode) {
        case DW_PCIE_RC_TYPE:
                if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) {
                        ret = -ENODEV;
                        goto err_gpio;
                }

                dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
                                   DEVICE_TYPE_RC);

                ret = dra7xx_pcie_unaligned_memaccess(dev);
                if (ret)
                        dev_err(dev, "WA for Errata i870 not applied\n");

                ret = dra7xx_add_pcie_port(dra7xx, pdev);
                if (ret < 0)
                        goto err_gpio;
                break;
        case DW_PCIE_EP_TYPE:
                if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP)) {
                        ret = -ENODEV;
                        goto err_gpio;
                }

                dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
                                   DEVICE_TYPE_EP);

                ret = dra7xx_pcie_unaligned_memaccess(dev);
                if (ret)
                        goto err_gpio;

                ret = dra7xx_add_pcie_ep(dra7xx, pdev);
                if (ret < 0)
                        goto err_gpio;
                break;
        default:
                dev_err(dev, "INVALID device type %d\n", mode);
        }
        dra7xx->mode = mode;

        ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
                               IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
        if (ret) {
                dev_err(dev, "failed to request irq\n");
                goto err_gpio;
        }

        return 0;

err_gpio:
        pm_runtime_put(dev);

err_get_sync:
        pm_runtime_disable(dev);
        dra7xx_pcie_disable_phy(dra7xx);

err_link:
        while (--i >= 0)
                device_link_del(link[i]);

        return ret;
}

#ifdef CONFIG_PM_SLEEP
static int dra7xx_pcie_suspend(struct device *dev)
{
        struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
        struct dw_pcie *pci = dra7xx->pci;
        u32 val;

        if (dra7xx->mode != DW_PCIE_RC_TYPE)
                return 0;

        /* clear MSE */
        val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
        val &= ~PCI_COMMAND_MEMORY;
        dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

        return 0;
}

static int dra7xx_pcie_resume(struct device *dev)
{
        struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
        struct dw_pcie *pci = dra7xx->pci;
        u32 val;

        if (dra7xx->mode != DW_PCIE_RC_TYPE)
                return 0;

        /* set MSE */
        val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
        val |= PCI_COMMAND_MEMORY;
        dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

        return 0;
}

static int dra7xx_pcie_suspend_noirq(struct device *dev)
{
        struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);

        dra7xx_pcie_disable_phy(dra7xx);

        return 0;
}

static int dra7xx_pcie_resume_noirq(struct device *dev)
{
        struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
        int ret;

        ret = dra7xx_pcie_enable_phy(dra7xx);
        if (ret) {
                dev_err(dev, "failed to enable phy\n");
                return ret;
        }

        return 0;
}
#endif

static void dra7xx_pcie_shutdown(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
        int ret;

        dra7xx_pcie_stop_link(dra7xx->pci);

        ret = pm_runtime_put_sync(dev);
        if (ret < 0)
                dev_dbg(dev, "pm_runtime_put_sync failed\n");

        pm_runtime_disable(dev);
        dra7xx_pcie_disable_phy(dra7xx);
}

static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
        SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
                                      dra7xx_pcie_resume_noirq)
};

static struct platform_driver dra7xx_pcie_driver = {
        .driver = {
                .name   = "dra7-pcie",
                .of_match_table = of_dra7xx_pcie_match,
                .suppress_bind_attrs = true,
                .pm     = &dra7xx_pcie_pm_ops,
        },
        .shutdown = dra7xx_pcie_shutdown,
};
builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);