// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Texas Instruments Keystone SoCs
 *
 * Copyright (C) 2013-2014 Texas Instruments., Ltd.
 *		http://www.ti.com
 *
 * Author: Murali Karicheri <m-karicheri2@ti.com>
 * Implementation based on pci-exynos.c and pcie-designware.c
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/resource.h>
#include <linux/signal.h>

#include "pcie-designware.h"

#define PCIE_VENDORID_MASK		0xffff
#define PCIE_DEVICEID_SHIFT		16

/* Application registers */
#define CMD_STATUS			0x004
#define LTSSM_EN_VAL			BIT(0)
#define OB_XLAT_EN_VAL			BIT(1)
#define DBI_CS2				BIT(5)

#define CFG_SETUP			0x008
#define CFG_BUS(x)			(((x) & 0xff) << 16)
#define CFG_DEVICE(x)			(((x) & 0x1f) << 8)
#define CFG_FUNC(x)			((x) & 0x7)
#define CFG_TYPE1			BIT(24)

#define OB_SIZE				0x030
#define SPACE0_REMOTE_CFG_OFFSET	0x1000
#define OB_OFFSET_INDEX(n)		(0x200 + (8 * (n)))
#define OB_OFFSET_HI(n)			(0x204 + (8 * (n)))
#define OB_ENABLEN			BIT(0)
#define OB_WIN_SIZE			8	/* 8 MB */

/* IRQ register defines */
#define IRQ_EOI				0x050
#define IRQ_STATUS			0x184
#define IRQ_ENABLE_SET			0x188
#define IRQ_ENABLE_CLR			0x18c

#define MSI_IRQ				0x054
#define MSI0_IRQ_STATUS			0x104
#define MSI0_IRQ_ENABLE_SET		0x108
#define MSI0_IRQ_ENABLE_CLR		0x10c
#define MSI_IRQ_OFFSET			4

#define ERR_IRQ_STATUS			0x1c4
#define ERR_IRQ_ENABLE_SET		0x1c8
#define ERR_AER				BIT(5)	/* ECRC error */
#define ERR_AXI				BIT(4)	/* AXI tag lookup fatal error */
#define ERR_CORR			BIT(3)	/* Correctable error */
#define ERR_NONFATAL			BIT(2)	/* Non-fatal error */
#define ERR_FATAL			BIT(1)	/* Fatal error */
#define ERR_SYS				BIT(0)	/* System error */
#define ERR_IRQ_ALL			(ERR_AER | ERR_AXI | ERR_CORR | \
					 ERR_NONFATAL | ERR_FATAL | ERR_SYS)

#define MAX_MSI_HOST_IRQS		8
/* PCIe controller device IDs */
#define PCIE_RC_K2HK			0xb008
#define PCIE_RC_K2E			0xb009
#define PCIE_RC_K2L			0xb00a
#define PCIE_RC_K2G			0xb00b

#define to_keystone_pcie(x)		dev_get_drvdata((x)->dev)

struct keystone_pcie {
	struct dw_pcie		*pci;
	/* PCI Device ID */
	u32			device_id;
	int			num_legacy_host_irqs;
	int			legacy_host_irqs[PCI_NUM_INTX];
	struct device_node	*legacy_intc_np;

	int			num_msi_host_irqs;
	int			msi_host_irqs[MAX_MSI_HOST_IRQS];
	int			num_lanes;
	u32			num_viewport;
	struct phy		**phy;
	struct device_link	**link;
	struct device_node	*msi_intc_np;
	struct irq_domain	*legacy_irq_domain;
	struct device_node	*np;

	int			error_irq;

	/* Application register space */
	void __iomem		*va_app_base;	/* DT 1st resource */
	struct resource		app;
};

static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
					     u32 *bit_pos)
{
	*reg_offset = offset % 8;
	*bit_pos = offset >> 3;
}
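
/*
 * Worked example of the mapping implemented by update_reg_offset_bit_pos()
 * and used by the MSI helpers below (numbers are illustration only, derived
 * from the code, not extra hardware documentation): MSI vector 'v' is tracked
 * in register group (v % 8), i.e. at MSI0_IRQ_* + (group << 4), bit (v >> 3).
 * For example, vector 17 maps to group 1, bit 2, i.e. bit 2 of the status
 * register at 0x104 + 0x10 = 0x114. ks_pcie_handle_msi_irq() reverses this as
 * vector = group + (bit << 3): group 1, bit 2 -> vector 17. The 8 groups of
 * 4 bits each thus cover 32 MSI vectors.
 */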

static phys_addr_t ks_pcie_get_msi_addr(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

	return ks_pcie->app.start + MSI_IRQ;
}

static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
{
	return readl(ks_pcie->va_app_base + offset);
}

static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
			       u32 val)
{
	writel(val, ks_pcie->va_app_base + offset);
}

static void ks_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
{
	struct dw_pcie *pci = ks_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;
	u32 pending, vector;
	int src, virq;

	pending = ks_pcie_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4));

	/*
	 * MSI0 status bits 0-3 show vectors 0, 8, 16, 24; MSI1 status bits
	 * show vectors 1, 9, 17, 25; and so forth.
	 */
	for (src = 0; src < 4; src++) {
		if (BIT(src) & pending) {
			vector = offset + (src << 3);
			virq = irq_linear_revmap(pp->irq_domain, vector);
			dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n",
				src, vector, virq);
			generic_handle_irq(virq);
		}
	}
}

static void ks_pcie_msi_irq_ack(int irq, struct pcie_port *pp)
{
	u32 reg_offset, bit_pos;
	struct keystone_pcie *ks_pcie;
	struct dw_pcie *pci;

	pci = to_dw_pcie_from_pp(pp);
	ks_pcie = to_keystone_pcie(pci);
	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);

	ks_pcie_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4),
			   BIT(bit_pos));
	ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
}

static void ks_pcie_msi_set_irq(struct pcie_port *pp, int irq)
{
	u32 reg_offset, bit_pos;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
	ks_pcie_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4),
			   BIT(bit_pos));
}

static void ks_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
{
	u32 reg_offset, bit_pos;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
	ks_pcie_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4),
			   BIT(bit_pos));
}

static int ks_pcie_msi_host_init(struct pcie_port *pp)
{
	return dw_pcie_allocate_domains(pp);
}
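
/*
 * Note on the legacy (INTx) registers used below, summarizing only what the
 * code does: the application register block has one status/enable register
 * set per INTx line, spaced 0x10 apart (IRQ_STATUS/IRQ_ENABLE_SET +
 * (intx << 4)). The IRQ_EOI register takes an event number: 0-3 for
 * INTA-INTD (ks_pcie_handle_legacy_irq()) and 4-11, i.e. MSI_IRQ_OFFSET plus
 * the MSI register group, for the MSI host interrupts (ks_pcie_msi_irq_ack()
 * above).
 */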

static void ks_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
{
	int i;

	for (i = 0; i < PCI_NUM_INTX; i++)
		ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1);
}

static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
				      int offset)
{
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	u32 pending;
	int virq;

	pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS + (offset << 4));

	if (BIT(0) & pending) {
		virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
		dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
		generic_handle_irq(virq);
	}

	/* EOI the INTx interrupt */
	ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset);
}

static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
{
	ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
}

static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
{
	u32 reg;
	struct device *dev = ks_pcie->pci->dev;

	reg = ks_pcie_app_readl(ks_pcie, ERR_IRQ_STATUS);
	if (!reg)
		return IRQ_NONE;

	if (reg & ERR_SYS)
		dev_err(dev, "System Error\n");

	if (reg & ERR_FATAL)
		dev_err(dev, "Fatal Error\n");

	if (reg & ERR_NONFATAL)
		dev_dbg(dev, "Non Fatal Error\n");

	if (reg & ERR_CORR)
		dev_dbg(dev, "Correctable Error\n");

	if (reg & ERR_AXI)
		dev_err(dev, "AXI tag lookup fatal Error\n");

	if (reg & ERR_AER)
		dev_err(dev, "ECRC Error\n");

	ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg);

	return IRQ_HANDLED;
}

static void ks_pcie_ack_legacy_irq(struct irq_data *d)
{
}

static void ks_pcie_mask_legacy_irq(struct irq_data *d)
{
}

static void ks_pcie_unmask_legacy_irq(struct irq_data *d)
{
}

static struct irq_chip ks_pcie_legacy_irq_chip = {
	.name = "Keystone-PCI-Legacy-IRQ",
	.irq_ack = ks_pcie_ack_legacy_irq,
	.irq_mask = ks_pcie_mask_legacy_irq,
	.irq_unmask = ks_pcie_unmask_legacy_irq,
};

static int ks_pcie_init_legacy_irq_map(struct irq_domain *d,
				       unsigned int irq,
				       irq_hw_number_t hw_irq)
{
	irq_set_chip_and_handler(irq, &ks_pcie_legacy_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, d->host_data);

	return 0;
}

static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
	.map = ks_pcie_init_legacy_irq_map,
	.xlate = irq_domain_xlate_onetwocell,
};

/**
 * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
 * registers
 * @ks_pcie: Pointer to the Keystone PCIe host controller driver data
 *
 * Since modification of dbi_cs2 involves different clock domain, read the
 * status back to ensure the transition is complete.
 */
static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
{
	u32 val;

	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	val |= DBI_CS2;
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);

	do {
		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	} while (!(val & DBI_CS2));
}

/**
 * ks_pcie_clear_dbi_mode() - Disable DBI mode
 * @ks_pcie: Pointer to the Keystone PCIe host controller driver data
 *
 * Since modification of dbi_cs2 involves different clock domain, read the
 * status back to ensure the transition is complete.
 */
static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
{
	u32 val;

	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	val &= ~DBI_CS2;
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);

	do {
		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	} while (val & DBI_CS2);
}
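
/*
 * ks_pcie_setup_rc_app_regs() below programs the outbound translation
 * windows. As a worked example of the arithmetic only (the numbers do not
 * describe any particular board): each window is OB_WIN_SIZE = 8 MB, so with
 * a DT "num-viewport" of 4 the loop maps the first 4 * 8 MB = 32 MB of the
 * host bridge memory window 1:1 onto PCI bus addresses, one window per
 * iteration, starting at pp->mem->start.
 */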

static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
{
	u32 val;
	u32 num_viewport = ks_pcie->num_viewport;
	struct dw_pcie *pci = ks_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	u64 start = pp->mem->start;
	u64 end = pp->mem->end;
	int i;

	/* Disable BARs for inbound access */
	ks_pcie_set_dbi_mode(ks_pcie);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
	ks_pcie_clear_dbi_mode(ks_pcie);

	val = ilog2(OB_WIN_SIZE);
	ks_pcie_app_writel(ks_pcie, OB_SIZE, val);

	/* Using Direct 1:1 mapping of RC <-> PCI memory space */
	for (i = 0; i < num_viewport && (start < end); i++) {
		ks_pcie_app_writel(ks_pcie, OB_OFFSET_INDEX(i),
				   lower_32_bits(start) | OB_ENABLEN);
		ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i),
				   upper_32_bits(start));
		/* Advance by one 8 MB window, matching the OB_SIZE setting */
		start += OB_WIN_SIZE * SZ_1M;
	}

	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	val |= OB_XLAT_EN_VAL;
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
}
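
/*
 * Configuration accesses to devices other than the root complex itself go
 * through a single remote configuration window: CFG_SETUP selects the target
 * bus/device/function (with CFG_TYPE1 set for buses that are not directly
 * behind the RC), and the access is then issued through va_cfg0_base, which
 * ks_pcie_dw_host_init() points at dbi + SPACE0_REMOTE_CFG_OFFSET. This note
 * only summarizes the two accessors below.
 */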

static int ks_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 unsigned int devfn, int where, int size,
				 u32 *val)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	u32 reg;

	reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
		CFG_FUNC(PCI_FUNC(devfn));
	if (bus->parent->number != pp->root_bus_nr)
		reg |= CFG_TYPE1;
	ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);

	return dw_pcie_read(pp->va_cfg0_base + where, size, val);
}

static int ks_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 unsigned int devfn, int where, int size,
				 u32 val)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	u32 reg;

	reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
		CFG_FUNC(PCI_FUNC(devfn));
	if (bus->parent->number != pp->root_bus_nr)
		reg |= CFG_TYPE1;
	ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);

	return dw_pcie_write(pp->va_cfg0_base + where, size, val);
}

/**
 * ks_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
 * @pp: Pointer to the DesignWare PCIe host port
 *
 * This sets BAR0 to enable inbound access for the MSI_IRQ register.
 */
static void ks_pcie_v3_65_scan_bus(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

	/* Configure and set up BAR0 */
	ks_pcie_set_dbi_mode(ks_pcie);

	/* Enable BAR0 */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);

	ks_pcie_clear_dbi_mode(ks_pcie);

	/*
	 * For BAR0, just setting bus address for inbound writes (MSI) should
	 * be sufficient. Use physical address to avoid any conflicts.
	 */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
}

/**
 * ks_pcie_link_up() - Check if link is up
 * @pci: Pointer to the DesignWare PCIe controller
 */
static int ks_pcie_link_up(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
	val &= PORT_LOGIC_LTSSM_STATE_MASK;
	return (val == PORT_LOGIC_LTSSM_STATE_L0);
}

static void ks_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
{
	u32 val;

	/* Disable Link training */
	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	val &= ~LTSSM_EN_VAL;
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);

	/* Initiate Link Training */
	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
}

/**
 * ks_pcie_dw_host_init() - initialize host for v3_65 dw hardware
 * @ks_pcie: Pointer to the Keystone PCIe host controller driver data
 *
 * Ioremap the register resources, initialize the legacy irq domain
 * and call dw_pcie_host_init() to initialize the Keystone
 * PCI host controller.
 */
static int __init ks_pcie_dw_host_init(struct keystone_pcie *ks_pcie)
{
	struct dw_pcie *pci = ks_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;

	/* Index 0 is the config reg. space address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	/*
	 * Point both va_cfg0_base and va_cfg1_base at the same remote
	 * configuration space window; they are used by the
	 * rd/wr_other_conf callbacks above.
	 */
	pp->va_cfg0_base = pci->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
	pp->va_cfg1_base = pp->va_cfg0_base;

	/* Index 1 is the application reg. space address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ks_pcie->va_app_base))
		return PTR_ERR(ks_pcie->va_app_base);

	ks_pcie->app = *res;

	/* Create legacy IRQ domain */
	ks_pcie->legacy_irq_domain =
			irq_domain_add_linear(ks_pcie->legacy_intc_np,
					      PCI_NUM_INTX,
					      &ks_pcie_legacy_irq_domain_ops,
					      NULL);
	if (!ks_pcie->legacy_irq_domain) {
		dev_err(dev, "Failed to add irq domain for legacy irqs\n");
		return -EINVAL;
	}

	return dw_pcie_host_init(pp);
}

static void ks_pcie_quirk(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;
	struct pci_dev *bridge;
	static const struct pci_device_id rc_pci_devids[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G),
		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
		{ 0, },
	};

	if (pci_is_root_bus(bus))
		bridge = dev;

	/* look for the host bridge */
	while (!pci_is_root_bus(bus)) {
		bridge = bus->self;
		bus = bus->parent;
	}

	if (!bridge)
		return;

	/*
	 * Keystone PCI controller has a h/w limitation of
	 * 256 bytes maximum read request size. It can't handle
	 * anything higher than this. So force this limit on
	 * all downstream devices.
	 */
	if (pci_match_id(rc_pci_devids, bridge)) {
		if (pcie_get_readrq(dev) > 256) {
			dev_info(&dev->dev, "limiting MRRS to 256\n");
			pcie_set_readrq(dev, 256);
		}
	}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);

static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
{
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;

	if (dw_pcie_link_up(pci)) {
		dev_info(dev, "Link already up\n");
		return 0;
	}

	ks_pcie_initiate_link_train(ks_pcie);

	/* check if the link is up or not */
	if (!dw_pcie_wait_for_link(pci))
		return 0;

	dev_err(dev, "phy link never came up\n");
	return -ETIMEDOUT;
}

static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
{
	unsigned int irq = irq_desc_get_irq(desc);
	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
	u32 offset = irq - ks_pcie->msi_host_irqs[0];
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	struct irq_chip *chip = irq_desc_get_chip(desc);

	dev_dbg(dev, "%s, irq %d\n", __func__, irq);

	/*
	 * The chained irq handler installation would have replaced the normal
	 * interrupt driver handler, so we need to take care of the mask/unmask
	 * and ack operation.
	 */
	chained_irq_enter(chip, desc);
	ks_pcie_handle_msi_irq(ks_pcie, offset);
	chained_irq_exit(chip, desc);
}

/**
 * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
 * @desc: Pointer to irq descriptor
 *
 * Traverse through pending legacy interrupts and invoke handler for each. Also
 * takes care of interrupt controller level mask/ack operation.
 */
static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
{
	unsigned int irq = irq_desc_get_irq(desc);
	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
	struct irq_chip *chip = irq_desc_get_chip(desc);

	dev_dbg(dev, ": Handling legacy irq %d\n", irq);

	/*
	 * The chained irq handler installation would have replaced the normal
	 * interrupt driver handler, so we need to take care of the mask/unmask
	 * and ack operation.
	 */
	chained_irq_enter(chip, desc);
	ks_pcie_handle_legacy_irq(ks_pcie, irq_offset);
	chained_irq_exit(chip, desc);
}
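
/*
 * The host interrupt lines are described by two child nodes of the PCIe
 * controller node in the device tree. A minimal, purely illustrative sketch
 * (all values are placeholders, not taken from a real board; refer to the
 * ti,keystone-pcie DT binding for an authoritative example):
 *
 *	pcie@... {
 *		compatible = "ti,keystone-pcie";
 *		reg = <...>, <...>;	// index 0: DBI/config, index 1: application registers
 *		num-viewport = <...>;
 *		legacy-interrupt-controller {
 *			interrupts = <...>;	// up to PCI_NUM_INTX (4) host IRQs
 *		};
 *		msi-interrupt-controller {
 *			interrupts = <...>;	// up to MAX_MSI_HOST_IRQS (8) host IRQs
 *		};
 *	};
 */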

static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
					   char *controller, int *num_irqs)
{
	int temp, max_host_irqs, legacy = 1, *host_irqs;
	struct device *dev = ks_pcie->pci->dev;
	struct device_node *np_pcie = dev->of_node, **np_temp;

	if (!strcmp(controller, "msi-interrupt-controller"))
		legacy = 0;

	if (legacy) {
		np_temp = &ks_pcie->legacy_intc_np;
		max_host_irqs = PCI_NUM_INTX;
		host_irqs = &ks_pcie->legacy_host_irqs[0];
	} else {
		np_temp = &ks_pcie->msi_intc_np;
		max_host_irqs = MAX_MSI_HOST_IRQS;
		host_irqs = &ks_pcie->msi_host_irqs[0];
	}

	/* interrupt controller is in a child node */
	*np_temp = of_get_child_by_name(np_pcie, controller);
	if (!(*np_temp)) {
		dev_err(dev, "Node for %s is absent\n", controller);
		return -EINVAL;
	}

	temp = of_irq_count(*np_temp);
	if (!temp) {
		dev_err(dev, "No IRQ entries in %s\n", controller);
		of_node_put(*np_temp);
		return -EINVAL;
	}

	if (temp > max_host_irqs)
		dev_warn(dev, "Too many %s interrupts defined %u\n",
			 (legacy ? "legacy" : "MSI"), temp);

	/*
	 * Support up to max_host_irqs: in the DT these are entries
	 * 0 to 3 (legacy) or 0 to 7 (MSI).
	 */
	for (temp = 0; temp < max_host_irqs; temp++) {
		host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp);
		if (!host_irqs[temp])
			break;
	}

	of_node_put(*np_temp);

	if (temp) {
		*num_irqs = temp;
		return 0;
	}

	return -EINVAL;
}

static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
{
	int i;

	/* Legacy IRQ */
	for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) {
		irq_set_chained_handler_and_data(ks_pcie->legacy_host_irqs[i],
						 ks_pcie_legacy_irq_handler,
						 ks_pcie);
	}
	ks_pcie_enable_legacy_irqs(ks_pcie);

	/* MSI IRQ */
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) {
			irq_set_chained_handler_and_data(ks_pcie->msi_host_irqs[i],
							 ks_pcie_msi_irq_handler,
							 ks_pcie);
		}
	}

	if (ks_pcie->error_irq > 0)
		ks_pcie_enable_error_irq(ks_pcie);
}

/*
 * When a PCI device does not exist during config cycles, the keystone host
 * gets a bus error instead of returning 0xffffffff. This handler always
 * returns 0 for this kind of fault: it decodes the aborted load instruction,
 * fakes the all-ones read value in the destination register and resumes
 * execution after the faulting instruction.
 */
static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
			 struct pt_regs *regs)
{
	unsigned long instr = *(unsigned long *) instruction_pointer(regs);

	if ((instr & 0x0e100090) == 0x00100090) {
		int reg = (instr >> 12) & 15;

		regs->uregs[reg] = -1;
		regs->ARM_pc += 4;
	}

	return 0;
}
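
/*
 * Summary of ks_pcie_init_id() below: the RC's PCI vendor and device IDs are
 * read from the SoC device-configuration register referenced by the
 * "ti,syscon-pcie-id" phandle (low 16 bits = vendor ID, upper 16 bits =
 * device ID) and programmed into the configuration header through the DBI.
 */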

static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
{
	int ret;
	unsigned int id;
	struct regmap *devctrl_regs;
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;

	devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-id");
	if (IS_ERR(devctrl_regs))
		return PTR_ERR(devctrl_regs);

	ret = regmap_read(devctrl_regs, 0, &id);
	if (ret)
		return ret;

	dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK);
	dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT);

	return 0;
}

static int __init ks_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	int ret;

	dw_pcie_setup_rc(pp);

	ks_pcie_establish_link(ks_pcie);
	ks_pcie_setup_rc_app_regs(ks_pcie);
	ks_pcie_setup_interrupts(ks_pcie);
	writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
	       pci->dbi_base + PCI_IO_BASE);

	ret = ks_pcie_init_id(ks_pcie);
	if (ret < 0)
		return ret;

	/*
	 * PCIe access errors that result into OCP errors are caught by ARM as
	 * "External aborts"
	 */
	hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
			"Asynchronous external abort");

	return 0;
}

static const struct dw_pcie_host_ops ks_pcie_host_ops = {
	.rd_other_conf = ks_pcie_rd_other_conf,
	.wr_other_conf = ks_pcie_wr_other_conf,
	.host_init = ks_pcie_host_init,
	.msi_set_irq = ks_pcie_msi_set_irq,
	.msi_clear_irq = ks_pcie_msi_clear_irq,
	.get_msi_addr = ks_pcie_get_msi_addr,
	.msi_host_init = ks_pcie_msi_host_init,
	.msi_irq_ack = ks_pcie_msi_irq_ack,
	.scan_bus = ks_pcie_v3_65_scan_bus,
};

static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
{
	struct keystone_pcie *ks_pcie = priv;

	return ks_pcie_handle_error_irq(ks_pcie);
}

static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
					struct platform_device *pdev)
{
	struct dw_pcie *pci = ks_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = &pdev->dev;
	int ret;

	ret = ks_pcie_get_irq_controller_info(ks_pcie,
					      "legacy-interrupt-controller",
					      &ks_pcie->num_legacy_host_irqs);
	if (ret)
		return ret;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		ret = ks_pcie_get_irq_controller_info(ks_pcie,
						      "msi-interrupt-controller",
						      &ks_pcie->num_msi_host_irqs);
		if (ret)
			return ret;
	}

	/*
	 * Index 0 is the platform interrupt for error interrupt
	 * from RC. This is optional.
	 */
	ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0);
	if (ks_pcie->error_irq <= 0)
		dev_info(dev, "no error IRQ defined\n");
	else {
		ret = request_irq(ks_pcie->error_irq, ks_pcie_err_irq_handler,
				  IRQF_SHARED, "pcie-error-irq", ks_pcie);
		if (ret < 0) {
			dev_err(dev, "failed to request error IRQ %d\n",
				ks_pcie->error_irq);
			return ret;
		}
	}

	pp->ops = &ks_pcie_host_ops;
	ret = ks_pcie_dw_host_init(ks_pcie);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}

static const struct of_device_id ks_pcie_of_match[] = {
	{
		.type = "pci",
		.compatible = "ti,keystone-pcie",
	},
	{ },
};

static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {
	.link_up = ks_pcie_link_up,
};
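
/*
 * Note on the PHY helpers below: the PHYs are optional (devm_phy_optional_get()
 * in probe returns NULL for a lane with no "pcie-phyN" PHY described), and the
 * phy_init/phy_power_on/phy_power_off/phy_exit calls are no-ops for NULL
 * entries, so missing lanes are simply skipped. The device links created in
 * probe appear intended to keep each PHY provider bound and powered while the
 * PCIe controller is using it.
 */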

static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie)
{
	int num_lanes = ks_pcie->num_lanes;

	while (num_lanes--) {
		phy_power_off(ks_pcie->phy[num_lanes]);
		phy_exit(ks_pcie->phy[num_lanes]);
	}
}

static int ks_pcie_enable_phy(struct keystone_pcie *ks_pcie)
{
	int i;
	int ret;
	int num_lanes = ks_pcie->num_lanes;

	for (i = 0; i < num_lanes; i++) {
		ret = phy_init(ks_pcie->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_power_on(ks_pcie->phy[i]);
		if (ret < 0) {
			phy_exit(ks_pcie->phy[i]);
			goto err_phy;
		}
	}

	return 0;

err_phy:
	while (--i >= 0) {
		phy_power_off(ks_pcie->phy[i]);
		phy_exit(ks_pcie->phy[i]);
	}

	return ret;
}

static int __init ks_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct dw_pcie *pci;
	struct keystone_pcie *ks_pcie;
	struct device_link **link;
	u32 num_viewport;
	struct phy **phy;
	u32 num_lanes;
	char name[10];
	int ret;
	int i;

	ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
	if (!ks_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &ks_pcie_dw_pcie_ops;

	ret = of_property_read_u32(np, "num-viewport", &num_viewport);
	if (ret < 0) {
		dev_err(dev, "unable to read *num-viewport* property\n");
		return ret;
	}

	ret = of_property_read_u32(np, "num-lanes", &num_lanes);
	if (ret)
		num_lanes = 1;

	phy = devm_kzalloc(dev, sizeof(*phy) * num_lanes, GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	link = devm_kzalloc(dev, sizeof(*link) * num_lanes, GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	for (i = 0; i < num_lanes; i++) {
		snprintf(name, sizeof(name), "pcie-phy%d", i);
		phy[i] = devm_phy_optional_get(dev, name);
		if (IS_ERR(phy[i])) {
			ret = PTR_ERR(phy[i]);
			goto err_link;
		}

		if (!phy[i])
			continue;

		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
		if (!link[i]) {
			ret = -EINVAL;
			goto err_link;
		}
	}

	ks_pcie->np = np;
	ks_pcie->pci = pci;
	ks_pcie->link = link;
	ks_pcie->num_lanes = num_lanes;
	ks_pcie->num_viewport = num_viewport;
	ks_pcie->phy = phy;

	ret = ks_pcie_enable_phy(ks_pcie);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		goto err_link;
	}

	platform_set_drvdata(pdev, ks_pcie);
	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync failed\n");
		goto err_get_sync;
	}

	ret = ks_pcie_add_pcie_port(ks_pcie, pdev);
	if (ret < 0)
		goto err_get_sync;

	return 0;

err_get_sync:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	ks_pcie_disable_phy(ks_pcie);

err_link:
	while (--i >= 0 && link[i])
		device_link_del(link[i]);

	return ret;
}

static int __exit ks_pcie_remove(struct platform_device *pdev)
{
	struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
	struct device_link **link = ks_pcie->link;
	int num_lanes = ks_pcie->num_lanes;
	struct device *dev = &pdev->dev;

	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	ks_pcie_disable_phy(ks_pcie);
	while (num_lanes--)
		device_link_del(link[num_lanes]);

	return 0;
}

static struct platform_driver ks_pcie_driver __refdata = {
	.probe  = ks_pcie_probe,
	.remove = __exit_p(ks_pcie_remove),
	.driver = {
		.name	= "keystone-pcie",
		.of_match_table = of_match_ptr(ks_pcie_of_match),
	},
};
builtin_platform_driver(ks_pcie_driver);