// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "../../pci.h"
#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;

static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
			       u32 *val)
{
	struct dw_pcie *pci;

	if (pp->ops->rd_own_conf)
		return pp->ops->rd_own_conf(pp, where, size, val);

	pci = to_dw_pcie_from_pp(pp);
	return dw_pcie_read(pci->dbi_base + where, size, val);
}

static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
			       u32 val)
{
	struct dw_pcie *pci;

	if (pp->ops->wr_own_conf)
		return pp->ops->wr_own_conf(pp, where, size, val);

	pci = to_dw_pcie_from_pp(pp);
	return dw_pcie_write(pci->dbi_base + where, size, val);
}

static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
};

static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &dw_pcie_msi_irq_chip,
};

/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
	int i, pos, irq;
	u32 val, num_ctrls;
	irqreturn_t ret = IRQ_NONE;

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	for (i = 0; i < num_ctrls; i++) {
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
					(i * MSI_REG_CTRL_BLOCK_SIZE),
				    4, &val);
		if (!val)
			continue;

		ret = IRQ_HANDLED;
		pos = 0;
		while ((pos = find_next_bit((unsigned long *) &val,
					    MAX_MSI_IRQS_PER_CTRL,
					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
			irq = irq_find_mapping(pp->irq_domain,
					       (i * MAX_MSI_IRQS_PER_CTRL) +
					       pos);
			generic_handle_irq(irq);
			pos++;
		}
	}

	return ret;
}

/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct pcie_port *pp;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	dw_handle_msi_irq(pp);

	chained_irq_exit(chip, desc);
}

static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target;

	msi_target = (u64)pp->msi_data;

	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);

	msg->data = d->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)d->hwirq, msg->address_hi, msg->address_lo);
}
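/*
 * Every MSI vector on this port is demultiplexed from a single chained
 * parent interrupt (see dw_chained_msi_isr() above), so individual
 * vectors have no CPU affinity of their own to manage; reject any
 * affinity change outright.
 */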
static int dw_pci_msi_set_affinity(struct irq_data *d,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static void dw_pci_bottom_mask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] |= BIT(bit);
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
			    pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_unmask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] &= ~BIT(bit);
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
			    pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	unsigned int res, bit, ctrl;

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, BIT(bit));
}

static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_set_affinity = dw_pci_msi_set_affinity,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};

static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct pcie_port *pp = domain->host_data;
	unsigned long flags;
	u32 i;
	int bit;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
				      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);

	if (bit < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, bit + i,
				    pp->msi_irq_chip,
				    pp, handle_edge_irq,
				    NULL, NULL);

	return 0;
}

static void dw_pcie_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
			      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc	= dw_pcie_irq_domain_alloc,
	.free	= dw_pcie_irq_domain_free,
};
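/*
 * Two-level domain hierarchy: a linear domain modelling the native DWC
 * MSI controller vectors (dw_pcie_msi_domain_ops above) sits underneath
 * a generic PCI/MSI domain (dw_pcie_msi_domain_info) that endpoint
 * drivers actually allocate their interrupts from.
 */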
int dw_pcie_allocate_domains(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
						  &dw_pcie_msi_domain_ops, pp);
	if (!pp->irq_domain) {
		dev_err(pci->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
						   &dw_pcie_msi_domain_info,
						   pp->irq_domain);
	if (!pp->msi_domain) {
		dev_err(pci->dev, "Failed to create MSI domain\n");
		irq_domain_remove(pp->irq_domain);
		return -ENOMEM;
	}

	return 0;
}

void dw_pcie_free_msi(struct pcie_port *pp)
{
	if (pp->msi_irq) {
		irq_set_chained_handler(pp->msi_irq, NULL);
		irq_set_handler_data(pp->msi_irq, NULL);
	}

	irq_domain_remove(pp->msi_domain);
	irq_domain_remove(pp->irq_domain);

	if (pp->msi_page)
		__free_page(pp->msi_page);
}

void dw_pcie_msi_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	u64 msi_target;

	/*
	 * The MSI target is a DMA-mapped scratch page: the controller
	 * catches inbound writes to this address and raises the MSI
	 * interrupt, so the page only serves to reserve an address that
	 * nothing else will use for DMA.
	 */
	pp->msi_page = alloc_page(GFP_KERNEL);
	if (!pp->msi_page)
		return;

	pp->msi_data = dma_map_page(dev, pp->msi_page, 0, PAGE_SIZE,
				    DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, pp->msi_data)) {
		dev_err(dev, "Failed to map MSI data\n");
		__free_page(pp->msi_page);
		pp->msi_page = NULL;
		return;
	}
	msi_target = (u64)pp->msi_data;

	/* Program the msi_data */
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
			    lower_32_bits(msi_target));
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
			    upper_32_bits(msi_target));
}
EXPORT_SYMBOL_GPL(dw_pcie_msi_init);
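/*
 * Core bring-up for a DWC-based root complex: parse the "config" and
 * bridge window resources, map the DBI and CFG spaces, set up MSI
 * handling (unless the platform overrides msi_host_init), run the
 * platform's host_init hook, then scan and enumerate the root bus.
 */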
int dw_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource_entry *win, *tmp;
	struct pci_bus *child;
	struct pci_host_bridge *bridge;
	struct resource *cfg_res;
	u32 hdr_type;
	int ret;

	raw_spin_lock_init(&pci->pp.lock);

	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res) >> 1;
		pp->cfg1_size = resource_size(cfg_res) >> 1;
		pp->cfg0_base = cfg_res->start;
		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
	} else if (!pp->va_cfg0_base) {
		dev_err(dev, "Missing *config* reg space\n");
	}

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return -ENOMEM;

	ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
					&bridge->windows, &pp->io_base);
	if (ret)
		return ret;

	ret = devm_request_pci_bus_resources(dev, &bridge->windows);
	if (ret)
		return ret;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			ret = devm_pci_remap_iospace(dev, win->res,
						     pp->io_base);
			if (ret) {
				dev_warn(dev, "Error %d: failed to map resource %pR\n",
					 ret, win->res);
				resource_list_destroy_entry(win);
			} else {
				pp->io = win->res;
				pp->io->name = "I/O";
				pp->io_size = resource_size(pp->io);
				pp->io_bus_addr = pp->io->start - win->offset;
			}
			break;
		case IORESOURCE_MEM:
			pp->mem = win->res;
			pp->mem->name = "MEM";
			pp->mem_size = resource_size(pp->mem);
			pp->mem_bus_addr = pp->mem->start - win->offset;
			break;
		case 0:
			pp->cfg = win->res;
			pp->cfg0_size = resource_size(pp->cfg) >> 1;
			pp->cfg1_size = resource_size(pp->cfg) >> 1;
			pp->cfg0_base = pp->cfg->start;
			pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
			break;
		case IORESOURCE_BUS:
			pp->busn = win->res;
			break;
		}
	}

	if (!pci->dbi_base) {
		pci->dbi_base = devm_pci_remap_cfgspace(dev,
						pp->cfg->start,
						resource_size(pp->cfg));
		if (!pci->dbi_base) {
			dev_err(dev, "Error with ioremap\n");
			return -ENOMEM;
		}
	}

	pp->mem_base = pp->mem->start;

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
					pp->cfg0_base, pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(dev, "Error with ioremap in function\n");
			return -ENOMEM;
		}
	}

	if (!pp->va_cfg1_base) {
		pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
						pp->cfg1_base,
						pp->cfg1_size);
		if (!pp->va_cfg1_base) {
			dev_err(dev, "Error with ioremap\n");
			return -ENOMEM;
		}
	}

	ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
	if (ret)
		pci->num_viewport = 2;

	if (pci_msi_enabled()) {
		/*
		 * If a specific SoC driver needs to change the
		 * default number of vectors, it needs to implement
		 * the set_num_vectors callback.
		 */
		if (!pp->ops->set_num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else {
			pp->ops->set_num_vectors(pp);

			if (pp->num_vectors > MAX_MSI_IRQS ||
			    pp->num_vectors == 0) {
				dev_err(dev,
					"Invalid number of vectors\n");
				return -EINVAL;
			}
		}

		if (!pp->ops->msi_host_init) {
			pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;

			ret = dw_pcie_allocate_domains(pp);
			if (ret)
				return ret;

			if (pp->msi_irq)
				irq_set_chained_handler_and_data(pp->msi_irq,
							    dw_chained_msi_isr,
							    pp);
		} else {
			ret = pp->ops->msi_host_init(pp);
			if (ret < 0)
				return ret;
		}
	}

	if (pp->ops->host_init) {
		ret = pp->ops->host_init(pp);
		if (ret)
			goto err_free_msi;
	}

	ret = dw_pcie_rd_own_conf(pp, PCI_HEADER_TYPE, 1, &hdr_type);
	if (ret != PCIBIOS_SUCCESSFUL) {
		dev_err(pci->dev, "Failed reading PCI_HEADER_TYPE cfg space reg (ret: 0x%x)\n",
			ret);
		ret = pcibios_err_to_errno(ret);
		goto err_free_msi;
	}
	if (hdr_type != PCI_HEADER_TYPE_BRIDGE) {
		dev_err(pci->dev,
			"PCIe controller is not set to bridge type (hdr_type: 0x%x)!\n",
			hdr_type);
		ret = -EIO;
		goto err_free_msi;
	}

	pp->root_bus_nr = pp->busn->start;

	bridge->dev.parent = dev;
	bridge->sysdata = pp;
	bridge->busnr = pp->root_bus_nr;
	bridge->ops = &dw_pcie_ops;
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;

	ret = pci_scan_root_bus_bridge(bridge);
	if (ret)
		goto err_free_msi;

	pp->root_bus = bridge->bus;

	if (pp->ops->scan_bus)
		pp->ops->scan_bus(pp);

	pci_bus_size_bridges(pp->root_bus);
	pci_bus_assign_resources(pp->root_bus);

	list_for_each_entry(child, &pp->root_bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(pp->root_bus);
	return 0;

err_free_msi:
	if (pci_msi_enabled() && !pp->ops->msi_host_init)
		dw_pcie_free_msi(pp);
	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);

void dw_pcie_host_deinit(struct pcie_port *pp)
{
	pci_stop_root_bus(pp->root_bus);
	pci_remove_root_bus(pp->root_bus);
	if (pci_msi_enabled() && !pp->ops->msi_host_init)
		dw_pcie_free_msi(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
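/*
 * Config accesses to devices on the bus directly below the root port
 * use a type 0 CFG TLP (CFG0 window); anything further away needs
 * type 1 (CFG1) so that intermediate bridges forward it.  With only
 * two ATU viewports, the CFG viewport is shared with the I/O window,
 * which is why the I/O translation is reprogrammed after each access
 * below.
 */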
static int dw_pcie_access_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				     u32 devfn, int where, int size, u32 *val,
				     bool write)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	if (write)
		ret = dw_pcie_write(va_cfg_base + where, size, *val);
	else
		ret = dw_pcie_read(va_cfg_base + where, size, val);

	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}

static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 *val)
{
	if (pp->ops->rd_other_conf)
		return pp->ops->rd_other_conf(pp, bus, devfn, where,
					      size, val);

	return dw_pcie_access_other_conf(pp, bus, devfn, where, size, val,
					 false);
}

static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 val)
{
	if (pp->ops->wr_other_conf)
		return pp->ops->wr_other_conf(pp, bus, devfn, where,
					      size, val);

	return dw_pcie_access_other_conf(pp, bus, devfn, where, size, &val,
					 true);
}

static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
				int dev)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/* If there is no link, then there is no device */
	if (bus->number != pp->root_bus_nr) {
		if (!dw_pcie_link_up(pci))
			return 0;
	}

	/* Access only one slot on each root port */
	if (bus->number == pp->root_bus_nr && dev > 0)
		return 0;

	return 1;
}

static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			   int size, u32 *val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_rd_own_conf(pp, where, size, val);

	return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
}

static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			   int where, int size, u32 val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_wr_own_conf(pp, where, size, val);

	return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
}

static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
};
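/*
 * Program the root complex's own config space and address translation:
 * the MSI controller mask/enable banks, RC BARs, bus numbers, command
 * register, outbound ATU windows (unless the platform does its own
 * address translation) and the bridge class code.
 */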
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val, ctrl, num_ctrls;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/*
	 * Enable DBI read-only registers for writing/updating configuration.
	 * Write permission gets disabled towards the end of this function.
	 */
	dw_pcie_dbi_ro_wr_en(pci);

	dw_pcie_setup(pci);

	if (!pp->ops->msi_host_init) {
		num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

		/*
		 * Initialize the MSI controllers: mask all vectors (they
		 * are unmasked on demand via dw_pci_bottom_unmask()) and
		 * enable every controller bank.
		 */
		for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
			pp->irq_mask[ctrl] = ~0;
			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    4, pp->irq_mask[ctrl]);
			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    4, ~0);
		}
	}

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins */
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);

	/* Setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
	       PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides ->rd_other_conf, it means the platform
	 * uses its own address translation component rather than ATU, so
	 * we should not program the ATU here.
	 */
	if (!pp->ops->rd_other_conf) {
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
					  PCIE_ATU_TYPE_MEM, pp->mem_base,
					  pp->mem_bus_addr, pp->mem_size);
		if (pci->num_viewport > 2)
			dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
						  PCIE_ATU_TYPE_IO, pp->io_base,
						  pp->io_bus_addr, pp->io_size);
	}

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* Program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);

	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);

	dw_pcie_dbi_ro_wr_dis(pci);
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);
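/*
 * Usage sketch (illustrative only; the function and ops names below are
 * hypothetical, not part of this driver): a platform glue driver
 * typically embeds struct dw_pcie, fills in the pcie_port ops and MSI
 * IRQ, and hands off to dw_pcie_host_init() from its probe routine:
 *
 *	static int example_pcie_probe(struct platform_device *pdev)
 *	{
 *		struct dw_pcie *pci;
 *
 *		pci = devm_kzalloc(&pdev->dev, sizeof(*pci), GFP_KERNEL);
 *		if (!pci)
 *			return -ENOMEM;
 *
 *		pci->dev = &pdev->dev;
 *		pci->pp.ops = &example_pcie_host_ops;
 *
 *		if (IS_ENABLED(CONFIG_PCI_MSI)) {
 *			pci->pp.msi_irq = platform_get_irq_byname(pdev, "msi");
 *			if (pci->pp.msi_irq < 0)
 *				return pci->pp.msi_irq;
 *		}
 *
 *		return dw_pcie_host_init(&pci->pp);
 *	}
 */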