// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "../../pci.h"
#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;

static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
			       u32 *val)
{
	struct dw_pcie *pci;

	if (pp->ops->rd_own_conf)
		return pp->ops->rd_own_conf(pp, where, size, val);

	pci = to_dw_pcie_from_pp(pp);
	return dw_pcie_read(pci->dbi_base + where, size, val);
}

static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
			       u32 val)
{
	struct dw_pcie *pci;

	if (pp->ops->wr_own_conf)
		return pp->ops->wr_own_conf(pp, where, size, val);

	pci = to_dw_pcie_from_pp(pp);
	return dw_pcie_write(pci->dbi_base + where, size, val);
}

static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
};

static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &dw_pcie_msi_irq_chip,
};

/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
	int i, pos, irq;
	u32 val, num_ctrls;
	irqreturn_t ret = IRQ_NONE;

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	for (i = 0; i < num_ctrls; i++) {
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
					(i * MSI_REG_CTRL_BLOCK_SIZE),
				    4, &val);
		if (!val)
			continue;

		ret = IRQ_HANDLED;
		pos = 0;
		while ((pos = find_next_bit((unsigned long *) &val,
					    MAX_MSI_IRQS_PER_CTRL,
					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
			irq = irq_find_mapping(pp->irq_domain,
					       (i * MAX_MSI_IRQS_PER_CTRL) +
					       pos);
			generic_handle_irq(irq);
			pos++;
		}
	}

	return ret;
}

/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct pcie_port *pp;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	dw_handle_msi_irq(pp);

	chained_irq_exit(chip, desc);
}

static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target;

	msi_target = (u64)pp->msi_data;

	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);

	msg->data = d->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)d->hwirq, msg->address_hi, msg->address_lo);
}

static int dw_pci_msi_set_affinity(struct irq_data *d,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
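/*
 * Editorial note (derived from the constants in pcie-designware.h): the
 * helpers below treat the MSI vector number as a (controller, bit) pair.
 * Each controller serves MAX_MSI_IRQS_PER_CTRL (32) vectors and owns a
 * MSI_REG_CTRL_BLOCK_SIZE (12-byte) block of ENABLE/MASK/STATUS registers.
 * Worked example: hwirq 37 gives ctrl = 37 / 32 = 1 and bit = 37 % 32 = 5,
 * so its status flag is BIT(5) of the register at
 * PCIE_MSI_INTR0_STATUS + 1 * MSI_REG_CTRL_BLOCK_SIZE.
 */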
static void dw_pci_bottom_mask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] |= BIT(bit);
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
			    pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_unmask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] &= ~BIT(bit);
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
			    pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	unsigned int res, bit, ctrl;

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, BIT(bit));
}

static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_set_affinity = dw_pci_msi_set_affinity,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};

static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct pcie_port *pp = domain->host_data;
	unsigned long flags;
	u32 i;
	int bit;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
				      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);

	if (bit < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, bit + i,
				    pp->msi_irq_chip,
				    pp, handle_edge_irq,
				    NULL, NULL);

	return 0;
}

static void dw_pcie_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
			      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc	= dw_pcie_irq_domain_alloc,
	.free	= dw_pcie_irq_domain_free,
};
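/*
 * Editorial note: dw_pcie_allocate_domains() below creates a two-level
 * hierarchy.  irq_domain_create_linear() provides the controller-level
 * parent domain, whose alloc/free ops above manage the hwirq bitmap, and
 * pci_msi_create_irq_domain() stacks the generic PCI/MSI domain on top of
 * it using dw_pcie_msi_domain_info.
 */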
int dw_pcie_allocate_domains(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
						  &dw_pcie_msi_domain_ops, pp);
	if (!pp->irq_domain) {
		dev_err(pci->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
						   &dw_pcie_msi_domain_info,
						   pp->irq_domain);
	if (!pp->msi_domain) {
		dev_err(pci->dev, "Failed to create MSI domain\n");
		irq_domain_remove(pp->irq_domain);
		return -ENOMEM;
	}

	return 0;
}

void dw_pcie_free_msi(struct pcie_port *pp)
{
	if (pp->msi_irq) {
		irq_set_chained_handler(pp->msi_irq, NULL);
		irq_set_handler_data(pp->msi_irq, NULL);
	}

	irq_domain_remove(pp->msi_domain);
	irq_domain_remove(pp->irq_domain);

	if (pp->msi_page) {
		struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

		/* Undo the mapping set up in dw_pcie_msi_init() */
		dma_unmap_page(pci->dev, pp->msi_data, PAGE_SIZE,
			       DMA_FROM_DEVICE);
		__free_page(pp->msi_page);
	}
}

void dw_pcie_msi_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	u64 msi_target;

	pp->msi_page = alloc_page(GFP_KERNEL);
	if (!pp->msi_page) {
		dev_err(dev, "Failed to allocate MSI data page\n");
		return;
	}

	pp->msi_data = dma_map_page(dev, pp->msi_page, 0, PAGE_SIZE,
				    DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, pp->msi_data)) {
		dev_err(dev, "Failed to map MSI data\n");
		__free_page(pp->msi_page);
		pp->msi_page = NULL;
		return;
	}
	msi_target = (u64)pp->msi_data;

	/* Program the msi_data */
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
			    lower_32_bits(msi_target));
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
			    upper_32_bits(msi_target));
}
EXPORT_SYMBOL_GPL(dw_pcie_msi_init);
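/*
 * Editorial sketch (not part of the driver): dw_pcie_host_init() below
 * defaults to MSI_DEF_NUM_VECTORS MSI vectors unless the glue driver
 * provides a ->set_num_vectors() callback.  A hypothetical SoC driver
 * (the "foo" names are made up) that supports the full MAX_MSI_IRQS
 * vectors would do roughly:
 *
 *	static void foo_pcie_set_num_vectors(struct pcie_port *pp)
 *	{
 *		pp->num_vectors = MAX_MSI_IRQS;
 *	}
 *
 *	static const struct dw_pcie_host_ops foo_pcie_host_ops = {
 *		.host_init		= foo_pcie_host_init,
 *		.set_num_vectors	= foo_pcie_set_num_vectors,
 *	};
 *
 * and point pp->ops at that structure before calling dw_pcie_host_init().
 */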
int dw_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource_entry *win, *tmp;
	struct pci_bus *child;
	struct pci_host_bridge *bridge;
	struct resource *cfg_res;
	int ret;

	raw_spin_lock_init(&pci->pp.lock);

	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res) >> 1;
		pp->cfg1_size = resource_size(cfg_res) >> 1;
		pp->cfg0_base = cfg_res->start;
		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
	} else if (!pp->va_cfg0_base) {
		dev_err(dev, "Missing *config* reg space\n");
	}

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return -ENOMEM;

	ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
					&bridge->windows, &pp->io_base);
	if (ret)
		return ret;

	ret = devm_request_pci_bus_resources(dev, &bridge->windows);
	if (ret)
		return ret;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			ret = devm_pci_remap_iospace(dev, win->res,
						     pp->io_base);
			if (ret) {
				dev_warn(dev, "Error %d: failed to map resource %pR\n",
					 ret, win->res);
				resource_list_destroy_entry(win);
			} else {
				pp->io = win->res;
				pp->io->name = "I/O";
				pp->io_size = resource_size(pp->io);
				pp->io_bus_addr = pp->io->start - win->offset;
			}
			break;
		case IORESOURCE_MEM:
			pp->mem = win->res;
			pp->mem->name = "MEM";
			pp->mem_size = resource_size(pp->mem);
			pp->mem_bus_addr = pp->mem->start - win->offset;
			break;
		case 0:
			pp->cfg = win->res;
			pp->cfg0_size = resource_size(pp->cfg) >> 1;
			pp->cfg1_size = resource_size(pp->cfg) >> 1;
			pp->cfg0_base = pp->cfg->start;
			pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
			break;
		case IORESOURCE_BUS:
			pp->busn = win->res;
			break;
		}
	}

	if (!pci->dbi_base) {
		pci->dbi_base = devm_pci_remap_cfgspace(dev,
						pp->cfg->start,
						resource_size(pp->cfg));
		if (!pci->dbi_base) {
			dev_err(dev, "Error with ioremap\n");
			return -ENOMEM;
		}
	}

	pp->mem_base = pp->mem->start;

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
					pp->cfg0_base, pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(dev, "Error with ioremap in function\n");
			return -ENOMEM;
		}
	}

	if (!pp->va_cfg1_base) {
		pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
					pp->cfg1_base,
					pp->cfg1_size);
		if (!pp->va_cfg1_base) {
			dev_err(dev, "Error with ioremap\n");
			return -ENOMEM;
		}
	}

	ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
	if (ret)
		pci->num_viewport = 2;

	if (pci_msi_enabled()) {
		/*
		 * If a specific SoC driver needs to change the
		 * default number of vectors, it needs to implement
		 * the set_num_vectors callback.
		 */
		if (!pp->ops->set_num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else {
			pp->ops->set_num_vectors(pp);

			if (pp->num_vectors > MAX_MSI_IRQS ||
			    pp->num_vectors == 0) {
				dev_err(dev,
					"Invalid number of vectors\n");
				return -EINVAL;
			}
		}

		if (!pp->ops->msi_host_init) {
			pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;

			ret = dw_pcie_allocate_domains(pp);
			if (ret)
				return ret;

			if (pp->msi_irq)
				irq_set_chained_handler_and_data(pp->msi_irq,
							    dw_chained_msi_isr,
							    pp);
		} else {
			ret = pp->ops->msi_host_init(pp);
			if (ret < 0)
				return ret;
		}
	}

	if (pp->ops->host_init) {
		ret = pp->ops->host_init(pp);
		if (ret)
			goto err_free_msi;
	}

	pp->root_bus_nr = pp->busn->start;

	bridge->dev.parent = dev;
	bridge->sysdata = pp;
	bridge->busnr = pp->root_bus_nr;
	bridge->ops = &dw_pcie_ops;
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;

	ret = pci_scan_root_bus_bridge(bridge);
	if (ret)
		goto err_free_msi;

	pp->root_bus = bridge->bus;

	if (pp->ops->scan_bus)
		pp->ops->scan_bus(pp);

	pci_bus_size_bridges(pp->root_bus);
	pci_bus_assign_resources(pp->root_bus);

	list_for_each_entry(child, &pp->root_bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(pp->root_bus);
	return 0;

err_free_msi:
	if (pci_msi_enabled() && !pp->ops->msi_host_init)
		dw_pcie_free_msi(pp);
	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);

void dw_pcie_host_deinit(struct pcie_port *pp)
{
	pci_stop_root_bus(pp->root_bus);
	pci_remove_root_bus(pp->root_bus);
	if (pci_msi_enabled() && !pp->ops->msi_host_init)
		dw_pcie_free_msi(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
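/*
 * Editorial note on the helpers below: configuration accesses to the root
 * bus are served directly from the DBI space (dw_pcie_rd/wr_own_conf), while
 * accesses to downstream devices go through an outbound iATU region that is
 * temporarily programmed as a CFG0 window (bus immediately below the root
 * port) or a CFG1 window (buses further down).  On controllers with only two
 * viewports (pci->num_viewport <= 2) that same region also carries I/O
 * traffic, so it is restored to an I/O window after every access.
 */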
static int dw_pcie_access_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				     u32 devfn, int where, int size, u32 *val,
				     bool write)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	if (write)
		ret = dw_pcie_write(va_cfg_base + where, size, *val);
	else
		ret = dw_pcie_read(va_cfg_base + where, size, val);

	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}

static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 *val)
{
	if (pp->ops->rd_other_conf)
		return pp->ops->rd_other_conf(pp, bus, devfn, where,
					      size, val);

	return dw_pcie_access_other_conf(pp, bus, devfn, where, size, val,
					 false);
}

static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 val)
{
	if (pp->ops->wr_other_conf)
		return pp->ops->wr_other_conf(pp, bus, devfn, where,
					      size, val);

	return dw_pcie_access_other_conf(pp, bus, devfn, where, size, &val,
					 true);
}

static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
				int dev)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/* If there is no link, then there is no device */
	if (bus->number != pp->root_bus_nr) {
		if (!dw_pcie_link_up(pci))
			return 0;
	}

	/* Access only one slot on each root port */
	if (bus->number == pp->root_bus_nr && dev > 0)
		return 0;

	return 1;
}

static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			   int size, u32 *val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_rd_own_conf(pp, where, size, val);

	return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
}

static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			   int where, int size, u32 val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_wr_own_conf(pp, where, size, val);

	return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
}

static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
};
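/*
 * Editorial summary: dw_pcie_setup_rc() below performs the one-time Root
 * Complex setup: it initializes the per-controller MSI enable/mask
 * registers, sets up the RC BARs, interrupt pin, bus numbers and command
 * register, programs the default outbound iATU windows (unless the platform
 * does its own address translation via ->rd_other_conf), programs the
 * bridge class code and sets the direct speed change bit.
 */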
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val, ctrl, num_ctrls;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	dw_pcie_setup(pci);

	if (!pp->ops->msi_host_init) {
		num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

		/* Initialize IRQ Status array */
		for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
			pp->irq_mask[ctrl] = ~0;
			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    4, pp->irq_mask[ctrl]);
			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    4, ~0);
		}
	}

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins */
	dw_pcie_dbi_ro_wr_en(pci);
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
	dw_pcie_dbi_ro_wr_dis(pci);

	/* Setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides ->rd_other_conf, it means the platform
	 * uses its own address translation component rather than ATU, so
	 * we should not program the ATU here.
	 */
	if (!pp->ops->rd_other_conf) {
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
					  PCIE_ATU_TYPE_MEM, pp->mem_base,
					  pp->mem_bus_addr, pp->mem_size);
		if (pci->num_viewport > 2)
			dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
						  PCIE_ATU_TYPE_IO, pp->io_base,
						  pp->io_bus_addr, pp->io_size);
	}

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* Enable write permission for the DBI read-only register */
	dw_pcie_dbi_ro_wr_en(pci);
	/* Program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
	/* Better disable write permission right after the update */
	dw_pcie_dbi_ro_wr_dis(pci);

	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);
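/*
 * Editorial usage sketch (illustrative only; the "foo" names are made up):
 * a typical glue driver wires this file up roughly as follows, with its
 * ->host_init() callback doing the controller-specific bring-up:
 *
 *	static int foo_pcie_host_init(struct pcie_port *pp)
 *	{
 *		struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 *
 *		dw_pcie_setup_rc(pp);
 *		dw_pcie_wait_for_link(pci);
 *		if (IS_ENABLED(CONFIG_PCI_MSI))
 *			dw_pcie_msi_init(pp);
 *		return 0;
 *	}
 *
 * and its probe() filling in pp->ops (and pp->msi_irq, if the MSI line is
 * a chained interrupt) before calling dw_pcie_host_init().
 */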