/*
 * Broadcom specific AMBA
 * PCI Core in hostmode
 *
 * Copyright 2005 - 2011, Broadcom Corporation
 * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
 * Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/bcma/bcma.h>
#include <asm/paccess.h>

/* Probe a 32-bit value on the bus and catch bus exceptions.
 * Returns nonzero on a bus exception.
 * This is MIPS specific.
 */
#define mips_busprobe32(val, addr)	get_dbe((val), ((u32 *)(addr)))

/* Assume one-hot slot wiring */
#define BCMA_PCI_SLOT_MAX	16
#define PCI_CONFIG_SPACE_SIZE	256

bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
{
	struct bcma_bus *bus = pc->core->bus;
	u16 chipid_top;
	u32 tmp;

	chipid_top = (bus->chipinfo.id & 0xFF00);
	if (chipid_top != 0x4700 &&
	    chipid_top != 0x5300)
		return false;

	bcma_core_enable(pc->core, 0);

	return !mips_busprobe32(tmp, pc->core->io_addr);
}

static u32 bcma_pcie_read_config(struct bcma_drv_pci *pc, u32 address)
{
	pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_ADDR, address);
	pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_ADDR);
	return pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_DATA);
}

static void bcma_pcie_write_config(struct bcma_drv_pci *pc, u32 address,
				   u32 data)
{
	pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_ADDR, address);
	pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_ADDR);
	pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_DATA, data);
}

static u32 bcma_get_cfgspace_addr(struct bcma_drv_pci *pc, unsigned int dev,
				  unsigned int func, unsigned int off)
{
	u32 addr = 0;

	/* Issue config commands only when the data link is up (at least
	 * one external PCIe device is present).
	 */
	if (dev >= 2 || !(bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_LSREG)
			  & BCMA_CORE_PCI_DLLP_LSREG_LINKUP))
		goto out;

	/* Type 0 transaction */
	/* Slide the PCI window to the appropriate slot */
	pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI1, BCMA_CORE_PCI_SBTOPCI_CFG0);
	/* Calculate the address */
	addr = pc->host_controller->host_cfg_addr;
	addr |= (dev << BCMA_CORE_PCI_CFG_SLOT_SHIFT);
	addr |= (func << BCMA_CORE_PCI_CFG_FUN_SHIFT);
	addr |= (off & ~3);

out:
	return addr;
}

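/* External PCI config space access.
 *
 * Device 0 is the PCIe core itself: the first 256 bytes of its config
 * space are reachable through the PCICFG0 window, while offsets >= 256
 * need the indirect CONFIG_ADDR/CONFIG_DATA accessors above.  Any other
 * device is reached by sliding the SBTOPCI1 window to the slot (type 0
 * cycle), ioremapping the resulting address and probing it with
 * mips_busprobe32(), so an absent device yields 0xFFFFFFFF instead of a
 * bus error.
 */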
static int bcma_extpci_read_config(struct bcma_drv_pci *pc, unsigned int dev,
				   unsigned int func, unsigned int off,
				   void *buf, int len)
{
	int err = -EINVAL;
	u32 addr, val;
	void __iomem *mmio = NULL;

	WARN_ON(!pc->hostmode);
	if (unlikely(len != 1 && len != 2 && len != 4))
		goto out;
	if (dev == 0) {
		/* we support only two functions on device 0 */
		if (func > 1)
			goto out;

		/* accesses to config registers with offsets >= 256
		 * require indirect access.
		 */
		if (off >= PCI_CONFIG_SPACE_SIZE) {
			addr = (func << 12);
			addr |= (off & 0x0FFC);
			val = bcma_pcie_read_config(pc, addr);
		} else {
			addr = BCMA_CORE_PCI_PCICFG0;
			addr |= (func << 8);
			addr |= (off & 0xFC);
			val = pcicore_read32(pc, addr);
		}
	} else {
		addr = bcma_get_cfgspace_addr(pc, dev, func, off);
		if (unlikely(!addr))
			goto out;
		err = -ENOMEM;
		mmio = ioremap_nocache(addr, sizeof(val));
		if (!mmio)
			goto out;

		if (mips_busprobe32(val, mmio)) {
			val = 0xFFFFFFFF;
			goto unmap;
		}
	}
	val >>= (8 * (off & 3));

	switch (len) {
	case 1:
		*((u8 *)buf) = (u8)val;
		break;
	case 2:
		*((u16 *)buf) = (u16)val;
		break;
	case 4:
		*((u32 *)buf) = (u32)val;
		break;
	}
	err = 0;
unmap:
	if (mmio)
		iounmap(mmio);
out:
	return err;
}

static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
				    unsigned int func, unsigned int off,
				    const void *buf, int len)
{
	int err = -EINVAL;
	u32 addr, val;
	void __iomem *mmio = NULL;
	u16 chipid = pc->core->bus->chipinfo.id;

	WARN_ON(!pc->hostmode);
	if (unlikely(len != 1 && len != 2 && len != 4))
		goto out;
	if (dev == 0) {
		/* we support only two functions on device 0 */
		if (func > 1)
			goto out;

		/* accesses to config registers with offsets >= 256
		 * require indirect access.
		 */
		if (off >= PCI_CONFIG_SPACE_SIZE) {
			addr = (func << 12);
			addr |= (off & 0x0FFC);
			val = bcma_pcie_read_config(pc, addr);
		} else {
			addr = BCMA_CORE_PCI_PCICFG0;
			addr |= (func << 8);
			addr |= (off & 0xFC);
			val = pcicore_read32(pc, addr);
		}
	} else {
		addr = bcma_get_cfgspace_addr(pc, dev, func, off);
		if (unlikely(!addr))
			goto out;
		err = -ENOMEM;
		mmio = ioremap_nocache(addr, sizeof(val));
		if (!mmio)
			goto out;

		if (mips_busprobe32(val, mmio)) {
			val = 0xFFFFFFFF;
			goto unmap;
		}
	}

	switch (len) {
	case 1:
		val &= ~(0xFF << (8 * (off & 3)));
		val |= *((const u8 *)buf) << (8 * (off & 3));
		break;
	case 2:
		val &= ~(0xFFFF << (8 * (off & 3)));
		val |= *((const u16 *)buf) << (8 * (off & 3));
		break;
	case 4:
		val = *((const u32 *)buf);
		break;
	}
	if (dev == 0) {
		/* accesses to config registers with offsets >= 256
		 * require indirect access.
		 */
		if (off >= PCI_CONFIG_SPACE_SIZE)
			bcma_pcie_write_config(pc, addr, val);
		else
			pcicore_write32(pc, addr, val);
	} else {
		writel(val, mmio);

		if (chipid == BCMA_CHIP_ID_BCM4716 ||
		    chipid == BCMA_CHIP_ID_BCM4748)
			readl(mmio);
	}

	err = 0;
unmap:
	if (mmio)
		iounmap(mmio);
out:
	return err;
}

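/* struct pci_ops callbacks.  All config space accesses from the PCI core
 * go through these; they are serialized with cfgspace_lock and the result
 * is translated into PCIBIOS_* return codes.
 */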
static int bcma_core_pci_hostmode_read_config(struct pci_bus *bus,
					      unsigned int devfn,
					      int reg, int size, u32 *val)
{
	unsigned long flags;
	int err;
	struct bcma_drv_pci *pc;
	struct bcma_drv_pci_host *pc_host;

	pc_host = container_of(bus->ops, struct bcma_drv_pci_host, pci_ops);
	pc = pc_host->pdev;

	spin_lock_irqsave(&pc_host->cfgspace_lock, flags);
	err = bcma_extpci_read_config(pc, PCI_SLOT(devfn),
				      PCI_FUNC(devfn), reg, val, size);
	spin_unlock_irqrestore(&pc_host->cfgspace_lock, flags);

	return err ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
}

static int bcma_core_pci_hostmode_write_config(struct pci_bus *bus,
					       unsigned int devfn,
					       int reg, int size, u32 val)
{
	unsigned long flags;
	int err;
	struct bcma_drv_pci *pc;
	struct bcma_drv_pci_host *pc_host;

	pc_host = container_of(bus->ops, struct bcma_drv_pci_host, pci_ops);
	pc = pc_host->pdev;

	spin_lock_irqsave(&pc_host->cfgspace_lock, flags);
	err = bcma_extpci_write_config(pc, PCI_SLOT(devfn),
				       PCI_FUNC(devfn), reg, &val, size);
	spin_unlock_irqrestore(&pc_host->cfgspace_lock, flags);

	return err ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
}

/* return cap_offset if the requested capability exists in the PCI config
 * space */
static u8 bcma_find_pci_capability(struct bcma_drv_pci *pc, unsigned int dev,
				   unsigned int func, u8 req_cap_id,
				   unsigned char *buf, u32 *buflen)
{
	u8 cap_id;
	u8 cap_ptr = 0;
	u32 bufsize;
	u8 byte_val;

	/* check for Header type 0 */
	bcma_extpci_read_config(pc, dev, func, PCI_HEADER_TYPE, &byte_val,
				sizeof(u8));
	if ((byte_val & 0x7F) != PCI_HEADER_TYPE_NORMAL)
		return cap_ptr;

	/* check if the capability pointer field exists */
	bcma_extpci_read_config(pc, dev, func, PCI_STATUS, &byte_val,
				sizeof(u8));
	if (!(byte_val & PCI_STATUS_CAP_LIST))
		return cap_ptr;

	/* check if the capability pointer is 0x00 */
	bcma_extpci_read_config(pc, dev, func, PCI_CAPABILITY_LIST, &cap_ptr,
				sizeof(u8));
	if (cap_ptr == 0x00)
		return cap_ptr;

	/* loop through the capability list and see if the requested
	 * capability exists */
	bcma_extpci_read_config(pc, dev, func, cap_ptr, &cap_id, sizeof(u8));
	while (cap_id != req_cap_id) {
		bcma_extpci_read_config(pc, dev, func, cap_ptr + 1, &cap_ptr,
					sizeof(u8));
		if (cap_ptr == 0x00)
			return cap_ptr;
		bcma_extpci_read_config(pc, dev, func, cap_ptr, &cap_id,
					sizeof(u8));
	}

	/* found the capability requested by the caller */
	if ((buf != NULL) && (buflen != NULL)) {
		u8 cap_data;

		bufsize = *buflen;
		if (!bufsize)
			return cap_ptr;

		*buflen = 0;

		/* copy the capability data excluding cap ID and next ptr */
		cap_data = cap_ptr + 2;
		if ((bufsize + cap_data) > PCI_CONFIG_SPACE_SIZE)
			bufsize = PCI_CONFIG_SPACE_SIZE - cap_data;
		*buflen = bufsize;
		while (bufsize--) {
			bcma_extpci_read_config(pc, dev, func, cap_data, buf,
						sizeof(u8));
			cap_data++;
			buf++;
		}
	}

	return cap_ptr;
}

338 */ 339 static void bcma_core_pci_enable_crs(struct bcma_drv_pci *pc) 340 { 341 struct bcma_bus *bus = pc->core->bus; 342 u8 cap_ptr, root_ctrl, root_cap, dev; 343 u16 val16; 344 int i; 345 346 cap_ptr = bcma_find_pci_capability(pc, 0, 0, PCI_CAP_ID_EXP, NULL, 347 NULL); 348 root_cap = cap_ptr + PCI_EXP_RTCAP; 349 bcma_extpci_read_config(pc, 0, 0, root_cap, &val16, sizeof(u16)); 350 if (val16 & BCMA_CORE_PCI_RC_CRS_VISIBILITY) { 351 /* Enable CRS software visibility */ 352 root_ctrl = cap_ptr + PCI_EXP_RTCTL; 353 val16 = PCI_EXP_RTCTL_CRSSVE; 354 bcma_extpci_read_config(pc, 0, 0, root_ctrl, &val16, 355 sizeof(u16)); 356 357 /* Initiate a configuration request to read the vendor id 358 * field of the device function's config space header after 359 * 100 ms wait time from the end of Reset. If the device is 360 * not done with its internal initialization, it must at 361 * least return a completion TLP, with a completion status 362 * of "Configuration Request Retry Status (CRS)". The root 363 * complex must complete the request to the host by returning 364 * a read-data value of 0001h for the Vendor ID field and 365 * all 1s for any additional bytes included in the request. 366 * Poll using the config reads for max wait time of 1 sec or 367 * until we receive the successful completion status. Repeat 368 * the procedure for all the devices. 369 */ 370 for (dev = 1; dev < BCMA_PCI_SLOT_MAX; dev++) { 371 for (i = 0; i < 100000; i++) { 372 bcma_extpci_read_config(pc, dev, 0, 373 PCI_VENDOR_ID, &val16, 374 sizeof(val16)); 375 if (val16 != 0x1) 376 break; 377 udelay(10); 378 } 379 if (val16 == 0x1) 380 bcma_err(bus, "PCI: Broken device in slot %d\n", 381 dev); 382 } 383 } 384 } 385 386 void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc) 387 { 388 struct bcma_bus *bus = pc->core->bus; 389 struct bcma_drv_pci_host *pc_host; 390 u32 tmp; 391 u32 pci_membase_1G; 392 unsigned long io_map_base; 393 394 bcma_info(bus, "PCIEcore in host mode found\n"); 395 396 if (bus->sprom.boardflags_lo & BCMA_CORE_PCI_BFL_NOPCI) { 397 bcma_info(bus, "This PCIE core is disabled and not working\n"); 398 return; 399 } 400 401 pc_host = kzalloc(sizeof(*pc_host), GFP_KERNEL); 402 if (!pc_host) { 403 bcma_err(bus, "can not allocate memory"); 404 return; 405 } 406 407 spin_lock_init(&pc_host->cfgspace_lock); 408 409 pc->host_controller = pc_host; 410 pc_host->pci_controller.io_resource = &pc_host->io_resource; 411 pc_host->pci_controller.mem_resource = &pc_host->mem_resource; 412 pc_host->pci_controller.pci_ops = &pc_host->pci_ops; 413 pc_host->pdev = pc; 414 415 pci_membase_1G = BCMA_SOC_PCI_DMA; 416 pc_host->host_cfg_addr = BCMA_SOC_PCI_CFG; 417 418 pc_host->pci_ops.read = bcma_core_pci_hostmode_read_config; 419 pc_host->pci_ops.write = bcma_core_pci_hostmode_write_config; 420 421 pc_host->mem_resource.name = "BCMA PCIcore external memory", 422 pc_host->mem_resource.start = BCMA_SOC_PCI_DMA; 423 pc_host->mem_resource.end = BCMA_SOC_PCI_DMA + BCMA_SOC_PCI_DMA_SZ - 1; 424 pc_host->mem_resource.flags = IORESOURCE_MEM | IORESOURCE_PCI_FIXED; 425 426 pc_host->io_resource.name = "BCMA PCIcore external I/O", 427 pc_host->io_resource.start = 0x100; 428 pc_host->io_resource.end = 0x7FF; 429 pc_host->io_resource.flags = IORESOURCE_IO | IORESOURCE_PCI_FIXED; 430 431 /* Reset RC */ 432 usleep_range(3000, 5000); 433 pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST_OE); 434 msleep(50); 435 pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST | 436 BCMA_CORE_PCI_CTL_RST_OE); 437 438 /* 64 MB I/O access window. 
	/* 64 MB I/O access window. On 4716, use
	 * sbtopcie0 to access the device registers. We
	 * can't use address match 2 (1 GB window) region
	 * as MIPS can't generate a 64-bit address on the
	 * backplane.
	 */
	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4716 ||
	    bus->chipinfo.id == BCMA_CHIP_ID_BCM4748) {
		pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
		pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
					    BCMA_SOC_PCI_MEM_SZ - 1;
		pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
				BCMA_CORE_PCI_SBTOPCI_MEM | BCMA_SOC_PCI_MEM);
	} else if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
		tmp = BCMA_CORE_PCI_SBTOPCI_MEM;
		tmp |= BCMA_CORE_PCI_SBTOPCI_PREF;
		tmp |= BCMA_CORE_PCI_SBTOPCI_BURST;
		if (pc->core->core_unit == 0) {
			pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
			pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
						    BCMA_SOC_PCI_MEM_SZ - 1;
			pc_host->io_resource.start = 0x100;
			pc_host->io_resource.end = 0x47F;
			pci_membase_1G = BCMA_SOC_PCIE_DMA_H32;
			pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
					tmp | BCMA_SOC_PCI_MEM);
		} else if (pc->core->core_unit == 1) {
			pc_host->mem_resource.start = BCMA_SOC_PCI1_MEM;
			pc_host->mem_resource.end = BCMA_SOC_PCI1_MEM +
						    BCMA_SOC_PCI_MEM_SZ - 1;
			pc_host->io_resource.start = 0x480;
			pc_host->io_resource.end = 0x7FF;
			pci_membase_1G = BCMA_SOC_PCIE1_DMA_H32;
			pc_host->host_cfg_addr = BCMA_SOC_PCI1_CFG;
			pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
					tmp | BCMA_SOC_PCI1_MEM);
		}
	} else
		pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
				BCMA_CORE_PCI_SBTOPCI_IO);

	/* 64 MB configuration access window */
	pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI1, BCMA_CORE_PCI_SBTOPCI_CFG0);

	/* 1 GB memory access window */
	pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI2,
			BCMA_CORE_PCI_SBTOPCI_MEM | pci_membase_1G);

	/* As per PCI Express Base Spec 1.1 we need to wait for
	 * at least 100 ms from the end of a reset (cold/warm/hot)
	 * before issuing configuration requests to PCI Express
	 * devices.
	 */
	msleep(100);

	bcma_core_pci_enable_crs(pc);

	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706 ||
	    bus->chipinfo.id == BCMA_CHIP_ID_BCM4716) {
		u16 val16;

		bcma_extpci_read_config(pc, 0, 0, BCMA_CORE_PCI_CFG_DEVCTRL,
					&val16, sizeof(val16));
		val16 |= (2 << 5);	/* Max payload size of 512 */
		val16 |= (2 << 12);	/* MRRS 512 */
		bcma_extpci_write_config(pc, 0, 0, BCMA_CORE_PCI_CFG_DEVCTRL,
					 &val16, sizeof(val16));
	}

	/* Enable PCI bridge BAR0 memory & master access */
	tmp = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
	bcma_extpci_write_config(pc, 0, 0, PCI_COMMAND, &tmp, sizeof(tmp));

	/* Enable PCI interrupts */
	pcicore_write32(pc, BCMA_CORE_PCI_IMASK, BCMA_CORE_PCI_IMASK_INTA);

	/* Ok, ready to run, register it to the system.
	 * The following needs to change if we want to port hostmode
	 * to a non-MIPS platform.
	 */
	io_map_base = (unsigned long)ioremap_nocache(pc_host->mem_resource.start,
						     resource_size(&pc_host->mem_resource));
	pc_host->pci_controller.io_map_base = io_map_base;
	set_io_port_base(pc_host->pci_controller.io_map_base);
	/* Give the PCI controller some time to configure itself with the new
	 * values. Not waiting at this point causes crashes of the machine.
	 */
	usleep_range(10000, 15000);
	register_pci_controller(&pc_host->pci_controller);
}

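/* The fixups below are registered with DECLARE_PCI_FIXUP_EARLY() /
 * DECLARE_PCI_FIXUP_HEADER() and are run by the PCI core during bus
 * enumeration.  Each one first checks bus->ops so that it only touches
 * devices that really sit behind this PCI core.
 */
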
/* Early PCI fixup for a device on the PCI-core bridge. */
static void bcma_core_pci_fixup_pcibridge(struct pci_dev *dev)
{
	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
		/* This is not a device on the PCI-core bridge. */
		return;
	}
	if (PCI_SLOT(dev->devfn) != 0)
		return;

	pr_info("PCI: Fixing up bridge %s\n", pci_name(dev));

	/* Enable PCI bridge bus mastering and memory space */
	pci_set_master(dev);
	if (pcibios_enable_device(dev, ~0) < 0) {
		pr_err("PCI: BCMA bridge enable failed\n");
		return;
	}

	/* Enable PCI bridge BAR1 prefetch and burst */
	pci_write_config_dword(dev, BCMA_PCI_BAR1_CONTROL, 3);
}
DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_pcibridge);

/* Early PCI fixup for all PCI-cores to set the correct memory address. */
static void bcma_core_pci_fixup_addresses(struct pci_dev *dev)
{
	struct resource *res;
	int pos, err;

	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
		/* This is not a device on the PCI-core bridge. */
		return;
	}
	if (PCI_SLOT(dev->devfn) == 0)
		return;

	pr_info("PCI: Fixing up addresses %s\n", pci_name(dev));

	for (pos = 0; pos < 6; pos++) {
		res = &dev->resource[pos];
		if (res->flags & (IORESOURCE_IO | IORESOURCE_MEM)) {
			err = pci_assign_resource(dev, pos);
			if (err)
				pr_err("PCI: Problem fixing up the addresses on %s\n",
				       pci_name(dev));
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_addresses);

/* This function is called when doing a pci_enable_device().
 * We must first check if the device is a device on the PCI-core bridge.
 */
int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
{
	struct bcma_drv_pci_host *pc_host;
	int readrq;

	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
		/* This is not a device on the PCI-core bridge. */
		return -ENODEV;
	}
	pc_host = container_of(dev->bus->ops, struct bcma_drv_pci_host,
			       pci_ops);

	pr_info("PCI: Fixing up device %s\n", pci_name(dev));

	/* Fix up interrupt lines */
	dev->irq = bcma_core_irq(pc_host->pdev->core, 0);
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);

	readrq = pcie_get_readrq(dev);
	if (readrq > 128) {
		pr_info("change PCIe max read request size from %i to 128\n",
			readrq);
		pcie_set_readrq(dev, 128);
	}
	return 0;
}
EXPORT_SYMBOL(bcma_core_pci_plat_dev_init);

/* PCI device IRQ mapping. */
int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev)
{
	struct bcma_drv_pci_host *pc_host;

	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
		/* This is not a device on the PCI-core bridge. */
		return -ENODEV;
	}

	pc_host = container_of(dev->bus->ops, struct bcma_drv_pci_host,
			       pci_ops);
	return bcma_core_irq(pc_host->pdev->core, 0);
}
EXPORT_SYMBOL(bcma_core_pci_pcibios_map_irq);
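
/* Usage note (illustrative sketch, not part of this driver): the two
 * exported helpers above are intended to be called from the platform's
 * PCI BIOS hooks.  On a MIPS SoC the glue looks roughly like this, with
 * the platform/bus-type checks omitted for brevity:
 *
 *	int pcibios_plat_dev_init(struct pci_dev *dev)
 *	{
 *		return bcma_core_pci_plat_dev_init(dev);
 *	}
 *
 *	int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 *	{
 *		return bcma_core_pci_pcibios_map_irq(dev);
 *	}
 */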