/*
 * pci.c - Low-Level PCI Access in IA-64
 *
 * Derived from bios32.c of i386 tree.
 *
 * (c) Copyright 2002, 2005 Hewlett-Packard Development Company, L.P.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 * Copyright (C) 2004 Silicon Graphics, Inc.
 *
 * Note: Above list of copyright holders is incomplete...
 */
#include <linux/config.h>

#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>

#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/sal.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>


/*
 * Low-level SAL-based PCI configuration access functions.  Note that SAL
 * calls are already serialized (via sal_lock), so we don't need another
 * synchronization mechanism here.
 */

#define PCI_SAL_ADDRESS(seg, bus, devfn, reg)	\
	(((u64) seg << 24) | (bus << 16) | (devfn << 8) | (reg))

/* SAL 3.2 adds support for extended config space. */

#define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg)	\
	(((u64) seg << 28) | (bus << 20) | (devfn << 12) | (reg))

static int
pci_sal_read (unsigned int seg, unsigned int bus, unsigned int devfn,
	      int reg, int len, u32 *value)
{
	u64 addr, data = 0;
	int mode, result;

	if (!value || (seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;
	} else {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;
	}
	result = ia64_sal_pci_config_read(addr, mode, len, &data);
	if (result != 0)
		return -EINVAL;

	*value = (u32) data;
	return 0;
}

static int
pci_sal_write (unsigned int seg, unsigned int bus, unsigned int devfn,
	       int reg, int len, u32 value)
{
	u64 addr;
	int mode, result;

	if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;
	} else {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;
	}
	result = ia64_sal_pci_config_write(addr, mode, len, value);
	if (result != 0)
		return -EINVAL;
	return 0;
}

static struct pci_raw_ops pci_sal_ops = {
	.read =		pci_sal_read,
	.write =	pci_sal_write
};

struct pci_raw_ops *raw_pci_ops = &pci_sal_ops;

static int
pci_read (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
{
	return raw_pci_ops->read(pci_domain_nr(bus), bus->number,
				 devfn, where, size, value);
}

static int
pci_write (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
{
	return raw_pci_ops->write(pci_domain_nr(bus), bus->number,
				  devfn, where, size, value);
}

struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};
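/*
 * Illustrative example (not part of the code): a config read of the
 * 16-bit vendor ID of device 0000:00:02.0 arrives here as
 *
 *	pci_read(bus, PCI_DEVFN(2, 0), PCI_VENDOR_ID, 2, &val);
 *
 * Since seg == 0 and reg == 0 both fit in 8 bits, pci_sal_read() uses
 * the compact encoding:
 *
 *	PCI_SAL_ADDRESS(0, 0, 0x10, 0) == 0x1000	(devfn 0x10 == 02.0)
 *
 * A register above 0xff (extended config space) or a segment above
 * 0xff switches to PCI_SAL_EXT_ADDRESS() and mode 1 instead.
 */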
/* Called by ACPI when it finds a new root bus.  */

static struct pci_controller * __devinit
alloc_pci_controller (int seg)
{
	struct pci_controller *controller;

	controller = kmalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		return NULL;

	memset(controller, 0, sizeof(*controller));
	controller->segment = seg;
	controller->node = -1;
	return controller;
}

struct pci_root_info {
	struct pci_controller *controller;
	char *name;
};

static unsigned int
new_space (u64 phys_base, int sparse)
{
	u64 mmio_base;
	int i;

	if (phys_base == 0)
		return 0;	/* legacy I/O port space */

	mmio_base = (u64) ioremap(phys_base, 0);
	for (i = 0; i < num_io_spaces; i++)
		if (io_space[i].mmio_base == mmio_base &&
		    io_space[i].sparse == sparse)
			return i;

	if (num_io_spaces == MAX_IO_SPACES) {
		printk(KERN_ERR "PCI: Too many IO port spaces "
			"(MAX_IO_SPACES=%lu)\n", MAX_IO_SPACES);
		return ~0;
	}

	i = num_io_spaces++;
	io_space[i].mmio_base = mmio_base;
	io_space[i].sparse = sparse;

	return i;
}

static u64 __devinit
add_io_space (struct pci_root_info *info, struct acpi_resource_address64 *addr)
{
	struct resource *resource;
	char *name;
	u64 base, min, max, base_port;
	unsigned int sparse = 0, space_nr, len;

	resource = kzalloc(sizeof(*resource), GFP_KERNEL);
	if (!resource) {
		printk(KERN_ERR "PCI: No memory for %s I/O port space\n",
			info->name);
		goto out;
	}

	len = strlen(info->name) + 32;
	name = kzalloc(len, GFP_KERNEL);
	if (!name) {
		printk(KERN_ERR "PCI: No memory for %s I/O port space name\n",
			info->name);
		goto free_resource;
	}

	min = addr->min_address_range;
	max = min + addr->address_length - 1;
	if (addr->attribute.io.translation_attribute == ACPI_SPARSE_TRANSLATION)
		sparse = 1;

	space_nr = new_space(addr->address_translation_offset, sparse);
	if (space_nr == ~0)
		goto free_name;

	base = __pa(io_space[space_nr].mmio_base);
	base_port = IO_SPACE_BASE(space_nr);
	snprintf(name, len, "%s I/O Ports %08lx-%08lx", info->name,
		base_port + min, base_port + max);

	/*
	 * The SDM guarantees the legacy 0-64K space is sparse, but if the
	 * mapping is done by the processor (not the bridge), ACPI may not
	 * mark it as sparse.
	 */
	if (space_nr == 0)
		sparse = 1;

	resource->name  = name;
	resource->flags = IORESOURCE_MEM;
	resource->start = base + (sparse ? IO_SPACE_SPARSE_ENCODING(min) : min);
	resource->end   = base + (sparse ? IO_SPACE_SPARSE_ENCODING(max) : max);
	insert_resource(&iomem_resource, resource);

	return base_port;

free_name:
	kfree(name);
free_resource:
	kfree(resource);
out:
	return ~0;
}
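/*
 * Aside on sparse spaces (illustrative): a sparse I/O space spreads the
 * ports out in MMIO so that ports in different pages can be handed to
 * different owners.  With the ia64 encoding in asm/io.h, which maps
 * port p to ((p >> 2) << 12) | (p & 0xfff), port 0x1f0, for example,
 * lands at MMIO offset 0x7c1f0 rather than 0x1f0.  That is why the
 * iomem resource above is built from the encoded min/max when
 * sparse == 1.
 */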
static acpi_status __devinit resource_to_window(struct acpi_resource *resource,
	struct acpi_resource_address64 *addr)
{
	acpi_status status;

	/*
	 * We're only interested in _CRS descriptors that are
	 *	- address space descriptors for memory or I/O space
	 *	- non-zero size
	 *	- producers, i.e., the address space is routed downstream,
	 *	  not consumed by the bridge itself
	 */
	status = acpi_resource_to_address64(resource, addr);
	if (ACPI_SUCCESS(status) &&
	    (addr->resource_type == ACPI_MEMORY_RANGE ||
	     addr->resource_type == ACPI_IO_RANGE) &&
	    addr->address_length &&
	    addr->producer_consumer == ACPI_PRODUCER)
		return AE_OK;

	return AE_ERROR;
}

static acpi_status __devinit
count_window (struct acpi_resource *resource, void *data)
{
	unsigned int *windows = (unsigned int *) data;
	struct acpi_resource_address64 addr;
	acpi_status status;

	status = resource_to_window(resource, &addr);
	if (ACPI_SUCCESS(status))
		(*windows)++;

	return AE_OK;
}

static __devinit acpi_status add_window(struct acpi_resource *res, void *data)
{
	struct pci_root_info *info = data;
	struct pci_window *window;
	struct acpi_resource_address64 addr;
	acpi_status status;
	unsigned long flags, offset = 0;
	struct resource *root;

	/* Return AE_OK for non-window resources to keep scanning for more */
	status = resource_to_window(res, &addr);
	if (!ACPI_SUCCESS(status))
		return AE_OK;

	if (addr.resource_type == ACPI_MEMORY_RANGE) {
		flags = IORESOURCE_MEM;
		root = &iomem_resource;
		offset = addr.address_translation_offset;
	} else if (addr.resource_type == ACPI_IO_RANGE) {
		flags = IORESOURCE_IO;
		root = &ioport_resource;
		offset = add_io_space(info, &addr);
		if (offset == ~0)
			return AE_OK;
	} else
		return AE_OK;

	window = &info->controller->window[info->controller->windows++];
	window->resource.name = info->name;
	window->resource.flags = flags;
	window->resource.start = addr.min_address_range + offset;
	window->resource.end = window->resource.start + addr.address_length - 1;
	window->resource.child = NULL;
	window->offset = offset;

	if (insert_resource(root, &window->resource)) {
		printk(KERN_ERR "alloc 0x%lx-0x%lx from %s for %s failed\n",
			window->resource.start, window->resource.end,
			root->name, info->name);
	}

	return AE_OK;
}
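/*
 * Example of the offset bookkeeping in add_window() (made-up numbers):
 * if _CRS describes a memory window with min 0x80000000, length
 * 0x10000000 and translation offset 0x100000000, the window's resource
 * claims CPU addresses 0x180000000-0x18fffffff, while window->offset
 * records the 0x100000000 delta so that pcibios_resource_to_bus() and
 * pcibios_bus_to_resource() below can convert between the two views.
 */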
static void __devinit
pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl)
{
	int i, j;

	j = 0;
	for (i = 0; i < ctrl->windows; i++) {
		struct resource *res = &ctrl->window[i].resource;
		/*
		 * HP's firmware has a hack to work around a Windows bug.
		 * Ignore these tiny memory ranges.
		 */
		if ((res->flags & IORESOURCE_MEM) &&
		    (res->end - res->start < 16))
			continue;
		if (j >= PCI_BUS_NUM_RESOURCES) {
			printk("Ignoring range [%lx-%lx] (%lx)\n", res->start,
					res->end, res->flags);
			continue;
		}
		bus->resource[j++] = res;
	}
}

struct pci_bus * __devinit
pci_acpi_scan_root(struct acpi_device *device, int domain, int bus)
{
	struct pci_root_info info;
	struct pci_controller *controller;
	unsigned int windows = 0;
	struct pci_bus *pbus;
	char *name;
	int pxm;

	controller = alloc_pci_controller(domain);
	if (!controller)
		goto out1;

	controller->acpi_handle = device->handle;

	pxm = acpi_get_pxm(controller->acpi_handle);
#ifdef CONFIG_NUMA
	if (pxm >= 0)
		controller->node = pxm_to_nid_map[pxm];
#endif

	acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_window,
			&windows);
	controller->window = kmalloc_node(sizeof(*controller->window) * windows,
			GFP_KERNEL, controller->node);
	if (!controller->window)
		goto out2;

	name = kmalloc(16, GFP_KERNEL);
	if (!name)
		goto out3;

	sprintf(name, "PCI Bus %04x:%02x", domain, bus);
	info.controller = controller;
	info.name = name;
	acpi_walk_resources(device->handle, METHOD_NAME__CRS, add_window,
			&info);

	pbus = pci_scan_bus_parented(NULL, bus, &pci_root_ops, controller);
	if (pbus)
		pcibios_setup_root_windows(pbus, controller);

	return pbus;

out3:
	kfree(controller->window);
out2:
	kfree(controller);
out1:
	return NULL;
}

void pcibios_resource_to_bus(struct pci_dev *dev,
		struct pci_bus_region *region, struct resource *res)
{
	struct pci_controller *controller = PCI_CONTROLLER(dev);
	unsigned long offset = 0;
	int i;

	for (i = 0; i < controller->windows; i++) {
		struct pci_window *window = &controller->window[i];
		if (!(window->resource.flags & res->flags))
			continue;
		if (window->resource.start > res->start)
			continue;
		if (window->resource.end < res->end)
			continue;
		offset = window->offset;
		break;
	}

	region->start = res->start - offset;
	region->end = res->end - offset;
}
EXPORT_SYMBOL(pcibios_resource_to_bus);

void pcibios_bus_to_resource(struct pci_dev *dev,
		struct resource *res, struct pci_bus_region *region)
{
	struct pci_controller *controller = PCI_CONTROLLER(dev);
	unsigned long offset = 0;
	int i;

	for (i = 0; i < controller->windows; i++) {
		struct pci_window *window = &controller->window[i];
		if (!(window->resource.flags & res->flags))
			continue;
		if (window->resource.start - window->offset > region->start)
			continue;
		if (window->resource.end - window->offset < region->end)
			continue;
		offset = window->offset;
		break;
	}

	res->start = region->start + offset;
	res->end = region->end + offset;
}
EXPORT_SYMBOL(pcibios_bus_to_resource);

static int __devinit is_valid_resource(struct pci_dev *dev, int idx)
{
	unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM;
	struct resource *devr = &dev->resource[idx];

	if (!dev->bus)
		return 0;
	for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
		struct resource *busr = dev->bus->resource[i];

		if (!busr || ((busr->flags ^ devr->flags) & type_mask))
			continue;
		if ((devr->start) && (devr->start >= busr->start) &&
		    (devr->end <= busr->end))
			return 1;
	}
	return 0;
}
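/*
 * Worked example for pcibios_resource_to_bus()/pcibios_bus_to_resource()
 * above (hypothetical numbers): with a window whose offset is
 * 0x100000000, a BAR that reads back as bus address 0x80000000 is a CPU
 * resource at 0x180000000, so pcibios_bus_to_resource() adds the offset
 * and pcibios_resource_to_bus() subtracts it again.  A resource that
 * falls in no window is passed through with offset 0.
 */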
static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
{
	struct pci_bus_region region;
	int i;
	int limit = (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) ?
		PCI_BRIDGE_RESOURCES : PCI_NUM_RESOURCES;

	for (i = 0; i < limit; i++) {
		if (!dev->resource[i].flags)
			continue;
		region.start = dev->resource[i].start;
		region.end = dev->resource[i].end;
		pcibios_bus_to_resource(dev, &dev->resource[i], &region);
		if ((is_valid_resource(dev, i)))
			pci_claim_resource(dev, i);
	}
}

/*
 * Called after each bus is probed, but before its children are examined.
 */
void __devinit
pcibios_fixup_bus (struct pci_bus *b)
{
	struct pci_dev *dev;

	if (b->self) {
		pci_read_bridge_bases(b);
		pcibios_fixup_device_resources(b->self);
	}
	list_for_each_entry(dev, &b->devices, bus_list)
		pcibios_fixup_device_resources(dev);

	return;
}

void __devinit
pcibios_update_irq (struct pci_dev *dev, int irq)
{
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);

	/* ??? FIXME -- record old value for shutdown.  */
}

static inline int
pcibios_enable_resources (struct pci_dev *dev, int mask)
{
	u16 cmd, old_cmd;
	int idx;
	struct resource *r;
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM;

	if (!dev)
		return -EINVAL;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	old_cmd = cmd;
	for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
		/* Only set up the desired resources.  */
		if (!(mask & (1 << idx)))
			continue;

		r = &dev->resource[idx];
		if (!(r->flags & type_mask))
			continue;
		if ((idx == PCI_ROM_RESOURCE) &&
				(!(r->flags & IORESOURCE_ROM_ENABLE)))
			continue;
		if (!r->start && r->end) {
			printk(KERN_ERR
			       "PCI: Device %s not available because of resource collisions\n",
			       pci_name(dev));
			return -EINVAL;
		}
		if (r->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (r->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	if (cmd != old_cmd) {
		printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}

int
pcibios_enable_device (struct pci_dev *dev, int mask)
{
	int ret;

	ret = pcibios_enable_resources(dev, mask);
	if (ret < 0)
		return ret;

	return acpi_pci_irq_enable(dev);
}

void
pcibios_disable_device (struct pci_dev *dev)
{
	acpi_pci_irq_disable(dev);
}

void
pcibios_align_resource (void *data, struct resource *res,
		        unsigned long size, unsigned long align)
{
}

/*
 * PCI BIOS setup, always defaults to SAL interface
 */
char * __init
pcibios_setup (char *str)
{
	return NULL;
}
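/*
 * Illustrative only: pci_mmap_page_range() below is reached when
 * userspace mmaps a BAR through the sysfs resource files, e.g.
 *
 *	fd = open("/sys/bus/pci/devices/0000:00:02.0/resource0", O_RDWR);
 *	ptr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * by which point vma->vm_pgoff already holds the page frame of the BAR
 * within PCI memory space.
 */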
590 */ 591 return -EINVAL; 592 593 /* 594 * Leave vm_pgoff as-is, the PCI space address is the physical 595 * address on this platform. 596 */ 597 vma->vm_flags |= (VM_SHM | VM_RESERVED | VM_IO); 598 599 if (write_combine && efi_range_is_wc(vma->vm_start, 600 vma->vm_end - vma->vm_start)) 601 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); 602 else 603 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 604 605 if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, 606 vma->vm_end - vma->vm_start, vma->vm_page_prot)) 607 return -EAGAIN; 608 609 return 0; 610 } 611 612 /** 613 * ia64_pci_get_legacy_mem - generic legacy mem routine 614 * @bus: bus to get legacy memory base address for 615 * 616 * Find the base of legacy memory for @bus. This is typically the first 617 * megabyte of bus address space for @bus or is simply 0 on platforms whose 618 * chipsets support legacy I/O and memory routing. Returns the base address 619 * or an error pointer if an error occurred. 620 * 621 * This is the ia64 generic version of this routine. Other platforms 622 * are free to override it with a machine vector. 623 */ 624 char *ia64_pci_get_legacy_mem(struct pci_bus *bus) 625 { 626 return (char *)__IA64_UNCACHED_OFFSET; 627 } 628 629 /** 630 * pci_mmap_legacy_page_range - map legacy memory space to userland 631 * @bus: bus whose legacy space we're mapping 632 * @vma: vma passed in by mmap 633 * 634 * Map legacy memory space for this device back to userspace using a machine 635 * vector to get the base address. 636 */ 637 int 638 pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma) 639 { 640 char *addr; 641 642 addr = pci_get_legacy_mem(bus); 643 if (IS_ERR(addr)) 644 return PTR_ERR(addr); 645 646 vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT; 647 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 648 vma->vm_flags |= (VM_SHM | VM_RESERVED | VM_IO); 649 650 if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, 651 vma->vm_end - vma->vm_start, vma->vm_page_prot)) 652 return -EAGAIN; 653 654 return 0; 655 } 656 657 /** 658 * ia64_pci_legacy_read - read from legacy I/O space 659 * @bus: bus to read 660 * @port: legacy port value 661 * @val: caller allocated storage for returned value 662 * @size: number of bytes to read 663 * 664 * Simply reads @size bytes from @port and puts the result in @val. 665 * 666 * Again, this (and the write routine) are generic versions that can be 667 * overridden by the platform. This is necessary on platforms that don't 668 * support legacy I/O routing or that hard fail on legacy I/O timeouts. 669 */ 670 int ia64_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size) 671 { 672 int ret = size; 673 674 switch (size) { 675 case 1: 676 *val = inb(port); 677 break; 678 case 2: 679 *val = inw(port); 680 break; 681 case 4: 682 *val = inl(port); 683 break; 684 default: 685 ret = -EINVAL; 686 break; 687 } 688 689 return ret; 690 } 691 692 /** 693 * ia64_pci_legacy_write - perform a legacy I/O write 694 * @bus: bus pointer 695 * @port: port to write 696 * @val: value to write 697 * @size: number of bytes to write from @val 698 * 699 * Simply writes @size bytes of @val to @port. 
700 */ 701 int ia64_pci_legacy_write(struct pci_dev *bus, u16 port, u32 val, u8 size) 702 { 703 int ret = size; 704 705 switch (size) { 706 case 1: 707 outb(val, port); 708 break; 709 case 2: 710 outw(val, port); 711 break; 712 case 4: 713 outl(val, port); 714 break; 715 default: 716 ret = -EINVAL; 717 break; 718 } 719 720 return ret; 721 } 722 723 /** 724 * pci_cacheline_size - determine cacheline size for PCI devices 725 * @dev: void 726 * 727 * We want to use the line-size of the outer-most cache. We assume 728 * that this line-size is the same for all CPUs. 729 * 730 * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info(). 731 * 732 * RETURNS: An appropriate -ERRNO error value on eror, or zero for success. 733 */ 734 static unsigned long 735 pci_cacheline_size (void) 736 { 737 u64 levels, unique_caches; 738 s64 status; 739 pal_cache_config_info_t cci; 740 static u8 cacheline_size; 741 742 if (cacheline_size) 743 return cacheline_size; 744 745 status = ia64_pal_cache_summary(&levels, &unique_caches); 746 if (status != 0) { 747 printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n", 748 __FUNCTION__, status); 749 return SMP_CACHE_BYTES; 750 } 751 752 status = ia64_pal_cache_config_info(levels - 1, /* cache_type (data_or_unified)= */ 2, 753 &cci); 754 if (status != 0) { 755 printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed (status=%ld)\n", 756 __FUNCTION__, status); 757 return SMP_CACHE_BYTES; 758 } 759 cacheline_size = 1 << cci.pcci_line_size; 760 return cacheline_size; 761 } 762 763 /** 764 * pcibios_prep_mwi - helper function for drivers/pci/pci.c:pci_set_mwi() 765 * @dev: the PCI device for which MWI is enabled 766 * 767 * For ia64, we can get the cacheline sizes from PAL. 768 * 769 * RETURNS: An appropriate -ERRNO error value on eror, or zero for success. 770 */ 771 int 772 pcibios_prep_mwi (struct pci_dev *dev) 773 { 774 unsigned long desired_linesize, current_linesize; 775 int rc = 0; 776 u8 pci_linesize; 777 778 desired_linesize = pci_cacheline_size(); 779 780 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &pci_linesize); 781 current_linesize = 4 * pci_linesize; 782 if (desired_linesize != current_linesize) { 783 printk(KERN_WARNING "PCI: slot %s has incorrect PCI cache line size of %lu bytes,", 784 pci_name(dev), current_linesize); 785 if (current_linesize > desired_linesize) { 786 printk(" expected %lu bytes instead\n", desired_linesize); 787 rc = -EINVAL; 788 } else { 789 printk(" correcting to %lu\n", desired_linesize); 790 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, desired_linesize / 4); 791 } 792 } 793 return rc; 794 } 795 796 int pci_vector_resources(int last, int nr_released) 797 { 798 int count = nr_released; 799 800 count += (IA64_LAST_DEVICE_VECTOR - last); 801 802 return count; 803 } 804