Lines Matching +full:has +full:- +full:legacy +full:- +full:mode

1 // SPDX-License-Identifier: GPL-2.0-only
3 * pci.c - Low-Level PCI Access in IA-64
7 * (c) Copyright 2002, 2005 Hewlett-Packard Development Company, L.P.
8 * David Mosberger-Tang <davidm@hpl.hp.com>
19 #include <linux/pci-acpi.h>
35 * Low-level SAL-based PCI configuration access functions. Note that SAL
52 int mode, result; in raw_pci_read()
55 return -EINVAL; in raw_pci_read()
59 mode = 0; in raw_pci_read()
62 mode = 1; in raw_pci_read()
64 return -EINVAL; in raw_pci_read()
67 result = ia64_sal_pci_config_read(addr, mode, len, &data); in raw_pci_read()
69 return -EINVAL; in raw_pci_read()
79 int mode, result; in raw_pci_write()
82 return -EINVAL; in raw_pci_write()
86 mode = 0; in raw_pci_write()
89 mode = 1; in raw_pci_write()
91 return -EINVAL; in raw_pci_write()
93 result = ia64_sal_pci_config_write(addr, mode, len, value); in raw_pci_write()
95 return -EINVAL; in raw_pci_write()
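
The lines above only surface the mode selection and error returns of the SAL config accessors. As orientation, here is a minimal sketch of how such a read is typically assembled on ia64; the PCI_SAL_ADDRESS/PCI_SAL_EXT_ADDRESS encodings, the sal_revision gate, and the sketch_ name are assumptions for illustration, not text from the matched file.

/* Sketch only: assumed SAL config-space address encodings. */
#define PCI_SAL_ADDRESS(seg, bus, devfn, reg) \
	(((u64) (seg) << 24) | ((bus) << 16) | ((devfn) << 8) | (reg))
/* Extended encoding (SAL 3.2+), needed once seg or reg exceeds 255. */
#define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg) \
	(((u64) (seg) << 28) | ((bus) << 20) | ((devfn) << 12) | (reg))

static int sketch_raw_pci_read(unsigned int seg, unsigned int bus,
			       unsigned int devfn, int reg, int len, u32 *value)
{
	u64 addr, data = 0;
	int mode, result;

	if (!value || seg > 65535 || bus > 255 || devfn > 255 || reg > 4095)
		return -EINVAL;

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;		/* compatible config access */
	} else if (sal_revision >= SAL_VERSION_CODE(3, 2)) {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;		/* extended config space */
	} else {
		return -EINVAL;		/* firmware cannot reach this register */
	}

	result = ia64_sal_pci_config_read(addr, mode, len, &data);
	if (result != 0)
		return -EINVAL;

	*value = (u32) data;
	return 0;
}

The write path mirrors this, handing the value to ia64_sal_pci_config_write() instead of reading into a scratch u64.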
102 return raw_pci_read(pci_domain_nr(bus), bus->number, in pci_read()
109 return raw_pci_write(pci_domain_nr(bus), bus->number, in pci_write()
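
pci_read()/pci_write() just recover the segment and bus number before calling the raw accessors; the PCI core normally reaches them through a struct pci_ops. A minimal illustration of that hookup (the pci_root_ops name is an assumption here):

struct pci_ops pci_root_ops = {
	.read  = pci_read,
	.write = pci_write,
};

Keeping the raw_pci_* entry points separate lets ACPI and other early users issue config cycles before any struct pci_bus exists.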
130 return 0; /* legacy I/O port space */ in new_space()
155 struct resource *resource, *res = entry->res; in add_io_space()
160 len = strlen(info->common.name) + 32; in add_io_space()
164 info->common.name); in add_io_space()
165 return -ENOMEM; in add_io_space()
168 if (res->flags & IORESOURCE_IO_SPARSE) in add_io_space()
170 space_nr = new_space(entry->offset, sparse); in add_io_space()
175 min = res->start - entry->offset; in add_io_space()
176 max = res->end - entry->offset; in add_io_space()
179 snprintf(name, len, "%s I/O Ports %08lx-%08lx", info->common.name, in add_io_space()
183 * The SDM guarantees the legacy 0-64K space is sparse, but if the in add_io_space()
190 resource = iospace->res; in add_io_space()
191 resource->name = name; in add_io_space()
192 resource->flags = IORESOURCE_MEM; in add_io_space()
193 resource->start = base + (sparse ? IO_SPACE_SPARSE_ENCODING(min) : min); in add_io_space()
194 resource->end = base + (sparse ? IO_SPACE_SPARSE_ENCODING(max) : max); in add_io_space()
202 entry->offset = base_port; in add_io_space()
203 res->start = min + base_port; in add_io_space()
204 res->end = max + base_port; in add_io_space()
205 resource_list_add_tail(iospace, &info->io_resources); in add_io_space()
211 return -ENOSPC; in add_io_space()
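
add_io_space() turns a firmware-described port window into a dedicated MMIO resource, applying a sparse translation when IORESOURCE_IO_SPARSE is set. For reference, the ia64 sparse encoding places each 4-byte port group on its own 4 KiB page; the macro below is an illustrative form of IO_SPACE_SPARSE_ENCODING (assumed from asm/io.h, not part of the matches):

/* Illustrative only: sparse port-to-MMIO-offset translation. */
#define IO_SPACE_SPARSE_ENCODING(p)	((((p) >> 2) << 12) | ((p) & 0xfff))

Under that encoding, port 0x1f0 would land at offset 0x7c1f0 inside the space, which is why the resource span above is computed from the encoded min/max rather than the raw port numbers.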
223 * IO port [0xCF8-0xCFF] is consumed by the host bridge itself
226 * So explicitly filter out PCI CFG IO ports [0xCF8-0xCFF].
230 return (res->flags & IORESOURCE_IO) && in resource_is_pcicfg_ioport()
231 res->start == 0xCF8 && res->end == 0xCFF; in resource_is_pcicfg_ioport()
236 struct device *dev = &ci->bridge->dev; in pci_acpi_root_prepare_resources()
245 resource_list_for_each_entry_safe(entry, tmp, &ci->resources) { in pci_acpi_root_prepare_resources()
246 res = entry->res; in pci_acpi_root_prepare_resources()
247 if (res->flags & IORESOURCE_MEM) { in pci_acpi_root_prepare_resources()
249 * HP's firmware has a hack to work around a in pci_acpi_root_prepare_resources()
255 entry->res); in pci_acpi_root_prepare_resources()
257 &info->io_resources); in pci_acpi_root_prepare_resources()
259 } else if (res->flags & IORESOURCE_IO) { in pci_acpi_root_prepare_resources()
260 if (resource_is_pcicfg_ioport(entry->res)) in pci_acpi_root_prepare_resources()
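
In the I/O branch above, resource_is_pcicfg_ioport() and add_io_space() work as a pair: the bridge-private 0xCF8-0xCFF window is dropped, and everything else gets its own ia64 port space. A hedged sketch of that filtering step, written as a hypothetical helper (the name, argument order, and drop-on-failure behaviour are assumptions):

/* Sketch: keep only I/O windows that child devices can actually use. */
static void sketch_filter_io_entry(struct device *dev,
				   struct pci_root_info *info,
				   struct resource_entry *entry)
{
	if (resource_is_pcicfg_ioport(entry->res))
		resource_list_destroy_entry(entry);	/* consumed by the bridge itself */
	else if (add_io_space(dev, info, entry))
		resource_list_destroy_entry(entry);	/* no port space could be created */
}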
277 resource_list_for_each_entry_safe(entry, tmp, &info->io_resources) { in pci_acpi_root_release_info()
278 release_resource(entry->res); in pci_acpi_root_release_info()
292 struct acpi_device *device = root->device; in pci_acpi_scan_root()
297 dev_err(&device->dev, in pci_acpi_scan_root()
299 root->segment, (int)root->secondary.start); in pci_acpi_scan_root()
303 info->controller.segment = root->segment; in pci_acpi_scan_root()
304 info->controller.companion = device; in pci_acpi_scan_root()
305 info->controller.node = acpi_get_node(device->handle); in pci_acpi_scan_root()
306 INIT_LIST_HEAD(&info->io_resources); in pci_acpi_scan_root()
308 &info->common, &info->controller); in pci_acpi_scan_root()
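
The matched lines show pci_acpi_scan_root() filling in the per-bridge controller data; such a function conventionally finishes by handing everything to the generic ACPI root helper. A sketch of that tail, assuming the standard acpi_pci_root_create() interface and an acpi_pci_root_ops instance wired to the callbacks seen earlier in this file:

/* Assumed glue: names and field assignments are illustrative. */
static struct acpi_pci_root_ops pci_acpi_root_ops = {
	.pci_ops		= &pci_root_ops,
	.release_info		= pci_acpi_root_release_info,
	.prepare_resources	= pci_acpi_root_prepare_resources,
};

/* ...and at the end of pci_acpi_scan_root() (sketch): */
	return acpi_pci_root_create(root, &pci_acpi_root_ops,
				    &info->common, &info->controller);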
315 * here, pci_create_root_bus() has been called by someone else and in pcibios_root_bridge_prepare()
319 if (!bridge->dev.parent) { in pcibios_root_bridge_prepare()
320 struct pci_controller *controller = bridge->bus->sysdata; in pcibios_root_bridge_prepare()
321 ACPI_COMPANION_SET(&bridge->dev, controller->companion); in pcibios_root_bridge_prepare()
330 if (!dev->bus) in pcibios_fixup_device_resources()
334 struct resource *r = &dev->resource[idx]; in pcibios_fixup_device_resources()
336 if (!r->flags || r->parent || !r->start) in pcibios_fixup_device_resources()
348 if (!dev->bus) in pcibios_fixup_bridge_resources()
352 struct resource *r = &dev->resource[idx]; in pcibios_fixup_bridge_resources()
354 if (!r->flags || r->parent || !r->start) in pcibios_fixup_bridge_resources()
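
Both fixup helpers above walk the device's resource array and skip anything with no flags, an existing parent, or a zero start; the only difference is the index range (device BARs versus bridge windows). A sketch of the shared loop, assuming the claim is done with pci_claim_resource() (the helper name and range parameters are illustrative):

/* Sketch: claim every populated, still-unclaimed resource in [start, limit). */
static void sketch_fixup_resources(struct pci_dev *dev, int start, int limit)
{
	int idx;

	if (!dev->bus)
		return;

	for (idx = start; idx < limit; idx++) {
		struct resource *r = &dev->resource[idx];

		if (!r->flags || r->parent || !r->start)
			continue;

		pci_claim_resource(dev, idx);
	}
}

Presumably the device variant covers indices 0..PCI_BRIDGE_RESOURCES and the bridge variant the window slots above that.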
368 if (b->self) { in pcibios_fixup_bus()
370 pcibios_fixup_bridge_resources(b->self); in pcibios_fixup_bus()
372 list_for_each_entry(dev, &b->devices, bus_list) in pcibios_fixup_bus()
408 BUG_ON(atomic_read(&dev->enable_cnt)); in pcibios_disable_device()
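
The BUG_ON() above documents the contract that a device is only disabled after its enable count has dropped to zero. A plausible completion of pcibios_disable_device(), assuming the usual ACPI IRQ teardown pairs with it (sketch, not the matched source):

void sketch_pcibios_disable_device(struct pci_dev *dev)
{
	BUG_ON(atomic_read(&dev->enable_cnt));
	if (!pci_dev_msi_enabled(dev))
		acpi_pci_irq_disable(dev);	/* drop the GSI routed at enable time */
}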
414 * pci_get_legacy_mem - generic legacy mem routine
415 * @bus: bus to get legacy memory base address for
417 * Find the base of legacy memory for @bus. This is typically the first
419 * chipsets support legacy I/O and memory routing. Returns the base address
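
On ia64 the legacy memory window is reached through the uncached identity mapping, so the lookup itself is trivial. A sketch of the body this kernel-doc usually sits above (the __IA64_UNCACHED_OFFSET return is an assumption, not shown in the matches):

char *sketch_pci_get_legacy_mem(struct pci_bus *bus)
{
	/* Legacy <1MB addresses are decoded through the uncached window. */
	return (char *)__IA64_UNCACHED_OFFSET;
}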
431 * pci_mmap_legacy_page_range - map legacy memory space to userland
432 * @bus: bus whose legacy space we're mapping
435 * Map legacy memory space for this device back to userspace using a machine
442 unsigned long size = vma->vm_end - vma->vm_start; in pci_mmap_legacy_page_range()
446 /* We only support mmap'ing of legacy memory space */ in pci_mmap_legacy_page_range()
448 return -ENOSYS; in pci_mmap_legacy_page_range()
454 if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) in pci_mmap_legacy_page_range()
455 return -EINVAL; in pci_mmap_legacy_page_range()
456 prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size, in pci_mmap_legacy_page_range()
457 vma->vm_page_prot); in pci_mmap_legacy_page_range()
463 vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT; in pci_mmap_legacy_page_range()
464 vma->vm_page_prot = prot; in pci_mmap_legacy_page_range()
466 if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, in pci_mmap_legacy_page_range()
467 size, vma->vm_page_prot)) in pci_mmap_legacy_page_range()
468 return -EAGAIN; in pci_mmap_legacy_page_range()
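
Between the size/offset validation and the vm_pgoff adjustment shown above, the function still has to look up the per-bus legacy base. A hedged sketch of that missing step (addr is assumed to be the char * returned by pci_get_legacy_mem()):

	/* Sketch: rebase the caller's offset onto this bus's legacy window. */
	addr = pci_get_legacy_mem(bus);
	if (IS_ERR(addr))
		return PTR_ERR(addr);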
474 * pci_legacy_read - read from legacy I/O space
476 * @port: legacy port value
484 * support legacy I/O routing or that hard fail on legacy I/O timeouts.
501 ret = -EINVAL; in pci_legacy_read()
509 * pci_legacy_write - perform a legacy I/O write
532 ret = -EINVAL; in pci_legacy_write()
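
Both legacy accessors dispatch on the access size, returning the size on success and falling back to the -EINVAL seen above otherwise. A sketch of the read-side dispatch, assuming plain inb/inw/inl port accessors (the write side would mirror it with outb/outw/outl):

/* Sketch: size-dispatched legacy port read. */
static int sketch_legacy_io_read(loff_t port, u32 *val, size_t size)
{
	int ret = size;

	switch (size) {
	case 1:
		*val = inb(port);
		break;
	case 2:
		*val = inw(port);
		break;
	case 4:
		*val = inl(port);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}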
540 * set_pci_cacheline_size - determine cacheline size for PCI devices
542 * We want to use the line-size of the outer-most cache. We assume
543 * that this line-size is the same for all CPUs.
560 status = ia64_pal_cache_config_info(levels - 1, in set_pci_dfl_cacheline_size()
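
ia64_pal_cache_config_info() is asked about the outermost cache level (levels - 1), and its line size becomes the PCI default. A sketch of how that computation plausibly finishes, assuming pcci_line_size is log2(bytes per line) and pci_dfl_cache_line_size is counted in 32-bit words:

/* Sketch: derive the PCI default cacheline size from PAL cache info. */
static void sketch_set_cacheline_size(void)
{
	unsigned long levels, unique_caches;
	pal_cache_config_info_t cci;

	if (ia64_pal_cache_summary(&levels, &unique_caches))
		return;				/* keep the existing default */
	if (ia64_pal_cache_config_info(levels - 1, /* unified/data */ 2, &cci))
		return;
	pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4;
}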