/*
 * New-style PCI core.
 *
 * Copyright (c) 2004 - 2009  Paul Mundt
 * Copyright (c) 2002  M. R. Brown
 *
 * Modelled after arch/mips/pci/pci.c:
 *  Copyright (C) 2003, 04 Ralf Baechle (ralf@linux-mips.org)
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/dma-debug.h>
#include <linux/io.h>
#include <linux/mutex.h>

unsigned long PCIBIOS_MIN_IO = 0x0000;
unsigned long PCIBIOS_MIN_MEM = 0;

/*
 * The PCI controller list.
 */
static struct pci_channel *hose_head, **hose_tail = &hose_head;

static int pci_initialized;

static void __devinit pcibios_scanbus(struct pci_channel *hose)
{
	static int next_busno;
	struct pci_bus *bus;

	bus = pci_scan_bus(next_busno, hose->pci_ops, hose);
	if (bus) {
		next_busno = bus->subordinate + 1;
		/* Don't allow 8-bit bus number overflow inside the hose -
		   reserve some space for bridges. */
		if (next_busno > 224)
			next_busno = 0;

		pci_bus_size_bridges(bus);
		pci_bus_assign_resources(bus);
		pci_enable_bridges(bus);
	}
}

static DEFINE_MUTEX(pci_scan_mutex);

void __devinit register_pci_controller(struct pci_channel *hose)
{
	if (request_resource(&iomem_resource, hose->mem_resource) < 0)
		goto out;
	if (request_resource(&ioport_resource, hose->io_resource) < 0) {
		release_resource(hose->mem_resource);
		goto out;
	}

	*hose_tail = hose;
	hose_tail = &hose->next;

	/*
	 * Do not panic here but later - this might happen before console
	 * init.
	 */
	if (!hose->io_map_base) {
		printk(KERN_WARNING
		       "registering PCI controller with io_map_base unset\n");
	}

	/*
	 * Scan the bus if it is registered after the PCI subsystem
	 * initialization.
	 */
	if (pci_initialized) {
		mutex_lock(&pci_scan_mutex);
		pcibios_scanbus(hose);
		mutex_unlock(&pci_scan_mutex);
	}

	return;

out:
	printk(KERN_WARNING
	       "Skipping PCI bus scan due to resource conflict\n");
}

static int __init pcibios_init(void)
{
	struct pci_channel *hose;

	/* Scan all of the recorded PCI controllers. */
	for (hose = hose_head; hose; hose = hose->next)
		pcibios_scanbus(hose);

	pci_fixup_irqs(pci_common_swizzle, pcibios_map_platform_irq);

	dma_debug_add_bus(&pci_bus_type);

	pci_initialized = 1;

	return 0;
}
subsys_initcall(pcibios_init);
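/*
 * For reference, a board or controller setup fills in a struct pci_channel
 * and hands it to register_pci_controller() above. A minimal sketch, with
 * hypothetical ops and window addresses (only the fields this file actually
 * dereferences are shown):
 *
 *	static struct resource my_io_resource = {
 *		.name	= "PCI IO",
 *		.start	= 0x4000,
 *		.end	= 0x4000 + 0x3fff,
 *		.flags	= IORESOURCE_IO,
 *	};
 *
 *	static struct resource my_mem_resource = {
 *		.name	= "PCI MEM",
 *		.start	= 0xfd000000,
 *		.end	= 0xfd000000 + 0x00ffffff,
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	static struct pci_channel my_pci_controller = {
 *		.pci_ops	= &my_pci_ops,
 *		.io_resource	= &my_io_resource,
 *		.mem_resource	= &my_mem_resource,
 *	};
 *
 *	register_pci_controller(&my_pci_controller);
 *
 * Registered before pcibios_init() runs, the hose simply sits on the
 * controller list and is scanned from the initcall; registered later,
 * it is scanned immediately under pci_scan_mutex.
 */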
static void pcibios_fixup_device_resources(struct pci_dev *dev,
	struct pci_bus *bus)
{
	/* Update device resources. */
	struct pci_channel *hose = bus->sysdata;
	unsigned long offset = 0;
	int i;

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (!dev->resource[i].start)
			continue;
		if (dev->resource[i].flags & IORESOURCE_PCI_FIXED)
			continue;
		if (dev->resource[i].flags & IORESOURCE_IO)
			offset = hose->io_offset;
		else if (dev->resource[i].flags & IORESOURCE_MEM)
			offset = hose->mem_offset;

		dev->resource[i].start += offset;
		dev->resource[i].end += offset;
	}
}

/*
 * Called after each bus is probed, but before its children
 * are examined.
 */
void __devinit pcibios_fixup_bus(struct pci_bus *bus)
{
	struct pci_dev *dev = bus->self;
	struct list_head *ln;
	struct pci_channel *chan = bus->sysdata;

	if (!dev) {
		bus->resource[0] = chan->io_resource;
		bus->resource[1] = chan->mem_resource;
	}

	for (ln = bus->devices.next; ln != &bus->devices; ln = ln->next) {
		dev = pci_dev_b(ln);

		if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
			pcibios_fixup_device_resources(dev, bus);
	}
}

/*
 * We need to avoid collisions with `mirrored' VGA ports
 * and other strange ISA hardware, so we always want the
 * addresses to be allocated in the 0x000-0x0ff region
 * modulo 0x400.
 */
void pcibios_align_resource(void *data, struct resource *res,
			    resource_size_t size, resource_size_t align)
{
	struct pci_dev *dev = data;
	struct pci_channel *chan = dev->sysdata;
	resource_size_t start = res->start;

	if (res->flags & IORESOURCE_IO) {
		if (start < PCIBIOS_MIN_IO + chan->io_resource->start)
			start = PCIBIOS_MIN_IO + chan->io_resource->start;

		/*
		 * Put everything into 0x00-0xff region modulo 0x400.
		 */
		if (start & 0x300)
			start = (start + 0x3ff) & ~0x3ff;
	} else if (res->flags & IORESOURCE_MEM) {
		if (start < PCIBIOS_MIN_MEM + chan->mem_resource->start)
			start = PCIBIOS_MIN_MEM + chan->mem_resource->start;
	}

	res->start = start;
}
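/*
 * A worked example of the I/O alignment above, with hypothetical numbers:
 * a region starting at 0x1140 has bits under the 0x300 mask set, so it is
 * pushed up to the next 0x400 boundary, 0x1400 ((0x1140 + 0x3ff) & ~0x3ff).
 * A region starting at 0x1020 already lies in the 0x000-0x0ff window modulo
 * 0x400 and is left where it is.
 */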
void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
			     struct resource *res)
{
	struct pci_channel *hose = dev->sysdata;
	unsigned long offset = 0;

	if (res->flags & IORESOURCE_IO)
		offset = hose->io_offset;
	else if (res->flags & IORESOURCE_MEM)
		offset = hose->mem_offset;

	region->start = res->start - offset;
	region->end = res->end - offset;
}

void __devinit
pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
			struct pci_bus_region *region)
{
	struct pci_channel *hose = dev->sysdata;
	unsigned long offset = 0;

	if (res->flags & IORESOURCE_IO)
		offset = hose->io_offset;
	else if (res->flags & IORESOURCE_MEM)
		offset = hose->mem_offset;

	res->start = region->start + offset;
	res->end = region->end + offset;
}

int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	u16 cmd, old_cmd;
	int idx;
	struct resource *r;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	old_cmd = cmd;
	for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
		/* Only set up the requested stuff */
		if (!(mask & (1 << idx)))
			continue;

		r = &dev->resource[idx];
		if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
			continue;
		if ((idx == PCI_ROM_RESOURCE) &&
		    (!(r->flags & IORESOURCE_ROM_ENABLE)))
			continue;
		if (!r->start && r->end) {
			printk(KERN_ERR "PCI: Device %s not available "
			       "because of resource collisions\n",
			       pci_name(dev));
			return -EINVAL;
		}
		if (r->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (r->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	if (cmd != old_cmd) {
		printk(KERN_INFO "PCI: Enabling device %s (%04x -> %04x)\n",
		       pci_name(dev), old_cmd, cmd);
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}

/*
 * If we set up a device for bus mastering, we need to check and set
 * the latency timer as it may not be properly set.
 */
static unsigned int pcibios_max_latency = 255;

void pcibios_set_master(struct pci_dev *dev)
{
	u8 lat;

	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
	if (lat < 16)
		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
	else if (lat > pcibios_max_latency)
		lat = pcibios_max_latency;
	else
		return;

	printk(KERN_INFO "PCI: Setting latency timer of device %s to %d\n",
	       pci_name(dev), lat);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}

void __init pcibios_update_irq(struct pci_dev *dev, int irq)
{
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}

char * __devinit pcibios_setup(char *str)
{
	return str;
}

int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	/*
	 * I/O space can be accessed via normal processor loads and stores on
	 * this platform but for now we elect not to do this and portable
	 * drivers should not do this anyway.
	 */
	if (mmap_state == pci_mmap_io)
		return -EINVAL;

	/*
	 * Ignore write-combine; for now only return uncached mappings.
	 */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static void __iomem *ioport_map_pci(struct pci_dev *dev,
				    unsigned long port, unsigned int nr)
{
	struct pci_channel *chan = dev->sysdata;

	if (!chan->io_map_base)
		chan->io_map_base = generic_io_base;

	return (void __iomem *)(chan->io_map_base + port);
}

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	resource_size_t start = pci_resource_start(dev, bar);
	resource_size_t len = pci_resource_len(dev, bar);
	unsigned long flags = pci_resource_flags(dev, bar);

	if (unlikely(!len || !start))
		return NULL;
	if (maxlen && len > maxlen)
		len = maxlen;

	if (flags & IORESOURCE_IO)
		return ioport_map_pci(dev, start, len);

	/*
	 * Presently the IORESOURCE_MEM case is a bit special, most
	 * SH7751 style PCI controllers have PCI memory at a fixed
	 * location in the address space where no remapping is desired.
	 * With the IORESOURCE_MEM case more care has to be taken
	 * to inhibit page table mapping for legacy cores, but this is
	 * punted off to __ioremap().
	 *
	 *					-- PFM.
	 */
	if (flags & IORESOURCE_MEM) {
		if (flags & IORESOURCE_CACHEABLE)
			return ioremap(start, len);

		return ioremap_nocache(start, len);
	}

	return NULL;
}
EXPORT_SYMBOL(pci_iomap);

void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
	iounmap(addr);
}
EXPORT_SYMBOL(pci_iounmap);

#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL(pcibios_resource_to_bus);
EXPORT_SYMBOL(pcibios_bus_to_resource);
EXPORT_SYMBOL(PCIBIOS_MIN_IO);
EXPORT_SYMBOL(PCIBIOS_MIN_MEM);
#endif
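/*
 * Driver-side usage of the pci_iomap()/pci_iounmap() pair above, for
 * reference (the BAR number and error handling are illustrative only):
 *
 *	void __iomem *regs = pci_iomap(pdev, 0, 0);
 *	if (!regs)
 *		return -ENOMEM;
 *	...
 *	pci_iounmap(pdev, regs);
 *
 * An I/O port BAR comes back as a cookie offset from the hose's
 * io_map_base, a memory BAR as an ioremap()ed pointer; either works
 * with the ioread*()/iowrite*() accessors.
 */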