xref: /openbmc/linux/arch/powerpc/kernel/pci-common.c (revision 174cd4b1)
1 /*
2  * Contains common pci routines for ALL ppc platform
3  * (based on pci_32.c and pci_64.c)
4  *
5  * Port for PPC64 David Engebretsen, IBM Corp.
6  * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
7  *
8  * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
9  *   Rework, based on alpha PCI code.
10  *
11  * Common pmac/prep/chrp pci routines. -- Cort
12  *
13  * This program is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU General Public License
15  * as published by the Free Software Foundation; either version
16  * 2 of the License, or (at your option) any later version.
17  */
18 
19 #include <linux/kernel.h>
20 #include <linux/pci.h>
21 #include <linux/string.h>
22 #include <linux/init.h>
23 #include <linux/delay.h>
24 #include <linux/export.h>
25 #include <linux/of_address.h>
26 #include <linux/of_pci.h>
27 #include <linux/mm.h>
28 #include <linux/shmem_fs.h>
29 #include <linux/list.h>
30 #include <linux/syscalls.h>
31 #include <linux/irq.h>
32 #include <linux/vmalloc.h>
33 #include <linux/slab.h>
34 #include <linux/vgaarb.h>
35 
36 #include <asm/processor.h>
37 #include <asm/io.h>
38 #include <asm/prom.h>
39 #include <asm/pci-bridge.h>
40 #include <asm/byteorder.h>
41 #include <asm/machdep.h>
42 #include <asm/ppc-pci.h>
43 #include <asm/eeh.h>
44 
/* hose_spinlock protects accesses to the phb_bitmap. */
static DEFINE_SPINLOCK(hose_spinlock);
/* List of all registered PHBs (struct pci_controller) in the system. */
LIST_HEAD(hose_list);

/* For dynamic PHB numbering on get_phb_number(): max number of PHBs. */
#define MAX_PHBS 0x10000

/*
 * For dynamic PHB numbering: used/free PHBs tracking bitmap.
 * Accesses to this bitmap should be protected by hose_spinlock.
 */
static DECLARE_BITMAP(phb_bitmap, MAX_PHBS);
57 
/* ISA Memory physical address */
resource_size_t isa_mem_base;
EXPORT_SYMBOL(isa_mem_base);


/* DMA ops attached to every PCI device by pcibios_setup_device();
 * platforms may override them via set_pci_dma_ops() before probing.
 */
static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;

/* Install the DMA ops that will be attached to PCI devices. */
void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
{
	pci_dma_ops = dma_ops;
}

/* Return the DMA ops currently used for PCI devices. */
const struct dma_map_ops *get_pci_dma_ops(void)
{
	return pci_dma_ops;
}
EXPORT_SYMBOL(get_pci_dma_ops);
75 
76 /*
77  * This function should run under locking protection, specifically
78  * hose_spinlock.
79  */
80 static int get_phb_number(struct device_node *dn)
81 {
82 	int ret, phb_id = -1;
83 	u32 prop_32;
84 	u64 prop;
85 
86 	/*
87 	 * Try fixed PHB numbering first, by checking archs and reading
88 	 * the respective device-tree properties. Firstly, try powernv by
89 	 * reading "ibm,opal-phbid", only present in OPAL environment.
90 	 */
91 	ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);
92 	if (ret) {
93 		ret = of_property_read_u32_index(dn, "reg", 1, &prop_32);
94 		prop = prop_32;
95 	}
96 
97 	if (!ret)
98 		phb_id = (int)(prop & (MAX_PHBS - 1));
99 
100 	/* We need to be sure to not use the same PHB number twice. */
101 	if ((phb_id >= 0) && !test_and_set_bit(phb_id, phb_bitmap))
102 		return phb_id;
103 
104 	/*
105 	 * If not pseries nor powernv, or if fixed PHB numbering tried to add
106 	 * the same PHB number twice, then fallback to dynamic PHB numbering.
107 	 */
108 	phb_id = find_first_zero_bit(phb_bitmap, MAX_PHBS);
109 	BUG_ON(phb_id >= MAX_PHBS);
110 	set_bit(phb_id, phb_bitmap);
111 
112 	return phb_id;
113 }
114 
/*
 * pcibios_alloc_controller - allocate and register a new PHB
 * @dev: device node of the host bridge (may be NULL)
 *
 * Allocates a zeroed pci_controller, assigns it a domain number and
 * links it onto hose_list. Returns NULL on allocation failure.
 */
struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
	struct pci_controller *phb;

	/* Works both before and after the slab allocator is up. */
	phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
	if (phb == NULL)
		return NULL;
	/* hose_spinlock protects both the PHB bitmap and hose_list. */
	spin_lock(&hose_spinlock);
	phb->global_number = get_phb_number(dev);
	list_add_tail(&phb->list_node, &hose_list);
	spin_unlock(&hose_spinlock);
	phb->dn = dev;
	/* If slab was up, the phb was kmalloc'ed and may be kfree'd later. */
	phb->is_dynamic = slab_is_available();
#ifdef CONFIG_PPC64
	if (dev) {
		int nid = of_node_to_nid(dev);

		/* Fall back to "no node" when the node is absent or offline. */
		if (nid < 0 || !node_online(nid))
			nid = -1;

		PHB_SET_NODE(phb, nid);
	}
#endif
	return phb;
}
EXPORT_SYMBOL_GPL(pcibios_alloc_controller);
141 
/*
 * pcibios_free_controller - unregister and free a PHB
 * @phb: controller previously returned by pcibios_alloc_controller()
 *
 * Releases the PHB's domain number, unlinks it from hose_list and
 * frees it if it was dynamically allocated.
 */
void pcibios_free_controller(struct pci_controller *phb)
{
	spin_lock(&hose_spinlock);

	/* Clear bit of phb_bitmap to allow reuse of this PHB number. */
	if (phb->global_number < MAX_PHBS)
		clear_bit(phb->global_number, phb_bitmap);

	list_del(&phb->list_node);
	spin_unlock(&hose_spinlock);

	/* Bootmem-allocated controllers are never freed. */
	if (phb->is_dynamic)
		kfree(phb);
}
EXPORT_SYMBOL_GPL(pcibios_free_controller);
157 
158 /*
159  * This function is used to call pcibios_free_controller()
160  * in a deferred manner: a callback from the PCI subsystem.
161  *
162  * _*DO NOT*_ call pcibios_free_controller() explicitly if
163  * this is used (or it may access an invalid *phb pointer).
164  *
165  * The callback occurs when all references to the root bus
166  * are dropped (e.g., child buses/devices and their users).
167  *
168  * It's called as .release_fn() of 'struct pci_host_bridge'
169  * which is associated with the 'struct pci_controller.bus'
170  * (root bus) - it expects .release_data to hold a pointer
171  * to 'struct pci_controller'.
172  *
173  * In order to use it, register .release_fn()/release_data
174  * like this:
175  *
176  * pci_set_host_bridge_release(bridge,
177  *                             pcibios_free_controller_deferred
178  *                             (void *) phb);
179  *
180  * e.g. in the pcibios_root_bridge_prepare() callback from
181  * pci_create_root_bus().
182  */
void pcibios_free_controller_deferred(struct pci_host_bridge *bridge)
{
	/* .release_data was registered as the pci_controller pointer. */
	struct pci_controller *phb = (struct pci_controller *)
					 bridge->release_data;

	pr_debug("domain %d, dynamic %d\n", phb->global_number, phb->is_dynamic);

	pcibios_free_controller(phb);
}
EXPORT_SYMBOL_GPL(pcibios_free_controller_deferred);
193 
194 /*
195  * The function is used to return the minimal alignment
196  * for memory or I/O windows of the associated P2P bridge.
197  * By default, 4KiB alignment for I/O windows and 1MiB for
198  * memory windows.
199  */
200 resource_size_t pcibios_window_alignment(struct pci_bus *bus,
201 					 unsigned long type)
202 {
203 	struct pci_controller *phb = pci_bus_to_host(bus);
204 
205 	if (phb->controller_ops.window_alignment)
206 		return phb->controller_ops.window_alignment(bus, type);
207 
208 	/*
209 	 * PCI core will figure out the default
210 	 * alignment: 4KiB for I/O and 1MiB for
211 	 * memory window.
212 	 */
213 	return 1;
214 }
215 
216 void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
217 {
218 	struct pci_controller *hose = pci_bus_to_host(bus);
219 
220 	if (hose->controller_ops.setup_bridge)
221 		hose->controller_ops.setup_bridge(bus, type);
222 }
223 
224 void pcibios_reset_secondary_bus(struct pci_dev *dev)
225 {
226 	struct pci_controller *phb = pci_bus_to_host(dev->bus);
227 
228 	if (phb->controller_ops.reset_secondary_bus) {
229 		phb->controller_ops.reset_secondary_bus(dev);
230 		return;
231 	}
232 
233 	pci_reset_secondary_bus(dev);
234 }
235 
#ifdef CONFIG_PCI_IOV
/* Alignment required for an SR-IOV VF BAR; platforms may override. */
resource_size_t pcibios_iov_resource_alignment(struct pci_dev *pdev, int resno)
{
	if (ppc_md.pcibios_iov_resource_alignment)
		return ppc_md.pcibios_iov_resource_alignment(pdev, resno);

	/* Default: the size of a single VF's slice of the BAR. */
	return pci_iov_resource_size(pdev, resno);
}
#endif /* CONFIG_PCI_IOV */
245 
/* Size of a PHB's I/O window; kept in different fields on 32/64-bit. */
static resource_size_t pcibios_io_size(const struct pci_controller *hose)
{
#ifdef CONFIG_PPC64
	return hose->pci_io_size;
#else
	return resource_size(&hose->io_resource);
#endif
}
254 
255 int pcibios_vaddr_is_ioport(void __iomem *address)
256 {
257 	int ret = 0;
258 	struct pci_controller *hose;
259 	resource_size_t size;
260 
261 	spin_lock(&hose_spinlock);
262 	list_for_each_entry(hose, &hose_list, list_node) {
263 		size = pcibios_io_size(hose);
264 		if (address >= hose->io_base_virt &&
265 		    address < (hose->io_base_virt + size)) {
266 			ret = 1;
267 			break;
268 		}
269 	}
270 	spin_unlock(&hose_spinlock);
271 	return ret;
272 }
273 
274 unsigned long pci_address_to_pio(phys_addr_t address)
275 {
276 	struct pci_controller *hose;
277 	resource_size_t size;
278 	unsigned long ret = ~0;
279 
280 	spin_lock(&hose_spinlock);
281 	list_for_each_entry(hose, &hose_list, list_node) {
282 		size = pcibios_io_size(hose);
283 		if (address >= hose->io_base_phys &&
284 		    address < (hose->io_base_phys + size)) {
285 			unsigned long base =
286 				(unsigned long)hose->io_base_virt - _IO_BASE;
287 			ret = base + (address - hose->io_base_phys);
288 			break;
289 		}
290 	}
291 	spin_unlock(&hose_spinlock);
292 
293 	return ret;
294 }
295 EXPORT_SYMBOL_GPL(pci_address_to_pio);
296 
/*
 * Return the domain number for this bus.
 */
int pci_domain_nr(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	/* The PHB's global number doubles as the PCI domain number. */
	return hose->global_number;
}
EXPORT_SYMBOL(pci_domain_nr);
307 
/* This routine is meant to be used early during boot, when the
 * PCI bus numbers have not yet been assigned, and you need to
 * issue PCI config cycles to an OF device.
 * It could also be used to "fix" RTAS config cycles if you want
 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
 * config cycles.
 */
struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
{
	/* Walk up the device tree until a node matches some PHB's node. */
	while(node) {
		struct pci_controller *hose, *tmp;
		/* NOTE(review): hose_list is walked without hose_spinlock;
		 * list_for_each_entry_safe only guards against removal by
		 * this walker, not concurrent list updates. Presumably OK
		 * in the early-boot contexts described above — confirm.
		 */
		list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
			if (hose->dn == node)
				return hose;
		node = node->parent;
	}
	return NULL;
}
326 
/*
 * Reads the interrupt pin to determine if an interrupt is in use by the
 * card. If the interrupt is used, then gets the interrupt line from the
 * openfirmware and sets it in the pci_dev and pci_config line.
 *
 * Returns 0 on success, -1 if no usable interrupt could be mapped.
 */
static int pci_read_irq_line(struct pci_dev *pci_dev)
{
	struct of_phandle_args oirq;
	unsigned int virq;

	pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));

#ifdef DEBUG
	/* Poison the structure so partial parses show up in debug output. */
	memset(&oirq, 0xff, sizeof(oirq));
#endif
	/* Try to get a mapping from the device-tree */
	if (of_irq_parse_pci(pci_dev, &oirq)) {
		u8 line, pin;

		/* If that fails, lets fallback to what is in the config
		 * space and map that through the default controller. We
		 * also set the type to level low since that's what PCI
		 * interrupts are. If your platform does differently, then
		 * either provide a proper interrupt tree or don't use this
		 * function.
		 */
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
			return -1;
		/* Pin 0 means the function uses no interrupt pin at all. */
		if (pin == 0)
			return -1;
		/* 0 and 0xff both mean "no line assigned by firmware". */
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
		    line == 0xff || line == 0) {
			return -1;
		}
		pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
			 line, pin);

		/* Map through the default (NULL) irq domain. */
		virq = irq_create_mapping(NULL, line);
		if (virq)
			irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
	} else {
		pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
			 oirq.args_count, oirq.args[0], oirq.args[1],
			 of_node_full_name(oirq.np));

		virq = irq_create_of_mapping(&oirq);
	}

	if (!virq) {
		pr_debug(" Failed to map !\n");
		return -1;
	}

	pr_debug(" Mapped to linux irq %d\n", virq);

	pci_dev->irq = virq;

	return 0;
}
386 
387 /*
388  * Platform support for /proc/bus/pci/X/Y mmap()s,
389  * modelled on the sparc64 implementation by Dave Miller.
390  *  -- paulus.
391  */
392 
/*
 * Adjust vm_pgoff of VMA such that it is the physical page offset
 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
 *
 * Basically, the user finds the base address for his device which he wishes
 * to mmap.  They read the 32-bit value from the config space base register,
 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
 *
 * Returns the matching resource on success (with *offset rewritten to a
 * CPU physical address for I/O mappings), or NULL when the offset does
 * not fall inside any resource of @dev.
 */
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
					       resource_size_t *offset,
					       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	unsigned long io_offset = 0;
	int i, res_bit;

	if (hose == NULL)
		return NULL;		/* should never happen */

	/* If memory, add on the PCI bridge address offset */
	if (mmap_state == pci_mmap_mem) {
#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
		*offset += hose->pci_mem_offset;
#endif
		res_bit = IORESOURCE_MEM;
	} else {
		/* I/O resources are recorded relative to the virtual base. */
		io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		*offset += io_offset;
		res_bit = IORESOURCE_IO;
	}

	/*
	 * Check that the offset requested corresponds to one of the
	 * resources of the device.
	 */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];
		int flags = rp->flags;

		/* treat ROM as memory (should be already) */
		if (i == PCI_ROM_RESOURCE)
			flags |= IORESOURCE_MEM;

		/* Active and same type? */
		if ((flags & res_bit) == 0)
			continue;

		/* In the range of this resource? */
		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
			continue;

		/* found it! construct the final physical address */
		if (mmap_state == pci_mmap_io)
			*offset += hose->io_base_phys - io_offset;
		return rp;
	}

	return NULL;
}
455 
/*
 * This one is used by /dev/mem and fbdev who have no clue about the
 * PCI device, it tries to find the PCI device first and calls the
 * above routine
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t prot)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
	int i;

	/* System RAM keeps its normal (cacheable) protection. */
	if (page_is_ram(pfn))
		return prot;

	prot = pgprot_noncached(prot);
	/* Search all PCI devices for a MEM resource containing offset. */
	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			/* Active and same type? */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;
			/* In the range of this resource? */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		/* Prefetchable BARs may be mapped write-combining. */
		if (found->flags & IORESOURCE_PREFETCH)
			prot = pgprot_noncached_wc(prot);
		/* Drop the ref for_each_pci_dev() left held when we broke out. */
		pci_dev_put(pdev);
	}

	pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
		 (unsigned long long)offset, pgprot_val(prot));

	return prot;
}
504 
505 
/*
 * Perform the actual remap of the pages for a PCI device mapping, as
 * appropriate for this architecture.  The region in the process to map
 * is described by vm_start and vm_end members of VMA, the base physical
 * address is found in vm_pgoff.
 * The pci device structure is provided so that architectures may make mapping
 * decisions on a per-device or per-bus basis.
 *
 * Returns a negative error code on failure, zero on success.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	struct resource *rp;
	int ret;

	/* Validate the request and turn offset into a CPU physical address. */
	rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
	if (rp == NULL)
		return -EINVAL;

	/* vm_pgoff now holds the physical page frame number to map. */
	vma->vm_pgoff = offset >> PAGE_SHIFT;
	if (write_combine)
		vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);

	return ret;
}
539 
540 /* This provides legacy IO read access on a bus */
541 int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
542 {
543 	unsigned long offset;
544 	struct pci_controller *hose = pci_bus_to_host(bus);
545 	struct resource *rp = &hose->io_resource;
546 	void __iomem *addr;
547 
548 	/* Check if port can be supported by that bus. We only check
549 	 * the ranges of the PHB though, not the bus itself as the rules
550 	 * for forwarding legacy cycles down bridges are not our problem
551 	 * here. So if the host bridge supports it, we do it.
552 	 */
553 	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
554 	offset += port;
555 
556 	if (!(rp->flags & IORESOURCE_IO))
557 		return -ENXIO;
558 	if (offset < rp->start || (offset + size) > rp->end)
559 		return -ENXIO;
560 	addr = hose->io_base_virt + port;
561 
562 	switch(size) {
563 	case 1:
564 		*((u8 *)val) = in_8(addr);
565 		return 1;
566 	case 2:
567 		if (port & 1)
568 			return -EINVAL;
569 		*((u16 *)val) = in_le16(addr);
570 		return 2;
571 	case 4:
572 		if (port & 3)
573 			return -EINVAL;
574 		*((u32 *)val) = in_le32(addr);
575 		return 4;
576 	}
577 	return -EINVAL;
578 }
579 
580 /* This provides legacy IO write access on a bus */
581 int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
582 {
583 	unsigned long offset;
584 	struct pci_controller *hose = pci_bus_to_host(bus);
585 	struct resource *rp = &hose->io_resource;
586 	void __iomem *addr;
587 
588 	/* Check if port can be supported by that bus. We only check
589 	 * the ranges of the PHB though, not the bus itself as the rules
590 	 * for forwarding legacy cycles down bridges are not our problem
591 	 * here. So if the host bridge supports it, we do it.
592 	 */
593 	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
594 	offset += port;
595 
596 	if (!(rp->flags & IORESOURCE_IO))
597 		return -ENXIO;
598 	if (offset < rp->start || (offset + size) > rp->end)
599 		return -ENXIO;
600 	addr = hose->io_base_virt + port;
601 
602 	/* WARNING: The generic code is idiotic. It gets passed a pointer
603 	 * to what can be a 1, 2 or 4 byte quantity and always reads that
604 	 * as a u32, which means that we have to correct the location of
605 	 * the data read within those 32 bits for size 1 and 2
606 	 */
607 	switch(size) {
608 	case 1:
609 		out_8(addr, val >> 24);
610 		return 1;
611 	case 2:
612 		if (port & 1)
613 			return -EINVAL;
614 		out_le16(addr, val >> 16);
615 		return 2;
616 	case 4:
617 		if (port & 3)
618 			return -EINVAL;
619 		out_le32(addr, val);
620 		return 4;
621 	}
622 	return -EINVAL;
623 }
624 
625 /* This provides legacy IO or memory mmap access on a bus */
626 int pci_mmap_legacy_page_range(struct pci_bus *bus,
627 			       struct vm_area_struct *vma,
628 			       enum pci_mmap_state mmap_state)
629 {
630 	struct pci_controller *hose = pci_bus_to_host(bus);
631 	resource_size_t offset =
632 		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
633 	resource_size_t size = vma->vm_end - vma->vm_start;
634 	struct resource *rp;
635 
636 	pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
637 		 pci_domain_nr(bus), bus->number,
638 		 mmap_state == pci_mmap_mem ? "MEM" : "IO",
639 		 (unsigned long long)offset,
640 		 (unsigned long long)(offset + size - 1));
641 
642 	if (mmap_state == pci_mmap_mem) {
643 		/* Hack alert !
644 		 *
645 		 * Because X is lame and can fail starting if it gets an error trying
646 		 * to mmap legacy_mem (instead of just moving on without legacy memory
647 		 * access) we fake it here by giving it anonymous memory, effectively
648 		 * behaving just like /dev/zero
649 		 */
650 		if ((offset + size) > hose->isa_mem_size) {
651 			printk(KERN_DEBUG
652 			       "Process %s (pid:%d) mapped non-existing PCI legacy memory for 0%04x:%02x\n",
653 			       current->comm, current->pid, pci_domain_nr(bus), bus->number);
654 			if (vma->vm_flags & VM_SHARED)
655 				return shmem_zero_setup(vma);
656 			return 0;
657 		}
658 		offset += hose->isa_mem_phys;
659 	} else {
660 		unsigned long io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
661 		unsigned long roffset = offset + io_offset;
662 		rp = &hose->io_resource;
663 		if (!(rp->flags & IORESOURCE_IO))
664 			return -ENXIO;
665 		if (roffset < rp->start || (roffset + size) > rp->end)
666 			return -ENXIO;
667 		offset += hose->io_base_phys;
668 	}
669 	pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);
670 
671 	vma->vm_pgoff = offset >> PAGE_SHIFT;
672 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
673 	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
674 			       vma->vm_end - vma->vm_start,
675 			       vma->vm_page_prot);
676 }
677 
/* Translate a device resource into the coordinates userspace sees
 * in sysfs/procfs: bus coordinates for I/O, CPU physical for MMIO.
 */
void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc,
			  resource_size_t *start, resource_size_t *end)
{
	struct pci_bus_region region;

	/* I/O BARs are reported in PCI bus coordinates. */
	if (rsrc->flags & IORESOURCE_IO) {
		pcibios_resource_to_bus(dev->bus, &region,
					(struct resource *) rsrc);
		*start = region.start;
		*end = region.end;
		return;
	}

	/* We pass a CPU physical address to userland for MMIO instead of a
	 * BAR value because X is lame and expects to be able to use that
	 * to pass to /dev/mem!
	 *
	 * That means we may have 64-bit values where some apps only expect
	 * 32 (like X itself since it thinks only Sparc has 64-bit MMIO).
	 */
	*start = rsrc->start;
	*end = rsrc->end;
}
702 
/**
 * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
 * @hose: newly allocated pci_controller to be setup
 * @dev: device node of the host bridge
 * @primary: set if primary bus (32 bits only, soon to be deprecated)
 *
 * This function will parse the "ranges" property of a PCI host bridge device
 * node and setup the resource mapping of a pci controller based on its
 * content.
 *
 * Life would be boring if it wasn't for a few issues that we have to deal
 * with here:
 *
 *   - We can only cope with one IO space range and up to 3 Memory space
 *     ranges. However, some machines (thanks Apple !) tend to split their
 *     space into lots of small contiguous ranges. So we have to coalesce.
 *
 *   - Some busses have IO space not starting at 0, which causes trouble with
 *     the way we do our IO resource renumbering. The code somewhat deals with
 *     it for 64 bits but I would expect problems on 32 bits.
 *
 *   - Some 32 bits platforms such as 4xx can have physical space larger than
 *     32 bits so we need to use 64 bits values for the parsing
 */
void pci_process_bridge_OF_ranges(struct pci_controller *hose,
				  struct device_node *dev, int primary)
{
	int memno = 0;
	struct resource *res;
	struct of_pci_range range;
	struct of_pci_range_parser parser;

	printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
	       dev->full_name, primary ? "(primary)" : "");

	/* Check for ranges property */
	if (of_pci_range_parser_init(&parser, dev))
		return;

	/* Parse it */
	for_each_of_pci_range(&parser, &range) {
		/* If we failed translation or got a zero-sized region
		 * (some FW try to feed us with non sensical zero sized regions
		 * such as power3 which look like some kind of attempt at exposing
		 * the VGA memory hole)
		 */
		if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;

		/* Act based on address space type */
		res = NULL;
		switch (range.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			printk(KERN_INFO
			       "  IO 0x%016llx..0x%016llx -> 0x%016llx\n",
			       range.cpu_addr, range.cpu_addr + range.size - 1,
			       range.pci_addr);

			/* We support only one IO range */
			if (hose->pci_io_size) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
#ifdef CONFIG_PPC32
			/* On 32 bits, limit I/O space to 16MB */
			if (range.size > 0x01000000)
				range.size = 0x01000000;

			/* 32 bits needs to map IOs here */
			hose->io_base_virt = ioremap(range.cpu_addr,
						range.size);

			/* Expect trouble if pci_addr is not 0 */
			if (primary)
				isa_io_base =
					(unsigned long)hose->io_base_virt;
#endif /* CONFIG_PPC32 */
			/* pci_io_size and io_base_phys always represent IO
			 * space starting at 0 so we factor in pci_addr
			 */
			hose->pci_io_size = range.pci_addr + range.size;
			hose->io_base_phys = range.cpu_addr - range.pci_addr;

			/* Build resource */
			res = &hose->io_resource;
			/* Record the I/O resource re-based to the PCI (bus)
			 * address rather than the raw CPU address.
			 */
			range.cpu_addr = range.pci_addr;
			break;
		case IORESOURCE_MEM:
			printk(KERN_INFO
			       " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
			       range.cpu_addr, range.cpu_addr + range.size - 1,
			       range.pci_addr,
			       (range.pci_space & 0x40000000) ?
			       "Prefetch" : "");

			/* We support only 3 memory ranges */
			if (memno >= 3) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
			/* Handles ISA memory hole space here */
			if (range.pci_addr == 0) {
				if (primary || isa_mem_base == 0)
					isa_mem_base = range.cpu_addr;
				hose->isa_mem_phys = range.cpu_addr;
				hose->isa_mem_size = range.size;
			}

			/* Build resource */
			hose->mem_offset[memno] = range.cpu_addr -
							range.pci_addr;
			res = &hose->mem_resources[memno++];
			break;
		}
		if (res != NULL) {
			res->name = dev->full_name;
			res->flags = range.flags;
			res->start = range.cpu_addr;
			res->end = range.cpu_addr + range.size - 1;
			res->parent = res->child = res->sibling = NULL;
		}
	}
}
828 
829 /* Decide whether to display the domain number in /proc */
830 int pci_proc_domain(struct pci_bus *bus)
831 {
832 	struct pci_controller *hose = pci_bus_to_host(bus);
833 
834 	if (!pci_has_flag(PCI_ENABLE_PROC_DOMAINS))
835 		return 0;
836 	if (pci_has_flag(PCI_COMPAT_DOMAIN_0))
837 		return hose->global_number != 0;
838 	return 1;
839 }
840 
/* Give the platform a chance to tweak the host bridge before it is
 * registered with the PCI core; 0 means "nothing to do".
 */
int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	if (ppc_md.pcibios_root_bridge_prepare)
		return ppc_md.pcibios_root_bridge_prepare(bridge);

	return 0;
}
848 
/* This header fixup will do the resource fixup for all devices as they are
 * probed, but not for bridge ranges
 */
static void pcibios_fixup_resources(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	int i;

	if (!hose) {
		printk(KERN_ERR "No host bridge for PCI dev %s !\n",
		       pci_name(dev));
		return;
	}

	/* VF BARs are handled through the PF; nothing to fix up here. */
	if (dev->is_virtfn)
		return;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		struct resource *res = dev->resource + i;
		struct pci_bus_region reg;
		if (!res->flags)
			continue;

		/* If we're going to re-assign everything, we mark all resources
		 * as unset (and 0-base them). In addition, we mark BARs starting
		 * at 0 as unset as well, except if PCI_PROBE_ONLY is also set
		 * since in that case, we don't want to re-assign anything
		 */
		pcibios_resource_to_bus(dev->bus, &reg, res);
		if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) ||
		    (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
			/* Only print message if not re-assigning */
			if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
				pr_debug("PCI:%s Resource %d %pR is unassigned\n",
					 pci_name(dev), i, res);
			/* 0-base the resource, keeping its size. */
			res->end -= res->start;
			res->start = 0;
			res->flags |= IORESOURCE_UNSET;
			continue;
		}

		pr_debug("PCI:%s Resource %d %pR\n", pci_name(dev), i, res);
	}

	/* Call machine specific resource fixup */
	if (ppc_md.pcibios_fixup_resources)
		ppc_md.pcibios_fixup_resources(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
898 
/* This function tries to figure out if a bridge resource has been initialized
 * by the firmware or not. It doesn't have to be absolutely bullet proof, but
 * things go more smoothly when it gets it right. It should cover cases such
 * as Apple "closed" bridge resources and bare-metal pSeries unassigned bridges
 */
static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
						 struct resource *res)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pci_dev *dev = bus->self;
	resource_size_t offset;
	struct pci_bus_region region;
	u16 command;
	int i;

	/* We don't do anything if PCI_PROBE_ONLY is set */
	if (pci_has_flag(PCI_PROBE_ONLY))
		return 0;

	/* Job is a bit different between memory and IO */
	if (res->flags & IORESOURCE_MEM) {
		pcibios_resource_to_bus(dev->bus, &region, res);

		/* If the BAR is non-0 then it's probably been initialized */
		if (region.start != 0)
			return 0;

		/* The BAR is 0, let's check if memory decoding is enabled on
		 * the bridge. If not, we consider it unassigned
		 */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if ((command & PCI_COMMAND_MEMORY) == 0)
			return 1;

		/* Memory decoding is enabled and the BAR is 0. If any of the
		 * bridge resources covers bus address 0 (i.e. its CPU start
		 * equals its bus->CPU offset), that's good enough for us for
		 * memory space
		 */
		for (i = 0; i < 3; i++) {
			if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
			    hose->mem_resources[i].start == hose->mem_offset[i])
				return 0;
		}

		/* Well, it starts at 0 and we know it will collide so we may as
		 * well consider it as unassigned. That covers the Apple case.
		 */
		return 1;
	} else {
		/* If the BAR is non-0, then we consider it assigned */
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		if (((res->start - offset) & 0xfffffffful) != 0)
			return 0;

		/* Here, we are a bit different than memory as typically IO
		 * space starting at low addresses -is- valid. What we do
		 * instead is that we consider as unassigned anything that
		 * doesn't have IO enabled in the PCI command register,
		 * and that's it.
		 */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if (command & PCI_COMMAND_IO)
			return 0;

		/* It's starting at 0 and IO is disabled in the bridge, consider
		 * it unassigned
		 */
		return 1;
	}
}
968 
/* Fixup resources of a PCI<->PCI bridge */
static void pcibios_fixup_bridge(struct pci_bus *bus)
{
	struct resource *res;
	int i;

	struct pci_dev *dev = bus->self;

	pci_bus_for_each_resource(bus, res, i) {
		if (!res || !res->flags)
			continue;
		/* On transparent bridges, resources past the standard three
		 * windows presumably mirror the parent's — leave them alone
		 * (TODO confirm intent).
		 */
		if (i >= 3 && bus->self->transparent)
			continue;

		/* If we're going to reassign everything, we can
		 * shrink the P2P resource to have size as being
		 * of 0 in order to save space.
		 */
		if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = -1;
			continue;
		}

		pr_debug("PCI:%s Bus rsrc %d %pR\n", pci_name(dev), i, res);

		/* Try to detect uninitialized P2P bridge resources,
		 * and clear them out so they get re-assigned later
		 */
		if (pcibios_uninitialized_bridge_resource(bus, res)) {
			res->flags = 0;
			pr_debug("PCI:%s            (unassigned)\n", pci_name(dev));
		}
	}
}
1005 
1006 void pcibios_setup_bus_self(struct pci_bus *bus)
1007 {
1008 	struct pci_controller *phb;
1009 
1010 	/* Fix up the bus resources for P2P bridges */
1011 	if (bus->self != NULL)
1012 		pcibios_fixup_bridge(bus);
1013 
1014 	/* Platform specific bus fixups. This is currently only used
1015 	 * by fsl_pci and I'm hoping to get rid of it at some point
1016 	 */
1017 	if (ppc_md.pcibios_fixup_bus)
1018 		ppc_md.pcibios_fixup_bus(bus);
1019 
1020 	/* Setup bus DMA mappings */
1021 	phb = pci_bus_to_host(bus);
1022 	if (phb->controller_ops.dma_bus_setup)
1023 		phb->controller_ops.dma_bus_setup(bus);
1024 }
1025 
1026 static void pcibios_setup_device(struct pci_dev *dev)
1027 {
1028 	struct pci_controller *phb;
1029 	/* Fixup NUMA node as it may not be setup yet by the generic
1030 	 * code and is needed by the DMA init
1031 	 */
1032 	set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
1033 
1034 	/* Hook up default DMA ops */
1035 	set_dma_ops(&dev->dev, pci_dma_ops);
1036 	set_dma_offset(&dev->dev, PCI_DRAM_OFFSET);
1037 
1038 	/* Additional platform DMA/iommu setup */
1039 	phb = pci_bus_to_host(dev->bus);
1040 	if (phb->controller_ops.dma_dev_setup)
1041 		phb->controller_ops.dma_dev_setup(dev);
1042 
1043 	/* Read default IRQs and fixup if necessary */
1044 	pci_read_irq_line(dev);
1045 	if (ppc_md.pci_irq_fixup)
1046 		ppc_md.pci_irq_fixup(dev);
1047 }
1048 
/*
 * Arch hook called by the PCI core for each newly added pci_dev.
 * Performs per-device DMA/IRQ setup (when the bus is ready) and the
 * platform SR-IOV fixup. Always returns 0.
 */
int pcibios_add_device(struct pci_dev *dev)
{
	/*
	 * We can only call pcibios_setup_device() after bus setup is complete,
	 * since some of the platform specific DMA setup code depends on it.
	 */
	if (dev->bus->is_added)
		pcibios_setup_device(dev);

#ifdef CONFIG_PCI_IOV
	/* Give the platform a chance to apply SR-IOV specific fixups */
	if (ppc_md.pcibios_fixup_sriov)
		ppc_md.pcibios_fixup_sriov(dev);
#endif /* CONFIG_PCI_IOV */

	return 0;
}
1065 
1066 void pcibios_setup_bus_devices(struct pci_bus *bus)
1067 {
1068 	struct pci_dev *dev;
1069 
1070 	pr_debug("PCI: Fixup bus devices %d (%s)\n",
1071 		 bus->number, bus->self ? pci_name(bus->self) : "PHB");
1072 
1073 	list_for_each_entry(dev, &bus->devices, bus_list) {
1074 		/* Cardbus can call us to add new devices to a bus, so ignore
1075 		 * those who are already fully discovered
1076 		 */
1077 		if (dev->is_added)
1078 			continue;
1079 
1080 		pcibios_setup_device(dev);
1081 	}
1082 }
1083 
/*
 * Arch hook invoked by pci_set_master(); nothing beyond the generic
 * command-register update is needed on powerpc.
 */
void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}
1088 
void pcibios_fixup_bus(struct pci_bus *bus)
{
	/* When called from the generic PCI probe, read PCI<->PCI bridge
	 * bases. This is -not- called when generating the PCI tree from
	 * the OF device-tree.
	 */
	pci_read_bridge_bases(bus);

	/* Now fixup the bus itself (bridge windows, platform hook, DMA) */
	pcibios_setup_bus_self(bus);

	/* Now fixup devices on that bus */
	pcibios_setup_bus_devices(bus);
}
EXPORT_SYMBOL(pcibios_fixup_bus);
1104 
/*
 * Cardbus hotplug entry point: only the per-device fixups are needed
 * here, the bridge itself was set up when the socket was probed.
 */
void pci_fixup_cardbus(struct pci_bus *bus)
{
	/* Now fixup devices on that bus */
	pcibios_setup_bus_devices(bus);
}
1110 
1111 
1112 static int skip_isa_ioresource_align(struct pci_dev *dev)
1113 {
1114 	if (pci_has_flag(PCI_CAN_SKIP_ISA_ALIGN) &&
1115 	    !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
1116 		return 1;
1117 	return 0;
1118 }
1119 
1120 /*
1121  * We need to avoid collisions with `mirrored' VGA ports
1122  * and other strange ISA hardware, so we always want the
1123  * addresses to be allocated in the 0x000-0x0ff region
1124  * modulo 0x400.
1125  *
1126  * Why? Because some silly external IO cards only decode
1127  * the low 10 bits of the IO address. The 0x00-0xff region
1128  * is reserved for motherboard devices that decode all 16
1129  * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
1130  * but we want to try to avoid allocating at 0x2900-0x2bff
 * which might have been mirrored at 0x0100-0x03ff..
1132  */
1133 resource_size_t pcibios_align_resource(void *data, const struct resource *res,
1134 				resource_size_t size, resource_size_t align)
1135 {
1136 	struct pci_dev *dev = data;
1137 	resource_size_t start = res->start;
1138 
1139 	if (res->flags & IORESOURCE_IO) {
1140 		if (skip_isa_ioresource_align(dev))
1141 			return start;
1142 		if (start & 0x300)
1143 			start = (start + 0x3ff) & ~0x3ff;
1144 	}
1145 
1146 	return start;
1147 }
1148 EXPORT_SYMBOL(pcibios_align_resource);
1149 
1150 /*
1151  * Reparent resource children of pr that conflict with res
1152  * under res, and make res replace those children.
1153  */
1154 static int reparent_resources(struct resource *parent,
1155 				     struct resource *res)
1156 {
1157 	struct resource *p, **pp;
1158 	struct resource **firstpp = NULL;
1159 
1160 	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
1161 		if (p->end < res->start)
1162 			continue;
1163 		if (res->end < p->start)
1164 			break;
1165 		if (p->start < res->start || p->end > res->end)
1166 			return -1;	/* not completely contained */
1167 		if (firstpp == NULL)
1168 			firstpp = pp;
1169 	}
1170 	if (firstpp == NULL)
1171 		return -1;	/* didn't find any conflicting entries? */
1172 	res->parent = parent;
1173 	res->child = *firstpp;
1174 	res->sibling = *pp;
1175 	*firstpp = res;
1176 	*pp = NULL;
1177 	for (p = res->child; p != NULL; p = p->sibling) {
1178 		p->parent = res;
1179 		pr_debug("PCI: Reparented %s %pR under %s\n",
1180 			 p->name, p, res->name);
1181 	}
1182 	return 0;
1183 }
1184 
1185 /*
1186  *  Handle resources of PCI devices.  If the world were perfect, we could
1187  *  just allocate all the resource regions and do nothing more.  It isn't.
1188  *  On the other hand, we cannot just re-allocate all devices, as it would
1189  *  require us to know lots of host bridge internals.  So we attempt to
1190  *  keep as much of the original configuration as possible, but tweak it
1191  *  when it's found to be wrong.
1192  *
1193  *  Known BIOS problems we have to work around:
1194  *	- I/O or memory regions not configured
1195  *	- regions configured, but not enabled in the command register
1196  *	- bogus I/O addresses above 64K used
1197  *	- expansion ROMs left enabled (this may sound harmless, but given
1198  *	  the fact the PCI specs explicitly allow address decoders to be
1199  *	  shared between expansion ROMs and other resource regions, it's
1200  *	  at least dangerous)
1201  *
1202  *  Our solution:
1203  *	(1) Allocate resources for all buses behind PCI-to-PCI bridges.
1204  *	    This gives us fixed barriers on where we can allocate.
1205  *	(2) Allocate resources for all enabled devices.  If there is
1206  *	    a collision, just mark the resource as unallocated. Also
1207  *	    disable expansion ROMs during this step.
1208  *	(3) Try to allocate resources for disabled devices.  If the
1209  *	    resources were assigned correctly, everything goes well,
1210  *	    if they weren't, they won't disturb allocation of other
1211  *	    resources.
1212  *	(4) Assign new addresses to resources which were either
1213  *	    not configured at all or misconfigured.  If explicitly
1214  *	    requested by the user, configure expansion ROM address
1215  *	    as well.
1216  */
1217 
1218 static void pcibios_allocate_bus_resources(struct pci_bus *bus)
1219 {
1220 	struct pci_bus *b;
1221 	int i;
1222 	struct resource *res, *pr;
1223 
1224 	pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
1225 		 pci_domain_nr(bus), bus->number);
1226 
1227 	pci_bus_for_each_resource(bus, res, i) {
1228 		if (!res || !res->flags || res->start > res->end || res->parent)
1229 			continue;
1230 
1231 		/* If the resource was left unset at this point, we clear it */
1232 		if (res->flags & IORESOURCE_UNSET)
1233 			goto clear_resource;
1234 
1235 		if (bus->parent == NULL)
1236 			pr = (res->flags & IORESOURCE_IO) ?
1237 				&ioport_resource : &iomem_resource;
1238 		else {
1239 			pr = pci_find_parent_resource(bus->self, res);
1240 			if (pr == res) {
1241 				/* this happens when the generic PCI
1242 				 * code (wrongly) decides that this
1243 				 * bridge is transparent  -- paulus
1244 				 */
1245 				continue;
1246 			}
1247 		}
1248 
1249 		pr_debug("PCI: %s (bus %d) bridge rsrc %d: %pR, parent %p (%s)\n",
1250 			 bus->self ? pci_name(bus->self) : "PHB", bus->number,
1251 			 i, res, pr, (pr && pr->name) ? pr->name : "nil");
1252 
1253 		if (pr && !(pr->flags & IORESOURCE_UNSET)) {
1254 			struct pci_dev *dev = bus->self;
1255 
1256 			if (request_resource(pr, res) == 0)
1257 				continue;
1258 			/*
1259 			 * Must be a conflict with an existing entry.
1260 			 * Move that entry (or entries) under the
1261 			 * bridge resource and try again.
1262 			 */
1263 			if (reparent_resources(pr, res) == 0)
1264 				continue;
1265 
1266 			if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
1267 			    pci_claim_bridge_resource(dev,
1268 						i + PCI_BRIDGE_RESOURCES) == 0)
1269 				continue;
1270 		}
1271 		pr_warning("PCI: Cannot allocate resource region "
1272 			   "%d of PCI bridge %d, will remap\n", i, bus->number);
1273 	clear_resource:
1274 		/* The resource might be figured out when doing
1275 		 * reassignment based on the resources required
1276 		 * by the downstream PCI devices. Here we set
1277 		 * the size of the resource to be 0 in order to
1278 		 * save more space.
1279 		 */
1280 		res->start = 0;
1281 		res->end = -1;
1282 		res->flags = 0;
1283 	}
1284 
1285 	list_for_each_entry(b, &bus->children, node)
1286 		pcibios_allocate_bus_resources(b);
1287 }
1288 
1289 static inline void alloc_resource(struct pci_dev *dev, int idx)
1290 {
1291 	struct resource *pr, *r = &dev->resource[idx];
1292 
1293 	pr_debug("PCI: Allocating %s: Resource %d: %pR\n",
1294 		 pci_name(dev), idx, r);
1295 
1296 	pr = pci_find_parent_resource(dev, r);
1297 	if (!pr || (pr->flags & IORESOURCE_UNSET) ||
1298 	    request_resource(pr, r) < 0) {
1299 		printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
1300 		       " of device %s, will remap\n", idx, pci_name(dev));
1301 		if (pr)
1302 			pr_debug("PCI:  parent is %p: %pR\n", pr, pr);
1303 		/* We'll assign a new address later */
1304 		r->flags |= IORESOURCE_UNSET;
1305 		r->end -= r->start;
1306 		r->start = 0;
1307 	}
1308 }
1309 
/*
 * Claim the already-assigned BARs of every PCI device. Called twice:
 * pass 0 claims resources of enabled devices, pass 1 those of disabled
 * ones (see "Our solution" above). Pass 0 also switches off any
 * expansion ROM left enabled by firmware.
 */
static void __init pcibios_allocate_resources(int pass)
{
	struct pci_dev *dev = NULL;
	int idx, disabled;
	u16 command;
	struct resource *r;

	for_each_pci_dev(dev) {
		pci_read_config_word(dev, PCI_COMMAND, &command);
		for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
			r = &dev->resource[idx];
			if (r->parent)		/* Already allocated */
				continue;
			if (!r->flags || (r->flags & IORESOURCE_UNSET))
				continue;	/* Not assigned at all */
			/* We only allocate ROMs on pass 1 just in case they
			 * have been screwed up by firmware
			 */
			/* NOTE(review): this assignment is dead -- it is
			 * unconditionally overwritten by the if/else just
			 * below, so ROM BARs actually follow the
			 * PCI_COMMAND_MEMORY bit rather than always being
			 * treated as disabled. Left as-is; confirm the
			 * intent before changing.
			 */
			if (idx == PCI_ROM_RESOURCE )
				disabled = 1;
			if (r->flags & IORESOURCE_IO)
				disabled = !(command & PCI_COMMAND_IO);
			else
				disabled = !(command & PCI_COMMAND_MEMORY);
			if (pass == disabled)
				alloc_resource(dev, idx);
		}
		if (pass)
			continue;
		r = &dev->resource[PCI_ROM_RESOURCE];
		if (r->flags) {
			/* Turn the ROM off, leave the resource region,
			 * but keep it unregistered.
			 */
			u32 reg;
			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
			if (reg & PCI_ROM_ADDRESS_ENABLE) {
				pr_debug("PCI: Switching off ROM of %s\n",
					 pci_name(dev));
				r->flags &= ~IORESOURCE_ROM_ENABLE;
				pci_write_config_dword(dev, dev->rom_base_reg,
						       reg & ~PCI_ROM_ADDRESS_ENABLE);
			}
		}
	}
}
1356 
/*
 * Reserve the legacy ISA IO page and the VGA memory window of @bus's
 * host bridge so later assignment doesn't place devices on top of
 * them. Reservations are best-effort: failures are logged and ignored.
 */
static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t	offset;
	struct resource *res, *pres;
	int i;

	pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus));

	/* Check for IO */
	if (!(hose->io_resource.flags & IORESOURCE_IO))
		goto no_io;
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy IO";
	res->flags = IORESOURCE_IO;
	res->start = offset;
	/* Reserve the first 4KB of IO space (the legacy ISA range) */
	res->end = (offset + 0xfff) & 0xfffffffful;
	pr_debug("Candidate legacy IO: %pR\n", res);
	if (request_resource(&hose->io_resource, res)) {
		printk(KERN_DEBUG
		       "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
		       pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}

 no_io:
	/* Check for memory */
	/* Find a PHB memory window covering the legacy VGA frame buffer
	 * range 0xa0000-0xbffff in PCI bus addresses.
	 */
	for (i = 0; i < 3; i++) {
		pres = &hose->mem_resources[i];
		offset = hose->mem_offset[i];
		if (!(pres->flags & IORESOURCE_MEM))
			continue;
		pr_debug("hose mem res: %pR\n", pres);
		if ((pres->start - offset) <= 0xa0000 &&
		    (pres->end - offset) >= 0xbffff)
			break;
	}
	if (i >= 3)
		return;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy VGA memory";
	res->flags = IORESOURCE_MEM;
	res->start = 0xa0000 + offset;
	res->end = 0xbffff + offset;
	pr_debug("Candidate VGA memory: %pR\n", res);
	if (request_resource(pres, res)) {
		printk(KERN_DEBUG
		       "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
		       pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}
}
1412 
1413 void __init pcibios_resource_survey(void)
1414 {
1415 	struct pci_bus *b;
1416 
1417 	/* Allocate and assign resources */
1418 	list_for_each_entry(b, &pci_root_buses, node)
1419 		pcibios_allocate_bus_resources(b);
1420 	if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
1421 		pcibios_allocate_resources(0);
1422 		pcibios_allocate_resources(1);
1423 	}
1424 
1425 	/* Before we start assigning unassigned resource, we try to reserve
1426 	 * the low IO area and the VGA memory area if they intersect the
1427 	 * bus available resources to avoid allocating things on top of them
1428 	 */
1429 	if (!pci_has_flag(PCI_PROBE_ONLY)) {
1430 		list_for_each_entry(b, &pci_root_buses, node)
1431 			pcibios_reserve_legacy_regions(b);
1432 	}
1433 
1434 	/* Now, if the platform didn't decide to blindly trust the firmware,
1435 	 * we proceed to assigning things that were left unassigned
1436 	 */
1437 	if (!pci_has_flag(PCI_PROBE_ONLY)) {
1438 		pr_debug("PCI: Assigning unassigned resources...\n");
1439 		pci_assign_unassigned_resources();
1440 	}
1441 
1442 	/* Call machine dependent fixup */
1443 	if (ppc_md.pcibios_fixup)
1444 		ppc_md.pcibios_fixup();
1445 }
1446 
1447 /* This is used by the PCI hotplug driver to allocate resource
1448  * of newly plugged busses. We can try to consolidate with the
1449  * rest of the code later, for now, keep it as-is as our main
1450  * resource allocation function doesn't deal with sub-trees yet.
1451  */
1452 void pcibios_claim_one_bus(struct pci_bus *bus)
1453 {
1454 	struct pci_dev *dev;
1455 	struct pci_bus *child_bus;
1456 
1457 	list_for_each_entry(dev, &bus->devices, bus_list) {
1458 		int i;
1459 
1460 		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1461 			struct resource *r = &dev->resource[i];
1462 
1463 			if (r->parent || !r->start || !r->flags)
1464 				continue;
1465 
1466 			pr_debug("PCI: Claiming %s: Resource %d: %pR\n",
1467 				 pci_name(dev), i, r);
1468 
1469 			if (pci_claim_resource(dev, i) == 0)
1470 				continue;
1471 
1472 			pci_claim_bridge_resource(dev, i);
1473 		}
1474 	}
1475 
1476 	list_for_each_entry(child_bus, &bus->children, node)
1477 		pcibios_claim_one_bus(child_bus);
1478 }
1479 EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
1480 
1481 
1482 /* pcibios_finish_adding_to_bus
1483  *
1484  * This is to be called by the hotplug code after devices have been
1485  * added to a bus, this include calling it for a PHB that is just
1486  * being added
1487  */
1488 void pcibios_finish_adding_to_bus(struct pci_bus *bus)
1489 {
1490 	pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
1491 		 pci_domain_nr(bus), bus->number);
1492 
1493 	/* Allocate bus and devices resources */
1494 	pcibios_allocate_bus_resources(bus);
1495 	pcibios_claim_one_bus(bus);
1496 	if (!pci_has_flag(PCI_PROBE_ONLY)) {
1497 		if (bus->self)
1498 			pci_assign_unassigned_bridge_resources(bus->self);
1499 		else
1500 			pci_assign_unassigned_bus_resources(bus);
1501 	}
1502 
1503 	/* Fixup EEH */
1504 	eeh_add_device_tree_late(bus);
1505 
1506 	/* Add new devices to global lists.  Register in proc, sysfs. */
1507 	pci_bus_add_devices(bus);
1508 
1509 	/* sysfs files should only be added after devices are added */
1510 	eeh_add_sysfs_files(bus);
1511 }
1512 EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
1513 
1514 int pcibios_enable_device(struct pci_dev *dev, int mask)
1515 {
1516 	struct pci_controller *phb = pci_bus_to_host(dev->bus);
1517 
1518 	if (phb->controller_ops.enable_device_hook)
1519 		if (!phb->controller_ops.enable_device_hook(dev))
1520 			return -EINVAL;
1521 
1522 	return pci_enable_resources(dev, mask);
1523 }
1524 
/*
 * Arch hook called when a device is disabled: give the PHB a chance
 * to undo whatever its enable hook set up, if it provides one.
 */
void pcibios_disable_device(struct pci_dev *dev)
{
	struct pci_controller *phb = pci_bus_to_host(dev->bus);

	if (phb->controller_ops.disable_device)
		phb->controller_ops.disable_device(dev);
}
1532 
/* Offset between the CPU-side virtual IO addresses of @hose and the
 * global _IO_BASE; subtracted from a resource to get the bus-relative
 * IO address.
 */
resource_size_t pcibios_io_space_offset(struct pci_controller *hose)
{
	return (unsigned long) hose->io_base_virt - _IO_BASE;
}
1537 
1538 static void pcibios_setup_phb_resources(struct pci_controller *hose,
1539 					struct list_head *resources)
1540 {
1541 	struct resource *res;
1542 	resource_size_t offset;
1543 	int i;
1544 
1545 	/* Hookup PHB IO resource */
1546 	res = &hose->io_resource;
1547 
1548 	if (!res->flags) {
1549 		pr_debug("PCI: I/O resource not set for host"
1550 			 " bridge %s (domain %d)\n",
1551 			 hose->dn->full_name, hose->global_number);
1552 	} else {
1553 		offset = pcibios_io_space_offset(hose);
1554 
1555 		pr_debug("PCI: PHB IO resource    = %pR off 0x%08llx\n",
1556 			 res, (unsigned long long)offset);
1557 		pci_add_resource_offset(resources, res, offset);
1558 	}
1559 
1560 	/* Hookup PHB Memory resources */
1561 	for (i = 0; i < 3; ++i) {
1562 		res = &hose->mem_resources[i];
1563 		if (!res->flags)
1564 			continue;
1565 
1566 		offset = hose->mem_offset[i];
1567 		pr_debug("PCI: PHB MEM resource %d = %pR off 0x%08llx\n", i,
1568 			 res, (unsigned long long)offset);
1569 
1570 		pci_add_resource_offset(resources, res, offset);
1571 	}
1572 }
1573 
1574 /*
1575  * Null PCI config access functions, for the case when we can't
1576  * find a hose.
1577  */
1578 #define NULL_PCI_OP(rw, size, type)					\
1579 static int								\
1580 null_##rw##_config_##size(struct pci_dev *dev, int offset, type val)	\
1581 {									\
1582 	return PCIBIOS_DEVICE_NOT_FOUND;    				\
1583 }
1584 
1585 static int
1586 null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
1587 		 int len, u32 *val)
1588 {
1589 	return PCIBIOS_DEVICE_NOT_FOUND;
1590 }
1591 
1592 static int
1593 null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
1594 		  int len, u32 val)
1595 {
1596 	return PCIBIOS_DEVICE_NOT_FOUND;
1597 }
1598 
1599 static struct pci_ops null_pci_ops =
1600 {
1601 	.read = null_read_config,
1602 	.write = null_write_config,
1603 };
1604 
1605 /*
1606  * These functions are used early on before PCI scanning is done
1607  * and all of the pci_dev and pci_bus structures have been created.
1608  */
1609 static struct pci_bus *
1610 fake_pci_bus(struct pci_controller *hose, int busnr)
1611 {
1612 	static struct pci_bus bus;
1613 
1614 	if (hose == NULL) {
1615 		printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
1616 	}
1617 	bus.number = busnr;
1618 	bus.sysdata = hose;
1619 	bus.ops = hose? hose->ops: &null_pci_ops;
1620 	return &bus;
1621 }
1622 
/* Generate early config accessors that take a hose + bus number
 * instead of a struct pci_bus, for use before the bus structures
 * have been created.
 */
#define EARLY_PCI_OP(rw, size, type)					\
int early_##rw##_config_##size(struct pci_controller *hose, int bus,	\
			       int devfn, int offset, type value)	\
{									\
	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus),	\
					    devfn, offset, value);	\
}

EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)

/* Early-boot variant of pci_bus_find_capability() */
int early_find_capability(struct pci_controller *hose, int bus, int devfn,
			  int cap)
{
	return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
}
1643 
/* Return the device-tree node of @bus's host bridge, with a reference
 * held via of_node_get().
 */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
{
	struct pci_controller *hose = bus->sysdata;

	return of_node_get(hose->dn);
}
1650 
1651 /**
1652  * pci_scan_phb - Given a pci_controller, setup and scan the PCI bus
1653  * @hose: Pointer to the PCI host controller instance structure
1654  */
1655 void pcibios_scan_phb(struct pci_controller *hose)
1656 {
1657 	LIST_HEAD(resources);
1658 	struct pci_bus *bus;
1659 	struct device_node *node = hose->dn;
1660 	int mode;
1661 
1662 	pr_debug("PCI: Scanning PHB %s\n", of_node_full_name(node));
1663 
1664 	/* Get some IO space for the new PHB */
1665 	pcibios_setup_phb_io_space(hose);
1666 
1667 	/* Wire up PHB bus resources */
1668 	pcibios_setup_phb_resources(hose, &resources);
1669 
1670 	hose->busn.start = hose->first_busno;
1671 	hose->busn.end	 = hose->last_busno;
1672 	hose->busn.flags = IORESOURCE_BUS;
1673 	pci_add_resource(&resources, &hose->busn);
1674 
1675 	/* Create an empty bus for the toplevel */
1676 	bus = pci_create_root_bus(hose->parent, hose->first_busno,
1677 				  hose->ops, hose, &resources);
1678 	if (bus == NULL) {
1679 		pr_err("Failed to create bus for PCI domain %04x\n",
1680 			hose->global_number);
1681 		pci_free_resource_list(&resources);
1682 		return;
1683 	}
1684 	hose->bus = bus;
1685 
1686 	/* Get probe mode and perform scan */
1687 	mode = PCI_PROBE_NORMAL;
1688 	if (node && hose->controller_ops.probe_mode)
1689 		mode = hose->controller_ops.probe_mode(bus);
1690 	pr_debug("    probe mode: %d\n", mode);
1691 	if (mode == PCI_PROBE_DEVTREE)
1692 		of_scan_bus(node, bus);
1693 
1694 	if (mode == PCI_PROBE_NORMAL) {
1695 		pci_bus_update_busn_res_end(bus, 255);
1696 		hose->last_busno = pci_scan_child_bus(bus);
1697 		pci_bus_update_busn_res_end(bus, hose->last_busno);
1698 	}
1699 
1700 	/* Platform gets a chance to do some global fixups before
1701 	 * we proceed to resource allocation
1702 	 */
1703 	if (ppc_md.pcibios_fixup_phb)
1704 		ppc_md.pcibios_fixup_phb(hose);
1705 
1706 	/* Configure PCI Express settings */
1707 	if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
1708 		struct pci_bus *child;
1709 		list_for_each_entry(child, &bus->children, node)
1710 			pcie_bus_configure_settings(child);
1711 	}
1712 }
1713 EXPORT_SYMBOL_GPL(pcibios_scan_phb);
1714 
/* Clear the resources of a Freescale/Motorola PowerPC SoC that is
 * configured as a PCI agent (endpoint), so the generic code doesn't
 * try to manage windows the host side controls.
 */
static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
{
	int i, class = dev->class >> 8;
	/* When configured as agent, programming interface = 1 */
	int prog_if = dev->class & 0xf;

	if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
	     class == PCI_CLASS_BRIDGE_OTHER) &&
		(dev->hdr_type == PCI_HEADER_TYPE_NORMAL) &&
		(prog_if == 0) &&
		(dev->bus->parent == NULL)) {
		for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
			dev->resource[i].start = 0;
			dev->resource[i].end = 0;
			dev->resource[i].flags = 0;
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
1735 
/* Pick a default device for the VGA arbiter: any VGA device with IO
 * or memory decoding enabled, or the first one seen when no default
 * has been set yet.
 */
static void fixup_vga(struct pci_dev *pdev)
{
	u16 cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if ((cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) || !vga_default_device())
		vga_set_default_device(pdev);

}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
			      PCI_CLASS_DISPLAY_VGA, 8, fixup_vga);
1747