xref: /openbmc/linux/arch/x86/pci/i386.c (revision d2999e1b)
1 /*
2  *	Low-Level PCI Access for i386 machines
3  *
4  * Copyright 1993, 1994 Drew Eckhardt
5  *      Visionary Computing
6  *      (Unix and Linux consulting and custom programming)
7  *      Drew@Colorado.EDU
8  *      +1 (303) 786-7975
9  *
10  * Drew's work was sponsored by:
11  *	iX Multiuser Multitasking Magazine
12  *	Hannover, Germany
13  *	hm@ix.de
14  *
15  * Copyright 1997--2000 Martin Mares <mj@ucw.cz>
16  *
17  * For more information, please consult the following manuals (look at
18  * http://www.pcisig.com/ for how to get them):
19  *
20  * PCI BIOS Specification
21  * PCI Local Bus Specification
22  * PCI to PCI Bridge Specification
23  * PCI System Design Guide
24  *
25  */
26 
27 #include <linux/types.h>
28 #include <linux/kernel.h>
29 #include <linux/export.h>
30 #include <linux/pci.h>
31 #include <linux/init.h>
32 #include <linux/ioport.h>
33 #include <linux/errno.h>
34 #include <linux/bootmem.h>
35 
36 #include <asm/pat.h>
37 #include <asm/e820.h>
38 #include <asm/pci_x86.h>
39 #include <asm/io_apic.h>
40 
41 
42 /*
43  * This list of dynamic mappings is for temporarily maintaining
44  * original BIOS BAR addresses for possible reinstatement.
45  */
struct pcibios_fwaddrmap {
	struct list_head list;		/* link in pcibios_fwaddrmappings */
	struct pci_dev *dev;		/* device; reference taken via pci_dev_get() */
	/* BIOS-programmed BAR addresses, indexed by resource index */
	resource_size_t fw_addr[DEVICE_COUNT_RESOURCE];
};
51 
/* List of saved firmware BAR addresses, protected by pcibios_fwaddrmap_lock. */
static LIST_HEAD(pcibios_fwaddrmappings);
static DEFINE_SPINLOCK(pcibios_fwaddrmap_lock);
/* Once true (set after resource assignment finishes), the saved
 * addresses have been discarded and save/retrieve become no-ops. */
static bool pcibios_fw_addr_done;
55 
56 /* Must be called with 'pcibios_fwaddrmap_lock' lock held. */
57 static struct pcibios_fwaddrmap *pcibios_fwaddrmap_lookup(struct pci_dev *dev)
58 {
59 	struct pcibios_fwaddrmap *map;
60 
61 	WARN_ON_SMP(!spin_is_locked(&pcibios_fwaddrmap_lock));
62 
63 	list_for_each_entry(map, &pcibios_fwaddrmappings, list)
64 		if (map->dev == dev)
65 			return map;
66 
67 	return NULL;
68 }
69 
70 static void
71 pcibios_save_fw_addr(struct pci_dev *dev, int idx, resource_size_t fw_addr)
72 {
73 	unsigned long flags;
74 	struct pcibios_fwaddrmap *map;
75 
76 	if (pcibios_fw_addr_done)
77 		return;
78 
79 	spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
80 	map = pcibios_fwaddrmap_lookup(dev);
81 	if (!map) {
82 		spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
83 		map = kzalloc(sizeof(*map), GFP_KERNEL);
84 		if (!map)
85 			return;
86 
87 		map->dev = pci_dev_get(dev);
88 		map->fw_addr[idx] = fw_addr;
89 		INIT_LIST_HEAD(&map->list);
90 
91 		spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
92 		list_add_tail(&map->list, &pcibios_fwaddrmappings);
93 	} else
94 		map->fw_addr[idx] = fw_addr;
95 	spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
96 }
97 
98 resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx)
99 {
100 	unsigned long flags;
101 	struct pcibios_fwaddrmap *map;
102 	resource_size_t fw_addr = 0;
103 
104 	if (pcibios_fw_addr_done)
105 		return 0;
106 
107 	spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
108 	map = pcibios_fwaddrmap_lookup(dev);
109 	if (map)
110 		fw_addr = map->fw_addr[idx];
111 	spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
112 
113 	return fw_addr;
114 }
115 
/*
 * Tear down the saved-firmware-address list: free every entry, drop the
 * device reference each one holds, and set pcibios_fw_addr_done so that
 * no further addresses are saved or retrieved.
 */
static void __init pcibios_fw_addr_list_del(void)
{
	unsigned long flags;
	struct pcibios_fwaddrmap *entry, *next;

	spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
	list_for_each_entry_safe(entry, next, &pcibios_fwaddrmappings, list) {
		list_del(&entry->list);
		pci_dev_put(entry->dev);	/* balances pci_dev_get() at save time */
		kfree(entry);
	}
	spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
	pcibios_fw_addr_done = true;
}
130 
131 static int
132 skip_isa_ioresource_align(struct pci_dev *dev) {
133 
134 	if ((pci_probe & PCI_CAN_SKIP_ISA_ALIGN) &&
135 	    !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
136 		return 1;
137 	return 0;
138 }
139 
140 /*
141  * We need to avoid collisions with `mirrored' VGA ports
142  * and other strange ISA hardware, so we always want the
143  * addresses to be allocated in the 0x000-0x0ff region
144  * modulo 0x400.
145  *
146  * Why? Because some silly external IO cards only decode
147  * the low 10 bits of the IO address. The 0x00-0xff region
148  * is reserved for motherboard devices that decode all 16
149  * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
150  * but we want to try to avoid allocating at 0x2900-0x2bff
 * which might be mirrored at 0x0100-0x03ff..
152  */
153 resource_size_t
154 pcibios_align_resource(void *data, const struct resource *res,
155 			resource_size_t size, resource_size_t align)
156 {
157 	struct pci_dev *dev = data;
158 	resource_size_t start = res->start;
159 
160 	if (res->flags & IORESOURCE_IO) {
161 		if (skip_isa_ioresource_align(dev))
162 			return start;
163 		if (start & 0x300)
164 			start = (start + 0x3ff) & ~0x3ff;
165 	}
166 	return start;
167 }
168 EXPORT_SYMBOL(pcibios_align_resource);
169 
170 /*
171  *  Handle resources of PCI devices.  If the world were perfect, we could
172  *  just allocate all the resource regions and do nothing more.  It isn't.
173  *  On the other hand, we cannot just re-allocate all devices, as it would
174  *  require us to know lots of host bridge internals.  So we attempt to
175  *  keep as much of the original configuration as possible, but tweak it
176  *  when it's found to be wrong.
177  *
178  *  Known BIOS problems we have to work around:
179  *	- I/O or memory regions not configured
180  *	- regions configured, but not enabled in the command register
181  *	- bogus I/O addresses above 64K used
182  *	- expansion ROMs left enabled (this may sound harmless, but given
183  *	  the fact the PCI specs explicitly allow address decoders to be
184  *	  shared between expansion ROMs and other resource regions, it's
185  *	  at least dangerous)
186  *	- bad resource sizes or overlaps with other regions
187  *
188  *  Our solution:
189  *	(1) Allocate resources for all buses behind PCI-to-PCI bridges.
190  *	    This gives us fixed barriers on where we can allocate.
191  *	(2) Allocate resources for all enabled devices.  If there is
192  *	    a collision, just mark the resource as unallocated. Also
193  *	    disable expansion ROMs during this step.
194  *	(3) Try to allocate resources for disabled devices.  If the
195  *	    resources were assigned correctly, everything goes well,
196  *	    if they weren't, they won't disturb allocation of other
197  *	    resources.
198  *	(4) Assign new addresses to resources which were either
199  *	    not configured at all or misconfigured.  If explicitly
200  *	    requested by the user, configure expansion ROM address
201  *	    as well.
202  */
203 
/*
 * Claim the bridge window resources of bridge device @dev.  A window
 * that is unset or cannot be claimed is cleared completely so that no
 * child resource is later allocated inside an unusable range.
 */
static void pcibios_allocate_bridge_resources(struct pci_dev *dev)
{
	int idx;
	struct resource *r;

	for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
		r = &dev->resource[idx];
		if (!r->flags)
			continue;
		if (r->parent)	/* Already allocated */
			continue;
		if (!r->start || pci_claim_resource(dev, idx) < 0) {
			/*
			 * Something is wrong with the region.
			 * Invalidate the resource to prevent
			 * child resource allocations in this
			 * range.
			 */
			r->start = r->end = 0;
			r->flags = 0;
		}
	}
}
227 
/*
 * Claim bridge window resources for @bus (via its upstream bridge, if
 * any) and recurse into every child bus.
 */
static void pcibios_allocate_bus_resources(struct pci_bus *bus)
{
	struct pci_bus *child;

	/* Depth-First Search on bus tree */
	if (bus->self)
		pcibios_allocate_bridge_resources(bus->self);
	list_for_each_entry(child, &bus->children, node)
		pcibios_allocate_bus_resources(child);
}
238 
/* Inclusive range of indices into pci_dev->resource[] to scan. */
struct pci_check_idx_range {
	int start;
	int end;
};
243 
/*
 * Try to claim the firmware-assigned BARs of @dev.  Called twice per
 * device: pass 0 handles BARs whose decode type (I/O or memory) is
 * enabled in the command register, pass 1 those whose decode is
 * disabled.  A BAR that cannot be claimed has its firmware address
 * saved for possible reinstatement and is then cleared so a fresh
 * address gets assigned later.  On pass 0, any enabled expansion ROM
 * is also switched off.
 */
static void pcibios_allocate_dev_resources(struct pci_dev *dev, int pass)
{
	int idx, disabled, i;
	u16 command;
	struct resource *r;

	/* BAR index ranges to scan: standard BARs, plus SR-IOV BARs
	 * when the kernel is configured for them. */
	struct pci_check_idx_range idx_range[] = {
		{ PCI_STD_RESOURCES, PCI_STD_RESOURCE_END },
#ifdef CONFIG_PCI_IOV
		{ PCI_IOV_RESOURCES, PCI_IOV_RESOURCE_END },
#endif
	};

	pci_read_config_word(dev, PCI_COMMAND, &command);
	for (i = 0; i < ARRAY_SIZE(idx_range); i++)
		for (idx = idx_range[i].start; idx <= idx_range[i].end; idx++) {
			r = &dev->resource[idx];
			if (r->parent)	/* Already allocated */
				continue;
			if (!r->start)	/* Address not assigned at all */
				continue;
			/* A BAR counts as disabled when its decode type is
			 * turned off in the command register. */
			if (r->flags & IORESOURCE_IO)
				disabled = !(command & PCI_COMMAND_IO);
			else
				disabled = !(command & PCI_COMMAND_MEMORY);
			/* pass 0 -> enabled BARs, pass 1 -> disabled BARs */
			if (pass == disabled) {
				dev_dbg(&dev->dev,
					"BAR %d: reserving %pr (d=%d, p=%d)\n",
					idx, r, disabled, pass);
				if (pci_claim_resource(dev, idx) < 0) {
					if (r->flags & IORESOURCE_PCI_FIXED) {
						dev_info(&dev->dev, "BAR %d %pR is immovable\n",
							 idx, r);
					} else {
						/* We'll assign a new address later */
						pcibios_save_fw_addr(dev,
								idx, r->start);
						r->end -= r->start;
						r->start = 0;
					}
				}
			}
		}
	if (!pass) {
		r = &dev->resource[PCI_ROM_RESOURCE];
		if (r->flags & IORESOURCE_ROM_ENABLE) {
			/* Turn the ROM off, leave the resource region,
			 * but keep it unregistered. */
			u32 reg;
			dev_dbg(&dev->dev, "disabling ROM %pR\n", r);
			r->flags &= ~IORESOURCE_ROM_ENABLE;
			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
			pci_write_config_dword(dev, dev->rom_base_reg,
						reg & ~PCI_ROM_ADDRESS_ENABLE);
		}
	}
}
301 
/*
 * Run pcibios_allocate_dev_resources() for every device on @bus and
 * recurse into each subordinate bus behind a bridge.  @pass is passed
 * through unchanged (0 = enabled devices, 1 = disabled devices).
 */
static void pcibios_allocate_resources(struct pci_bus *bus, int pass)
{
	struct pci_dev *dev;
	struct pci_bus *child;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		pcibios_allocate_dev_resources(dev, pass);

		child = dev->subordinate;
		if (child)
			pcibios_allocate_resources(child, pass);
	}
}
315 
316 static void pcibios_allocate_dev_rom_resource(struct pci_dev *dev)
317 {
318 	struct resource *r;
319 
320 	/*
321 	 * Try to use BIOS settings for ROMs, otherwise let
322 	 * pci_assign_unassigned_resources() allocate the new
323 	 * addresses.
324 	 */
325 	r = &dev->resource[PCI_ROM_RESOURCE];
326 	if (!r->flags || !r->start)
327 		return;
328 	if (r->parent) /* Already allocated */
329 		return;
330 
331 	if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) {
332 		r->end -= r->start;
333 		r->start = 0;
334 	}
335 }
/*
 * Claim ROM resources for every device on @bus, recursing into each
 * subordinate bus behind a bridge.
 */
static void pcibios_allocate_rom_resources(struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pci_bus *child;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		pcibios_allocate_dev_rom_resource(dev);

		child = dev->subordinate;
		if (child)
			pcibios_allocate_rom_resources(child);
	}
}
349 
/*
 * Late init step: claim BIOS-programmed ROM addresses (unless ROM
 * reassignment was requested via PCI_ASSIGN_ROMS), let the PCI core
 * assign everything still unassigned, then discard the saved firmware
 * addresses.  Always returns 0.
 */
static int __init pcibios_assign_resources(void)
{
	struct pci_bus *bus;

	if (!(pci_probe & PCI_ASSIGN_ROMS))
		list_for_each_entry(bus, &pci_root_buses, node)
			pcibios_allocate_rom_resources(bus);

	pci_assign_unassigned_resources();
	/* Saved firmware addresses are no longer needed after this point. */
	pcibios_fw_addr_list_del();

	return 0;
}
363 
/*
 * pcibios_assign_resources() runs as an fs_initcall (one level after
 * subsys_initcall), giving motherboard drivers a chance to reserve
 * their resources first.
 */
368 fs_initcall(pcibios_assign_resources);
369 
/*
 * Survey and claim resources for a single root bus: bridge windows
 * first, then enabled devices (pass 0), then disabled devices (pass 1),
 * and finally ROM BARs unless ROM reassignment was requested.
 */
void pcibios_resource_survey_bus(struct pci_bus *bus)
{
	dev_printk(KERN_DEBUG, &bus->dev, "Allocating resources\n");

	pcibios_allocate_bus_resources(bus);

	pcibios_allocate_resources(bus, 0);
	pcibios_allocate_resources(bus, 1);

	if (!(pci_probe & PCI_ASSIGN_ROMS))
		pcibios_allocate_rom_resources(bus);
}
382 
/*
 * Boot-time resource survey over all root buses: claim bridge windows,
 * then enabled-device BARs (pass 0), then disabled-device BARs (pass 1),
 * and finally reserve late e820 regions and IO APIC ranges before
 * unassigned resources get allocated.
 */
void __init pcibios_resource_survey(void)
{
	struct pci_bus *bus;

	DBG("PCI: Allocating resources\n");

	list_for_each_entry(bus, &pci_root_buses, node)
		pcibios_allocate_bus_resources(bus);

	list_for_each_entry(bus, &pci_root_buses, node)
		pcibios_allocate_resources(bus, 0);
	list_for_each_entry(bus, &pci_root_buses, node)
		pcibios_allocate_resources(bus, 1);

	e820_reserve_resources_late();
	/*
	 * Insert the IO APIC resources after PCI initialization has
	 * occurred to handle IO APICS that are mapped in on a BAR in
	 * PCI space, but before trying to assign unassigned pci res.
	 */
	ioapic_insert_resources();
}
405 
/* vm_ops for PCI mmap()ings; .access allows the mapped physical range
 * to be read/written through access_process_vm()-style accessors
 * (NOTE(review): via generic_access_phys — confirm against mm docs). */
static const struct vm_operations_struct pci_mmap_ops = {
	.access = generic_access_phys,
};
409 
/*
 * Establish a userspace mapping of a PCI memory resource for @vma.
 * I/O-port requests are rejected; the caching attribute is chosen from
 * PAT availability and the write_combine request.  Returns 0 on
 * success, -EINVAL for unsupported requests, -EAGAIN if the remap fails.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	unsigned long prot;

	/* I/O space cannot be accessed via normal processor loads and
	 * stores on this platform.
	 */
	if (mmap_state == pci_mmap_io)
		return -EINVAL;

	prot = pgprot_val(vma->vm_page_prot);

	/*
	 * Return error if pat is not enabled and write_combine is requested.
	 * Caller can followup with UC MINUS request and add a WC mtrr if there
	 * is a free mtrr slot.
	 */
	if (!pat_enabled && write_combine)
		return -EINVAL;

	if (pat_enabled && write_combine)
		prot |= _PAGE_CACHE_WC;
	else if (pat_enabled || boot_cpu_data.x86 > 3)
		/*
		 * ioremap() and ioremap_nocache() defaults to UC MINUS for now.
		 * To avoid attribute conflicts, request UC MINUS here
		 * as well.
		 */
		prot |= _PAGE_CACHE_UC_MINUS;

	prot |= _PAGE_IOMAP;	/* creating a mapping for IO */

	vma->vm_page_prot = __pgprot(prot);

	if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot))
		return -EAGAIN;

	/* Install vm_ops so the mapping supports ->access (see pci_mmap_ops). */
	vma->vm_ops = &pci_mmap_ops;

	return 0;
}
454