xref: /openbmc/linux/arch/alpha/kernel/pci.c (revision 1da177e4)
/*
 *	linux/arch/alpha/kernel/pci.c
 *
 * Extruded from code written by
 *	Dave Rusling (david.rusling@reo.mts.dec.com)
 *	David Mosberger (davidm@cs.arizona.edu)
 */

/* 2.3.x PCI/resources, 1999 Andrea Arcangeli <andrea@suse.de> */

/*
 * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
 *	     PCI-PCI bridges cleanup
 */
#include <linux/config.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <asm/machvec.h>

#include "proto.h"
#include "pci_impl.h"


/*
 * Some string constants used by the various core logic (chipset) code.
 */

const char *const pci_io_names[] = {
  "PCI IO bus 0", "PCI IO bus 1", "PCI IO bus 2", "PCI IO bus 3",
  "PCI IO bus 4", "PCI IO bus 5", "PCI IO bus 6", "PCI IO bus 7"
};

const char *const pci_mem_names[] = {
  "PCI mem bus 0", "PCI mem bus 1", "PCI mem bus 2", "PCI mem bus 3",
  "PCI mem bus 4", "PCI mem bus 5", "PCI mem bus 6", "PCI mem bus 7"
};

const char pci_hae0_name[] = "HAE0";

/*
 * Indicate whether we respect the PCI setup left by the console.
 * Keep this long-lived so that at shutdown time we know whether we
 * only probed or actually reconfigured the devices.
 */
int pci_probe_only;

/*
 * The PCI controller list.
 */

struct pci_controller *hose_head, **hose_tail = &hose_head;
struct pci_controller *pci_isa_hose;

/*
 * Quirks.
 */

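/* Force the Intel 82378 "SIO" bridge to report the ISA bridge class so
   that pcibios_fixup_final() below always recognizes it as the ISA
   bridge, regardless of the class code the chip itself advertises. */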
static void __init
quirk_isa_bridge(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_ISA << 8;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82378, quirk_isa_bridge);

static void __init
quirk_cypress(struct pci_dev *dev)
{
	/* The Notorious Cy82C693 chip.  */

	/* The Cypress IDE controller doesn't support native mode, but it
	   has programmable addresses of IDE command/control registers.
	   This violates PCI specifications, confuses the IDE subsystem and
	   causes resource conflicts between the primary HD_CMD register and
	   the floppy controller.  Ugh.  Fix that.  */
	if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE) {
		dev->resource[0].flags = 0;
		dev->resource[1].flags = 0;
	}

	/* The Cypress bridge responds on the PCI bus in the address range
	   0xffff0000-0xffffffff (conventional x86 BIOS ROM).  There is no
	   way to turn this off.  The bridge also supports several extended
	   BIOS ranges (disabled after power-up), and some consoles do turn
	   them on.  So if we use a large direct-map window, or a large SG
	   window, we must avoid the entire 0xfff00000-0xffffffff region.  */
	else if (dev->class >> 8 == PCI_CLASS_BRIDGE_ISA) {
		if (__direct_map_base + __direct_map_size >= 0xfff00000UL)
			__direct_map_size = 0xfff00000UL - __direct_map_base;
		else {
			struct pci_controller *hose = dev->sysdata;
			struct pci_iommu_arena *pci = hose->sg_pci;
			if (pci && pci->dma_base + pci->size >= 0xfff00000UL)
				pci->size = 0xfff00000UL - pci->dma_base;
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, quirk_cypress);

/* Called for each device after PCI setup is done. */
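/* Record the ISA/EISA bridge in isa_bridge and clamp its DMA mask to
   what legacy ISA DMA can address; the Alpha IOMMU code falls back to
   this bridge device when mapping DMA for ISA devices. */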
static void __init
pcibios_fixup_final(struct pci_dev *dev)
{
	unsigned int class = dev->class >> 8;

	if (class == PCI_CLASS_BRIDGE_ISA || class == PCI_CLASS_BRIDGE_EISA) {
		dev->dma_mask = MAX_ISA_DMA_ADDRESS - 1;
		isa_bridge = dev;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final);

/* Just declaring that the power-of-ten prefixes are actually the
   power-of-two ones doesn't make it true :) */
#define KB			1024
#define MB			(1024*KB)
#define GB			(1024*MB)

void
pcibios_align_resource(void *data, struct resource *res,
		       unsigned long size, unsigned long align)
{
	struct pci_dev *dev = data;
	struct pci_controller *hose = dev->sysdata;
	unsigned long alignto;
	unsigned long start = res->start;

	if (res->flags & IORESOURCE_IO) {
		/* Make sure we start at our min on all hoses */
		if (start - hose->io_space->start < PCIBIOS_MIN_IO)
			start = PCIBIOS_MIN_IO + hose->io_space->start;

		/*
		 * Put everything into 0x00-0xff region modulo 0x400
		 */
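		/*
		 * ISA cards traditionally decode only 10 I/O address bits,
		 * so a PCI I/O BAR whose low bits fall in 0x100-0x3ff would
		 * alias the legacy ISA ports every 0x400 bytes.  Round such
		 * a start up to the next 0x400 boundary: e.g. 0x8168 has
		 * (0x168 & 0x300) != 0 and becomes 0x8400, while 0x8068 is
		 * already in the safe 0x00-0xff window and stays put.
		 */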
		if (start & 0x300)
			start = (start + 0x3ff) & ~0x3ff;
	}
	else if (res->flags & IORESOURCE_MEM) {
		/* Make sure we start at our min on all hoses */
		if (start - hose->mem_space->start < PCIBIOS_MIN_MEM)
			start = PCIBIOS_MIN_MEM + hose->mem_space->start;

		/*
		 * The following holds at least for the Low Cost
		 * Alpha implementation of the PCI interface:
		 *
		 * In sparse memory address space, the first
		 * octant (16MB) of every 128MB segment is
		 * aliased to the very first 16 MB of the
		 * address space (i.e., it aliases the ISA
		 * memory address space).  Thus, we try to
		 * avoid allocating PCI devices in that range.
		 * They can be allocated in the 2nd-8th octant
		 * only (112MB total).  Devices that need more
		 * than 112MB of address space must be accessed
		 * through dense memory space only!
		 */

		/* Align to multiple of size of minimum base.  */
		alignto = max(0x1000UL, align);
		start = ALIGN(start, alignto);
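		/* Two adjustments follow, applied only when the hose has a
		   sparse memory space and the region fits in the 112MB that
		   remain per segment: if the aligned start lands in the
		   aliased first octant of a 128MB segment, push it up into
		   the second octant; and if the region would straddle a
		   128MB boundary, move it whole into the second octant of
		   the next segment. */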
		if (hose->sparse_mem_base && size <= 7 * 16*MB) {
			if (((start / (16*MB)) & 0x7) == 0) {
				start &= ~(128*MB - 1);
				start += 16*MB;
				start  = ALIGN(start, alignto);
			}
			if (start/(128*MB) != (start + size - 1)/(128*MB)) {
				start &= ~(128*MB - 1);
				start += (128 + 16)*MB;
				start  = ALIGN(start, alignto);
			}
		}
	}

	res->start = start;
}
#undef KB
#undef MB
#undef GB

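/* All low-level PCI setup on Alpha goes through the machine vector;
   each platform provides its own init_pci hook (if any), invoked here
   at subsys_initcall time. */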
static int __init
pcibios_init(void)
{
	if (alpha_mv.init_pci)
		alpha_mv.init_pci();
	return 0;
}

subsys_initcall(pcibios_init);

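/* No Alpha-specific "pci=" options: return the string unchanged so the
   generic PCI code treats the option as unhandled. */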
char * __init
pcibios_setup(char *str)
{
	return str;
}

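/* On SRM console systems, save each device's config space as it is set
   up so it can be put back later via pci_restore_srm_config(), e.g.
   before control is handed back to the console.  Nothing needs saving
   when the console's setup is left untouched (pci_probe_only). */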
#ifdef ALPHA_RESTORE_SRM_SETUP
static struct pdev_srm_saved_conf *srm_saved_configs;

void __init
pdev_save_srm_config(struct pci_dev *dev)
{
	struct pdev_srm_saved_conf *tmp;
	static int printed = 0;

	if (!alpha_using_srm || pci_probe_only)
		return;

	if (!printed) {
		printk(KERN_INFO "pci: enabling save/restore of SRM state\n");
		printed = 1;
	}

	tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp) {
		printk(KERN_ERR "%s: kmalloc() failed!\n", __FUNCTION__);
		return;
	}
	tmp->next = srm_saved_configs;
	tmp->dev = dev;

	pci_save_state(dev);

	srm_saved_configs = tmp;
}

void
pci_restore_srm_config(void)
{
	struct pdev_srm_saved_conf *tmp;

	/* No need to restore if probed only. */
	if (pci_probe_only)
		return;

	/* Restore SRM config. */
	for (tmp = srm_saved_configs; tmp; tmp = tmp->next) {
		pci_restore_state(tmp->dev);
	}
}
#endif

void __init
pcibios_fixup_resource(struct resource *res, struct resource *root)
{
	res->start += root->start;
	res->end += root->start;
}

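/* BARs read from config space are bus-relative; add the hose's I/O or
   memory space offset to turn each one into the CPU-relative address
   kept in the resource tree. */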
void __init
pcibios_fixup_device_resources(struct pci_dev *dev, struct pci_bus *bus)
{
	/* Update device resources.  */
	struct pci_controller *hose = (struct pci_controller *)bus->sysdata;
	int i;

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (!dev->resource[i].start)
			continue;
		if (dev->resource[i].flags & IORESOURCE_IO)
			pcibios_fixup_resource(&dev->resource[i],
					       hose->io_space);
		else if (dev->resource[i].flags & IORESOURCE_MEM)
			pcibios_fixup_resource(&dev->resource[i],
					       hose->mem_space);
	}
}

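/* For a root bus, hand the hose's I/O and memory apertures to the bus
   and clamp the memory aperture below the direct-map and scatter-gather
   DMA windows so device BARs are never assigned inside them.  Bridge
   windows set up by the console are only read and translated (not
   reassigned) in the pci_probe_only case. */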
void __init
pcibios_fixup_bus(struct pci_bus *bus)
{
	/* Propagate hose info into the subordinate devices.  */

	struct pci_controller *hose = bus->sysdata;
	struct pci_dev *dev = bus->self;

	if (!dev) {
		/* Root bus. */
		u32 pci_mem_end;
		u32 sg_base = hose->sg_pci ? hose->sg_pci->dma_base : ~0;
		unsigned long end;

		bus->resource[0] = hose->io_space;
		bus->resource[1] = hose->mem_space;

		/* Adjust hose mem_space limit to prevent PCI allocations
		   in the iommu windows. */
		pci_mem_end = min((u32)__direct_map_base, sg_base) - 1;
		end = hose->mem_space->start + pci_mem_end;
		if (hose->mem_space->end > end)
			hose->mem_space->end = end;
	} else if (pci_probe_only &&
		   (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		pci_read_bridge_bases(bus);
		pcibios_fixup_device_resources(dev, bus);
	}

	list_for_each_entry(dev, &bus->devices, bus_list) {
		pdev_save_srm_config(dev);
		if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
			pcibios_fixup_device_resources(dev, bus);
	}
}

void __init
pcibios_update_irq(struct pci_dev *dev, int irq)
{
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}

/* Most Alphas have straightforward swizzling needs.  */
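/* Walk up from the device to the root bus, applying the standard
   PCI-PCI bridge interrupt swizzle at every hop (the INTx pin is
   rotated by the slot number, modulo 4).  The updated pin and the
   returned slot on the root bus are what the platform's pci_map_irq
   hook uses to pick an IRQ. */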
u8 __init
common_swizzle(struct pci_dev *dev, u8 *pinp)
{
	u8 pin = *pinp;

	while (dev->bus->parent) {
		pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
		/* Move up the chain of bridges. */
		dev = dev->bus->self;
	}
	*pinp = pin;

	/* The slot is the slot of the last bridge. */
	return PCI_SLOT(dev->devfn);
}

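/* The inverse of pcibios_fixup_resource(): subtract the hose offset to
   get back the bus-relative address that actually lives in the BAR. */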
void __devinit
pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
			 struct resource *res)
{
	struct pci_controller *hose = (struct pci_controller *)dev->sysdata;
	unsigned long offset = 0;

	if (res->flags & IORESOURCE_IO)
		offset = hose->io_space->start;
	else if (res->flags & IORESOURCE_MEM)
		offset = hose->mem_space->start;

	region->start = res->start - offset;
	region->end = res->end - offset;
}

#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL(pcibios_resource_to_bus);
#endif

int
pcibios_enable_device(struct pci_dev *dev, int mask)
{
	u16 cmd, oldcmd;
	int i;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	oldcmd = cmd;

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		struct resource *res = &dev->resource[i];

		if (res->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		else if (res->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}

	if (cmd != oldcmd) {
		printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
		       pci_name(dev), cmd);
		/* Enable the appropriate bits in the PCI command register.  */
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}

/*
 *  If we set up a device for bus mastering, we need to check the latency
 *  timer as certain firmware forgets to set it properly, as seen
 *  on SX164 and LX164 with SRM.
 */
void
pcibios_set_master(struct pci_dev *dev)
{
	u8 lat;
	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
	if (lat >= 16) return;
	printk("PCI: Setting latency timer of device %s to 64\n",
							pci_name(dev));
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
}

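/* In the pci_probe_only case we do not reassign anything; instead, the
   resources are claimed exactly as the console programmed them, which
   inserts every existing BAR into the resource tree and marks that
   space as in use. */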
static void __init
pcibios_claim_one_bus(struct pci_bus *b)
{
	struct pci_dev *dev;
	struct pci_bus *child_bus;

	list_for_each_entry(dev, &b->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];

			if (r->parent || !r->start || !r->flags)
				continue;
			pci_claim_resource(dev, i);
		}
	}

	list_for_each_entry(child_bus, &b->children, node)
		pcibios_claim_one_bus(child_bus);
}

static void __init
pcibios_claim_console_setup(void)
{
	struct pci_bus *b;

	list_for_each_entry(b, &pci_root_buses, node)
		pcibios_claim_one_bus(b);
}

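/* Scan every hose in turn, numbering buses sequentially across hoses.
   If a hose pushes the 8-bit bus number close to its limit (above 224,
   leaving headroom for bridges), restart numbering at 0 and mark the
   following hoses as needing PCI domain (segment) information to stay
   unambiguous. */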
void __init
common_init_pci(void)
{
	struct pci_controller *hose;
	struct pci_bus *bus;
	int next_busno;
	int need_domain_info = 0;

	/* Scan all of the recorded PCI controllers.  */
	for (next_busno = 0, hose = hose_head; hose; hose = hose->next) {
		bus = pci_scan_bus(next_busno, alpha_mv.pci_ops, hose);
		hose->bus = bus;
		hose->need_domain_info = need_domain_info;
		next_busno = bus->subordinate + 1;
		/* Don't allow 8-bit bus number overflow inside the hose -
		   reserve some space for bridges. */
		if (next_busno > 224) {
			next_busno = 0;
			need_domain_info = 1;
		}
	}

	if (pci_probe_only)
		pcibios_claim_console_setup();

	pci_assign_unassigned_resources();
	pci_fixup_irqs(alpha_mv.pci_swizzle, alpha_mv.pci_map_irq);
}


struct pci_controller * __init
alloc_pci_controller(void)
{
	struct pci_controller *hose;

	hose = alloc_bootmem(sizeof(*hose));

	*hose_tail = hose;
	hose_tail = &hose->next;

	return hose;
}

struct resource * __init
alloc_resource(void)
{
	struct resource *res;

	res = alloc_bootmem(sizeof(*res));

	return res;
}


/* Provide information on locations of various I/O regions in physical
   memory.  Do this on a per-card basis so that we choose the right hose.  */
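/* This backs the Alpha-specific pciconfig_iobase() system call, which
   user space (graphics servers and PCI utilities, historically) uses to
   discover the dense/sparse I/O and memory bases of the hose behind
   which a given device lives. */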
asmlinkage long
sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn)
{
	struct pci_controller *hose;
	struct pci_dev *dev;

	/* from hose or from bus.devfn */
	if (which & IOBASE_FROM_HOSE) {
		for (hose = hose_head; hose; hose = hose->next)
			if (hose->index == bus) break;
		if (!hose) return -ENODEV;
	} else {
		/* Special hook for ISA access.  */
		if (bus == 0 && dfn == 0) {
			hose = pci_isa_hose;
		} else {
			dev = pci_find_slot(bus, dfn);
			if (!dev)
				return -ENODEV;
			hose = dev->sysdata;
		}
	}

	switch (which & ~IOBASE_FROM_HOSE) {
	case IOBASE_HOSE:
		return hose->index;
	case IOBASE_SPARSE_MEM:
		return hose->sparse_mem_base;
	case IOBASE_DENSE_MEM:
		return hose->dense_mem_base;
	case IOBASE_SPARSE_IO:
		return hose->sparse_io_base;
	case IOBASE_DENSE_IO:
		return hose->dense_io_base;
	case IOBASE_ROOT_BUS:
		return hose->bus->number;
	}

	return -EOPNOTSUPP;
}

/* Create an __iomem token from a PCI BAR.  Copied from lib/iomap.c with
   no changes, since we don't want the other things in that object file.  */

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	unsigned long start = pci_resource_start(dev, bar);
	unsigned long len = pci_resource_len(dev, bar);
	unsigned long flags = pci_resource_flags(dev, bar);

	if (!len || !start)
		return NULL;
	if (maxlen && len > maxlen)
		len = maxlen;
	if (flags & IORESOURCE_IO)
		return ioport_map(start, len);
	if (flags & IORESOURCE_MEM) {
		/* Not checking IORESOURCE_CACHEABLE because alpha does
		   not distinguish between ioremap and ioremap_nocache.  */
		return ioremap(start, len);
	}
	return NULL;
}

/* Destroy that token.  Not copied from lib/iomap.c.  */
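/* Only MMIO tokens (those obtained via ioremap() above) are unmapped;
   port-I/O tokens from ioport_map() get no teardown here. */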
void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
	if (__is_mmio(addr))
		iounmap(addr);
}

EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);