xref: /openbmc/linux/arch/x86/pci/common.c (revision b6dcefde)
/*
 *	Low-Level PCI Support for PC
 *
 *	(c) 1999--2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/dmi.h>

#include <asm/acpi.h>
#include <asm/segment.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/pci_x86.h>

unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 |
				PCI_PROBE_MMCONF;

unsigned int pci_early_dump_regs;
static int pci_bf_sort;
int pci_routeirq;
int noioapicquirk;
#ifdef CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS
int noioapicreroute = 0;
#else
int noioapicreroute = 1;
#endif
int pcibios_last_bus = -1;
unsigned long pirq_table_addr;
struct pci_bus *pci_root_bus;
struct pci_raw_ops *raw_pci_ops;
struct pci_raw_ops *raw_pci_ext_ops;

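/*
 * Dispatch a raw config-space access: the legacy mechanism (raw_pci_ops)
 * only reaches the first 256 bytes of config space in domain 0, so anything
 * beyond that falls back to the extended ops (e.g. MMCONFIG) when present.
 */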
int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
						int reg, int len, u32 *val)
{
	if (domain == 0 && reg < 256 && raw_pci_ops)
		return raw_pci_ops->read(domain, bus, devfn, reg, len, val);
	if (raw_pci_ext_ops)
		return raw_pci_ext_ops->read(domain, bus, devfn, reg, len, val);
	return -EINVAL;
}

int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
						int reg, int len, u32 val)
{
	if (domain == 0 && reg < 256 && raw_pci_ops)
		return raw_pci_ops->write(domain, bus, devfn, reg, len, val);
	if (raw_pci_ext_ops)
		return raw_pci_ext_ops->write(domain, bus, devfn, reg, len, val);
	return -EINVAL;
}
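
/*
 * Illustrative (hypothetical) use: read the 16-bit vendor ID of device
 * 0000:00:00.0 through whichever low-level mechanism is available:
 *
 *	u32 vendor;
 *	raw_pci_read(0, 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID, 2, &vendor);
 */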

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
{
	return raw_pci_read(pci_domain_nr(bus), bus->number,
				 devfn, where, size, value);
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
{
	return raw_pci_write(pci_domain_nr(bus), bus->number,
				  devfn, where, size, value);
}

struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

/*
 * The legacy, NUMA, and ACPI code all want to call pcibios_scan_root
 * from their initcalls. This flag keeps the root buses from being
 * scanned more than once.
 */
int pcibios_scanned;

/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */
DEFINE_SPINLOCK(pci_config_lock);

static int __devinit can_skip_ioresource_align(const struct dmi_system_id *d)
{
	pci_probe |= PCI_CAN_SKIP_ISA_ALIGN;
	printk(KERN_INFO "PCI: %s detected, can skip ISA alignment\n", d->ident);
	return 0;
}

static const struct dmi_system_id can_skip_pciprobe_dmi_table[] __devinitconst = {
/*
 * Systems where PCI IO resource ISA alignment can be skipped
 * when the ISA enable bit in the bridge control is not set
 */
	{
		.callback = can_skip_ioresource_align,
		.ident = "IBM System x3800",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
		},
	},
	{
		.callback = can_skip_ioresource_align,
		.ident = "IBM System x3850",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "x3850"),
		},
	},
	{
		.callback = can_skip_ioresource_align,
		.ident = "IBM System x3950",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "x3950"),
		},
	},
	{}
};

void __init dmi_check_skip_isa_align(void)
{
	dmi_check_system(can_skip_pciprobe_dmi_table);
}

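/*
 * With "pci=norom", keep ROM BARs that firmware already assigned (they are
 * handled later) and clear unassigned ROM resources so the kernel does not
 * allocate address space for them.
 */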
static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
{
	struct resource *rom_r = &dev->resource[PCI_ROM_RESOURCE];

	if (pci_probe & PCI_NOASSIGN_ROMS) {
		if (rom_r->parent)
			return;
		if (rom_r->start) {
			/* we deal with BIOS assigned ROM later */
			return;
		}
		rom_r->start = rom_r->end = rom_r->flags = 0;
	}
}

/*
 *  Called after each bus is probed, but before its children
 *  are examined.
 */

void __devinit pcibios_fixup_bus(struct pci_bus *b)
{
	struct pci_dev *dev;

	/* root bus? */
	if (!b->parent)
		x86_pci_root_bus_res_quirks(b);
	pci_read_bridge_bases(b);
	list_for_each_entry(dev, &b->devices, bus_list)
		pcibios_fixup_device_resources(dev);
}

/*
 * Only use DMI information to set this if nothing was passed
 * on the kernel command line (which was parsed earlier).
 */

static int __devinit set_bf_sort(const struct dmi_system_id *d)
{
	if (pci_bf_sort == pci_bf_sort_default) {
		pci_bf_sort = pci_dmi_bf;
		printk(KERN_INFO "PCI: %s detected, enabling pci=bfsort.\n", d->ident);
	}
	return 0;
}

/*
 * Enable renumbering of PCI bus# ranges to reach all PCI busses (Cardbus)
 */
#ifdef __i386__
static int __devinit assign_all_busses(const struct dmi_system_id *d)
{
	pci_probe |= PCI_ASSIGN_ALL_BUSSES;
	printk(KERN_INFO "%s detected: enabling PCI bus# renumbering"
			" (pci=assign-busses)\n", d->ident);
	return 0;
}
#endif

static const struct dmi_system_id __devinitconst pciprobe_dmi_table[] = {
#ifdef __i386__
/*
 * Laptops which need pci=assign-busses to see Cardbus cards
 */
	{
		.callback = assign_all_busses,
		.ident = "Samsung X20 Laptop",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
			DMI_MATCH(DMI_PRODUCT_NAME, "SX20S"),
		},
	},
#endif		/* __i386__ */
	{
		.callback = set_bf_sort,
		.ident = "Dell PowerEdge 1950",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1950"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "Dell PowerEdge 1955",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1955"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "Dell PowerEdge 2900",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2900"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "Dell PowerEdge 2950",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2950"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "Dell PowerEdge R900",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R900"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL20p G3",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL20p G3"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL20p G4",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL20p G4"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL30p G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL30p G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL25p G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL25p G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL35p G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL35p G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL45p G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL45p G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL45p G2",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL45p G2"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL460c G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL460c G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL465c G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL465c G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL480c G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL480c G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL685c G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL685c G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant DL360",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL360"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant DL380",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL380"),
		},
	},
#ifdef __i386__
	{
		.callback = assign_all_busses,
		.ident = "Compaq EVO N800c",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
			DMI_MATCH(DMI_PRODUCT_NAME, "EVO N800c"),
		},
	},
#endif
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant DL385 G2",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL385 G2"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant DL585 G2",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL585 G2"),
		},
	},
	{}
};

void __init dmi_check_pciprobe(void)
{
	dmi_check_system(pciprobe_dmi_table);
}

struct pci_bus * __devinit pcibios_scan_root(int busnum)
{
	struct pci_bus *bus = NULL;
	struct pci_sysdata *sd;

	while ((bus = pci_find_next_bus(bus)) != NULL) {
		if (bus->number == busnum) {
			/* Already scanned */
			return bus;
		}
	}

	/* Allocate per-root-bus (not per bus) arch-specific data.
	 * TODO: leak; this memory is never freed.
	 * It's arguable whether it's worth the trouble to care.
	 */
	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
	if (!sd) {
		printk(KERN_ERR "PCI: OOM, not probing PCI bus %02x\n", busnum);
		return NULL;
	}

	sd->node = get_mp_bus_to_node(busnum);

	printk(KERN_DEBUG "PCI: Probing PCI hardware (bus %02x)\n", busnum);
	bus = pci_scan_bus_parented(NULL, busnum, &pci_root_ops, sd);
	if (!bus)
		kfree(sd);

	return bus;
}

int __init pcibios_init(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (!raw_pci_ops) {
		printk(KERN_WARNING "PCI: System does not support PCI\n");
		return 0;
	}

	/*
	 * Set the PCI cacheline size to that of the CPU if the CPU has
	 * reported it.  For older CPUs that don't support cpuid, we set it
	 * to 32 bytes.  That is also good for 386/486s (which actually have
	 * a 16-byte cacheline), as quite a few PCI devices do not support
	 * smaller values.
	 */
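	/*
	 * Note: x86_clflush_size is in bytes, while pci_dfl_cache_line_size
	 * (like the PCI CACHE_LINE_SIZE register) counts 32-bit dwords,
	 * hence the conversion by >> 2 below.
	 */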
	if (c->x86_clflush_size > 0) {
		pci_dfl_cache_line_size = c->x86_clflush_size >> 2;
		printk(KERN_DEBUG "PCI: pci_cache_line_size set to %d bytes\n",
			pci_dfl_cache_line_size << 2);
	} else {
		pci_dfl_cache_line_size = 32 >> 2;
		printk(KERN_DEBUG "PCI: Unknown cacheline size. Setting to 32 bytes\n");
	}

	pcibios_resource_survey();

	if (pci_bf_sort >= pci_force_bf)
		pci_sort_breadthfirst();
	return 0;
}

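/*
 * Parse one "pci=" option.  Returning NULL tells the caller the option was
 * consumed here; returning the string unchanged lets the generic PCI code
 * try to parse it instead.
 */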
char * __devinit pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		pci_probe = 0;
		return NULL;
	} else if (!strcmp(str, "bfsort")) {
		pci_bf_sort = pci_force_bf;
		return NULL;
	} else if (!strcmp(str, "nobfsort")) {
		pci_bf_sort = pci_force_nobf;
		return NULL;
	}
#ifdef CONFIG_PCI_BIOS
	else if (!strcmp(str, "bios")) {
		pci_probe = PCI_PROBE_BIOS;
		return NULL;
	} else if (!strcmp(str, "nobios")) {
		pci_probe &= ~PCI_PROBE_BIOS;
		return NULL;
	} else if (!strcmp(str, "biosirq")) {
		pci_probe |= PCI_BIOS_IRQ_SCAN;
		return NULL;
	} else if (!strncmp(str, "pirqaddr=", 9)) {
		pirq_table_addr = simple_strtoul(str+9, NULL, 0);
		return NULL;
	}
#endif
#ifdef CONFIG_PCI_DIRECT
	else if (!strcmp(str, "conf1")) {
		pci_probe = PCI_PROBE_CONF1 | PCI_NO_CHECKS;
		return NULL;
	}
	else if (!strcmp(str, "conf2")) {
		pci_probe = PCI_PROBE_CONF2 | PCI_NO_CHECKS;
		return NULL;
	}
#endif
#ifdef CONFIG_PCI_MMCONFIG
	else if (!strcmp(str, "nommconf")) {
		pci_probe &= ~PCI_PROBE_MMCONF;
		return NULL;
	}
	else if (!strcmp(str, "check_enable_amd_mmconf")) {
		pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF;
		return NULL;
	}
#endif
	else if (!strcmp(str, "noacpi")) {
		acpi_noirq_set();
		return NULL;
	}
	else if (!strcmp(str, "noearly")) {
		pci_probe |= PCI_PROBE_NOEARLY;
		return NULL;
	}
#ifndef CONFIG_X86_VISWS
	else if (!strcmp(str, "usepirqmask")) {
		pci_probe |= PCI_USE_PIRQ_MASK;
		return NULL;
	} else if (!strncmp(str, "irqmask=", 8)) {
		pcibios_irq_mask = simple_strtol(str+8, NULL, 0);
		return NULL;
	} else if (!strncmp(str, "lastbus=", 8)) {
		pcibios_last_bus = simple_strtol(str+8, NULL, 0);
		return NULL;
	}
#endif
	else if (!strcmp(str, "rom")) {
		pci_probe |= PCI_ASSIGN_ROMS;
		return NULL;
	} else if (!strcmp(str, "norom")) {
		pci_probe |= PCI_NOASSIGN_ROMS;
		return NULL;
	} else if (!strcmp(str, "assign-busses")) {
		pci_probe |= PCI_ASSIGN_ALL_BUSSES;
		return NULL;
	} else if (!strcmp(str, "use_crs")) {
		pci_probe |= PCI_USE__CRS;
		return NULL;
	} else if (!strcmp(str, "earlydump")) {
		pci_early_dump_regs = 1;
		return NULL;
	} else if (!strcmp(str, "routeirq")) {
		pci_routeirq = 1;
		return NULL;
	} else if (!strcmp(str, "skip_isa_align")) {
		pci_probe |= PCI_CAN_SKIP_ISA_ALIGN;
		return NULL;
	} else if (!strcmp(str, "noioapicquirk")) {
		noioapicquirk = 1;
		return NULL;
	} else if (!strcmp(str, "ioapicreroute")) {
		if (noioapicreroute != -1)
			noioapicreroute = 0;
		return NULL;
	} else if (!strcmp(str, "noioapicreroute")) {
		if (noioapicreroute != -1)
			noioapicreroute = 1;
		return NULL;
	}
	return str;
}

unsigned int pcibios_assign_all_busses(void)
{
	return (pci_probe & PCI_ASSIGN_ALL_BUSSES) ? 1 : 0;
}

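/*
 * Enable the device's I/O and memory resources and, unless it is using MSI,
 * route its legacy INTx interrupt.
 */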
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	int err;

	if ((err = pci_enable_resources(dev, mask)) < 0)
		return err;

	if (!pci_dev_msi_enabled(dev))
		return pcibios_enable_irq(dev);
	return 0;
}

void pcibios_disable_device(struct pci_dev *dev)
{
	if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq)
		pcibios_disable_irq(dev);
}

int pci_ext_cfg_avail(struct pci_dev *dev)
{
	if (raw_pci_ext_ops)
		return 1;
	else
		return 0;
}

struct pci_bus * __devinit pci_scan_bus_on_node(int busno, struct pci_ops *ops, int node)
{
	struct pci_bus *bus = NULL;
	struct pci_sysdata *sd;

	/*
	 * Allocate per-root-bus (not per bus) arch-specific data.
	 * TODO: leak; this memory is never freed.
	 * It's arguable whether it's worth the trouble to care.
	 */
	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
	if (!sd) {
		printk(KERN_ERR "PCI: OOM, skipping PCI bus %02x\n", busno);
		return NULL;
	}
	sd->node = node;
	bus = pci_scan_bus(busno, ops, sd);
	if (!bus)
		kfree(sd);

	return bus;
}

struct pci_bus * __devinit pci_scan_bus_with_sysdata(int busno)
{
	return pci_scan_bus_on_node(busno, &pci_root_ops, -1);
}

/*
 * NUMA info for PCI busses
 *
 * Early arch code is responsible for filling in reasonable values here.
 * A node id of "-1" means "use current node".  In other words, if a bus
 * has a -1 node id, it's not tightly coupled to any particular chunk
 * of memory (as is the case on some Nehalem systems).
 */
#ifdef CONFIG_NUMA

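/* A PCI domain can contain at most 256 buses (the bus number is 8 bits). */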
#define BUS_NR 256

#ifdef CONFIG_X86_64

static int mp_bus_to_node[BUS_NR] = {
	[0 ... BUS_NR - 1] = -1
};

void set_mp_bus_to_node(int busnum, int node)
{
	if (busnum >= 0 && busnum < BUS_NR)
		mp_bus_to_node[busnum] = node;
}

int get_mp_bus_to_node(int busnum)
{
	int node = -1;

	if (busnum < 0 || busnum > (BUS_NR - 1))
		return node;

	node = mp_bus_to_node[busnum];

	/*
	 * If there is no RAM on that node, return -1 and let
	 * numa_node_id() decide later (e.g. in dma_alloc_pages()).
	 */
	if (node != -1 && !node_online(node))
		node = -1;

	return node;
}

#else /* CONFIG_X86_32 */

static int mp_bus_to_node[BUS_NR] = {
	[0 ... BUS_NR - 1] = -1
};

void set_mp_bus_to_node(int busnum, int node)
{
	if (busnum >= 0 && busnum < BUS_NR)
		mp_bus_to_node[busnum] = (unsigned char) node;
}

int get_mp_bus_to_node(int busnum)
{
	int node;

	if (busnum < 0 || busnum > (BUS_NR - 1))
		return 0;
	node = mp_bus_to_node[busnum];
	return node;
}

#endif /* CONFIG_X86_32 */

#endif /* CONFIG_NUMA */
674