xref: /openbmc/linux/arch/x86/kernel/mpparse.c (revision 4bb1eb3c)
// SPDX-License-Identifier: GPL-2.0
/*
 *	Intel Multiprocessor Specification 1.1 and 1.4
 *	compliant MP-table parsing routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 *	(c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 *      (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
 */

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/smp.h>
#include <linux/pci.h>

#include <asm/io_apic.h>
#include <asm/acpi.h>
#include <asm/irqdomain.h>
#include <asm/mtrr.h>
#include <asm/mpspec.h>
#include <asm/proto.h>
#include <asm/bios_ebda.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
#include <asm/smp.h>

#include <asm/apic.h>

/*
 * Checksum an MP configuration block.
 */

static int __init mpf_checksum(unsigned char *mp, int len)
{
	int sum = 0;

	while (len--)
		sum += *mp++;

	return sum & 0xFF;
}

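/*
 * Default x86_init.mpparse.mpc_apic_id() hook: take the APIC ID straight
 * from the MP table CPU entry.
 */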
int __init default_mpc_apic_id(struct mpc_cpu *m)
{
	return m->apicid;
}

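/*
 * Register one CPU entry from the MP table: disabled CPUs are only
 * counted, the boot CPU's APIC ID is recorded, and everything else is
 * handed to generic_processor_info().
 */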
static void __init MP_processor_info(struct mpc_cpu *m)
{
	int apicid;
	char *bootup_cpu = "";

	if (!(m->cpuflag & CPU_ENABLED)) {
		disabled_cpus++;
		return;
	}

	apicid = x86_init.mpparse.mpc_apic_id(m);

	if (m->cpuflag & CPU_BOOTPROCESSOR) {
		bootup_cpu = " (Bootup-CPU)";
		boot_cpu_physical_apicid = m->apicid;
	}

	pr_info("Processor #%d%s\n", m->apicid, bootup_cpu);
	generic_processor_info(apicid, m->apicver);
}

#ifdef CONFIG_X86_IO_APIC
void __init default_mpc_oem_bus_info(struct mpc_bus *m, char *str)
{
	memcpy(str, m->bustype, 6);
	str[6] = 0;
	apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str);
}

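/*
 * Record one bus entry: mark whether the bus is PCI in mp_bus_not_pci
 * (and, with EISA support, remember its type), which is later used to
 * pick default trigger/polarity settings for interrupts on that bus.
 */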
static void __init MP_bus_info(struct mpc_bus *m)
{
	char str[7];

	x86_init.mpparse.mpc_oem_bus_info(m, str);

#if MAX_MP_BUSSES < 256
	if (m->busid >= MAX_MP_BUSSES) {
		pr_warn("MP table busid value (%d) for bustype %s is too large, max. supported is %d\n",
			m->busid, str, MAX_MP_BUSSES - 1);
		return;
	}
#endif

	set_bit(m->busid, mp_bus_not_pci);
	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
#ifdef CONFIG_EISA
		mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
#endif
	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
		if (x86_init.mpparse.mpc_oem_pci_bus)
			x86_init.mpparse.mpc_oem_pci_bus(m);

		clear_bit(m->busid, mp_bus_not_pci);
#ifdef CONFIG_EISA
		mp_bus_id_to_type[m->busid] = MP_BUS_PCI;
	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
		mp_bus_id_to_type[m->busid] = MP_BUS_EISA;
#endif
	} else
		pr_warn("Unknown bustype %s - ignoring\n", str);
}

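/*
 * Register a usable I/O APIC with the core IO-APIC code; its GSIs are
 * allocated starting at gsi_top and routed through the legacy irqdomain
 * ops.
 */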
static void __init MP_ioapic_info(struct mpc_ioapic *m)
{
	struct ioapic_domain_cfg cfg = {
		.type = IOAPIC_DOMAIN_LEGACY,
		.ops = &mp_ioapic_irqdomain_ops,
	};

	if (m->flags & MPC_APIC_USABLE)
		mp_register_ioapic(m->apicid, m->apicaddr, gsi_top, &cfg);
}

static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
{
	apic_printk(APIC_VERBOSE,
		"Int: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC INT %02x\n",
		mp_irq->irqtype, mp_irq->irqflag & 3,
		(mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
		mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
}

#else /* CONFIG_X86_IO_APIC */
static inline void __init MP_bus_info(struct mpc_bus *m) {}
static inline void __init MP_ioapic_info(struct mpc_ioapic *m) {}
#endif /* CONFIG_X86_IO_APIC */

static void __init MP_lintsrc_info(struct mpc_lintsrc *m)
{
	apic_printk(APIC_VERBOSE,
		"Lint: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC LINT %02x\n",
		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbusid,
		m->srcbusirq, m->destapic, m->destapiclint);
}

/*
 * Read/parse the MPC
 */
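
/*
 * Sanity-check the MPC header: "PCMP" signature, whole-table checksum,
 * spec revision 1 or 4 (MP spec 1.1/1.4) and a non-NULL local APIC
 * address; also copies out the OEM and product ID strings.
 */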
static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str)
{

	if (memcmp(mpc->signature, MPC_SIGNATURE, 4)) {
		pr_err("MPTABLE: bad signature [%c%c%c%c]!\n",
		       mpc->signature[0], mpc->signature[1],
		       mpc->signature[2], mpc->signature[3]);
		return 0;
	}
	if (mpf_checksum((unsigned char *)mpc, mpc->length)) {
		pr_err("MPTABLE: checksum error!\n");
		return 0;
	}
	if (mpc->spec != 0x01 && mpc->spec != 0x04) {
		pr_err("MPTABLE: bad table version (%d)!!\n", mpc->spec);
		return 0;
	}
	if (!mpc->lapic) {
		pr_err("MPTABLE: null local APIC address!\n");
		return 0;
	}
	memcpy(oem, mpc->oem, 8);
	oem[8] = 0;
	pr_info("MPTABLE: OEM ID: %s\n", oem);

	memcpy(str, mpc->productid, 12);
	str[12] = 0;

	pr_info("MPTABLE: Product ID: %s\n", str);

	pr_info("MPTABLE: APIC at: 0x%X\n", mpc->lapic);

	return 1;
}

static void skip_entry(unsigned char **ptr, int *count, int size)
{
	*ptr += size;
	*count += size;
}

static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
{
	pr_err("Your mptable is wrong, contact your HW vendor!\n");
	pr_cont("type %x\n", *mpt);
	print_hex_dump(KERN_ERR, "  ", DUMP_PREFIX_ADDRESS, 16,
			1, mpc, mpc->length, 1);
}

void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { }

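/*
 * Walk the variable-size entries that follow the fixed MPC header and
 * hand each one to the appropriate MP_*_info() helper.  With 'early'
 * set, only the local APIC address is taken from the table.
 */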
static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
{
	char str[16];
	char oem[10];

	int count = sizeof(*mpc);
	unsigned char *mpt = ((unsigned char *)mpc) + count;

	if (!smp_check_mpc(mpc, oem, str))
		return 0;

	/* Initialize the lapic mapping */
	if (!acpi_lapic)
		register_lapic_address(mpc->lapic);

	if (early)
		return 1;

	if (mpc->oemptr)
		x86_init.mpparse.smp_read_mpc_oem(mpc);

	/*
	 * Now process the configuration blocks.
	 */
	x86_init.mpparse.mpc_record(0);

	while (count < mpc->length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			/* ACPI may have already provided this data */
			if (!acpi_lapic)
				MP_processor_info((struct mpc_cpu *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
			break;
		case MP_BUS:
			MP_bus_info((struct mpc_bus *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
			break;
		case MP_IOAPIC:
			MP_ioapic_info((struct mpc_ioapic *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
			break;
		case MP_INTSRC:
			mp_save_irq((struct mpc_intsrc *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
			break;
		case MP_LINTSRC:
			MP_lintsrc_info((struct mpc_lintsrc *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
			break;
		default:
			/* wrong mptable */
			smp_dump_mptable(mpc, mpt);
			count = mpc->length;
			break;
		}
		x86_init.mpparse.mpc_record(1);
	}

	if (!num_processors)
		pr_err("MPTABLE: no processors registered!\n");
	return num_processors;
}

#ifdef CONFIG_X86_IO_APIC

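/*
 * The EISA Edge/Level Control Registers (ports 0x4d0/0x4d1) hold one bit
 * per ISA IRQ: 1 = level triggered, 0 = edge triggered.
 */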
static int __init ELCR_trigger(unsigned int irq)
{
	unsigned int port;

	port = 0x4d0 + (irq >> 3);
	return (inb(port) >> (irq & 7)) & 1;
}

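/*
 * Fabricate the legacy INT entries (plus the ExtINT entry for the 8259A)
 * that an MPS "default configuration" implies but does not spell out in
 * the table.
 */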
static void __init construct_default_ioirq_mptable(int mpc_default_type)
{
	struct mpc_intsrc intsrc;
	int i;
	int ELCR_fallback = 0;

	intsrc.type = MP_INTSRC;
	intsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
	intsrc.srcbus = 0;
	intsrc.dstapic = mpc_ioapic_id(0);

	intsrc.irqtype = mp_INT;

	/*
	 *  If true, we have an ISA/PCI system with no IRQ entries
	 *  in the MP table. To prevent the PCI interrupts from being set up
	 *  incorrectly, we try to use the ELCR. The sanity check to see if
	 *  there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
	 *  never be level sensitive, so we simply see if the ELCR agrees.
	 *  If it does, we assume it's valid.
	 */
	if (mpc_default_type == 5) {
		pr_info("ISA/PCI bus type with no IRQ information... falling back to ELCR\n");

		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) ||
		    ELCR_trigger(13))
			pr_err("ELCR contains invalid data... not using ELCR\n");
		else {
			pr_info("Using ELCR to identify PCI interrupts\n");
			ELCR_fallback = 1;
		}
	}

	for (i = 0; i < 16; i++) {
		switch (mpc_default_type) {
		case 2:
			if (i == 0 || i == 13)
				continue;	/* IRQ0 & IRQ13 not connected */
			/* fall through */
		default:
			if (i == 2)
				continue;	/* IRQ2 is never connected */
		}

		if (ELCR_fallback) {
			/*
			 *  If the ELCR indicates a level-sensitive interrupt, we
			 *  copy that information over to the MP table in the
			 *  irqflag field (level sensitive, active high polarity).
			 */
			if (ELCR_trigger(i)) {
				intsrc.irqflag = MP_IRQTRIG_LEVEL |
						 MP_IRQPOL_ACTIVE_HIGH;
			} else {
				intsrc.irqflag = MP_IRQTRIG_DEFAULT |
						 MP_IRQPOL_DEFAULT;
			}
		}

		intsrc.srcbusirq = i;
		intsrc.dstirq = i ? i : 2;	/* IRQ0 to INTIN2 */
		mp_save_irq(&intsrc);
	}

	intsrc.irqtype = mp_ExtINT;
	intsrc.srcbusirq = 0;
	intsrc.dstirq = 0;	/* 8259A to INTIN0 */
	mp_save_irq(&intsrc);
}

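/*
 * Build the bus and I/O APIC entries implied by one of the MPS default
 * configurations (mpc_default_type), then fill in the IRQ entries via
 * construct_default_ioirq_mptable().
 */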
static void __init construct_ioapic_table(int mpc_default_type)
{
	struct mpc_ioapic ioapic;
	struct mpc_bus bus;

	bus.type = MP_BUS;
	bus.busid = 0;
	switch (mpc_default_type) {
	default:
		pr_err("???\nUnknown standard configuration %d\n",
		       mpc_default_type);
		/* fall through */
	case 1:
	case 5:
		memcpy(bus.bustype, "ISA   ", 6);
		break;
	case 2:
	case 6:
	case 3:
		memcpy(bus.bustype, "EISA  ", 6);
		break;
	}
	MP_bus_info(&bus);
	if (mpc_default_type > 4) {
		bus.busid = 1;
		memcpy(bus.bustype, "PCI   ", 6);
		MP_bus_info(&bus);
	}

	ioapic.type	= MP_IOAPIC;
	ioapic.apicid	= 2;
	ioapic.apicver	= mpc_default_type > 4 ? 0x10 : 0x01;
	ioapic.flags	= MPC_APIC_USABLE;
	ioapic.apicaddr	= IO_APIC_DEFAULT_PHYS_BASE;
	MP_ioapic_info(&ioapic);

	/*
	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
	 */
	construct_default_ioirq_mptable(mpc_default_type);
}
#else
static inline void __init construct_ioapic_table(int mpc_default_type) { }
#endif

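/*
 * Construct a complete default MP configuration in memory: two CPUs, the
 * implied bus and I/O APIC entries, and the standard ExtINT/NMI local
 * interrupt entries.
 */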
static inline void __init construct_default_ISA_mptable(int mpc_default_type)
{
	struct mpc_cpu processor;
	struct mpc_lintsrc lintsrc;
	int linttypes[2] = { mp_ExtINT, mp_NMI };
	int i;

	/*
	 * local APIC has default address
	 */
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

	/*
	 * 2 CPUs, numbered 0 & 1.
	 */
	processor.type = MP_PROCESSOR;
	/* Either an integrated APIC or a discrete 82489DX. */
	processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	processor.cpuflag = CPU_ENABLED;
	processor.cpufeature = (boot_cpu_data.x86 << 8) |
	    (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
	processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
	processor.reserved[0] = 0;
	processor.reserved[1] = 0;
	for (i = 0; i < 2; i++) {
		processor.apicid = i;
		MP_processor_info(&processor);
	}

	construct_ioapic_table(mpc_default_type);

	lintsrc.type = MP_LINTSRC;
	lintsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
	lintsrc.srcbusid = 0;
	lintsrc.srcbusirq = 0;
	lintsrc.destapic = MP_APIC_ALL;
	for (i = 0; i < 2; i++) {
		lintsrc.irqtype = linttypes[i];
		lintsrc.destapiclint = i;
		MP_lintsrc_info(&lintsrc);
	}
}

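/*
 * Physical address (and validity flag) of the MP floating pointer
 * structure located by smp_scan_config().
 */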
static unsigned long mpf_base;
static bool mpf_found;

static unsigned long __init get_mpc_size(unsigned long physptr)
{
	struct mpc_table *mpc;
	unsigned long size;

	mpc = early_memremap(physptr, PAGE_SIZE);
	size = mpc->length;
	early_memunmap(mpc, PAGE_SIZE);
	apic_printk(APIC_VERBOSE, "  mpc: %lx-%lx\n", physptr, physptr + size);

	return size;
}

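/*
 * Map and parse the MP configuration table that the floating pointer
 * points to.  If the BIOS supplied no IRQ entries at all, fall back to a
 * default ISA IRQ table so the I/O APIC can still be used.
 */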
static int __init check_physptr(struct mpf_intel *mpf, unsigned int early)
{
	struct mpc_table *mpc;
	unsigned long size;

	size = get_mpc_size(mpf->physptr);
	mpc = early_memremap(mpf->physptr, size);

	/*
	 * Read the physical hardware table.  Anything here will
	 * override the defaults.
	 */
	if (!smp_read_mpc(mpc, early)) {
#ifdef CONFIG_X86_LOCAL_APIC
		smp_found_config = 0;
#endif
		pr_err("BIOS bug, MP table errors detected!...\n");
		pr_cont("... disabling SMP support. (tell your hw vendor)\n");
		early_memunmap(mpc, size);
		return -1;
	}
	early_memunmap(mpc, size);

	if (early)
		return -1;

#ifdef CONFIG_X86_IO_APIC
	/*
	 * If there are no explicit MP IRQ entries, then we are
	 * broken.  We set up most of the low 16 IO-APIC pins to
	 * ISA defaults and hope it will work.
	 */
	if (!mp_irq_entries) {
		struct mpc_bus bus;

		pr_err("BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");

		bus.type = MP_BUS;
		bus.busid = 0;
		memcpy(bus.bustype, "ISA   ", 6);
		MP_bus_info(&bus);

		construct_default_ioirq_mptable(0);
	}
#endif

	return 0;
}

/*
 * Parse the MP configuration located earlier by default_find_smp_config().
 */
void __init default_get_smp_config(unsigned int early)
{
	struct mpf_intel *mpf;

	if (!smp_found_config)
		return;

	if (!mpf_found)
		return;

	if (acpi_lapic && early)
		return;

	/*
	 * MPS doesn't support hyperthreading: the MP table only lists the
	 * APIC ID of thread 0 of each core, so prefer ACPI when it has
	 * provided both LAPIC and IOAPIC information.
	 */
	if (acpi_lapic && acpi_ioapic)
		return;

	mpf = early_memremap(mpf_base, sizeof(*mpf));
	if (!mpf) {
		pr_err("MPTABLE: error mapping MP table\n");
		return;
	}

	pr_info("Intel MultiProcessor Specification v1.%d\n",
		mpf->specification);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
	if (mpf->feature2 & (1 << 7)) {
		pr_info("    IMCR and PIC compatibility mode.\n");
		pic_mode = 1;
	} else {
		pr_info("    Virtual Wire compatibility mode.\n");
		pic_mode = 0;
	}
#endif
	/*
	 * Now see if we need to read further.
	 */
	if (mpf->feature1) {
		if (early) {
			/*
			 * local APIC has default address
			 */
			mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
			goto out;
		}

		pr_info("Default MP configuration #%d\n", mpf->feature1);
		construct_default_ISA_mptable(mpf->feature1);

	} else if (mpf->physptr) {
		if (check_physptr(mpf, early))
			goto out;
	} else
		BUG();

	if (!early)
		pr_info("Processors: %d\n", num_processors);
	/*
	 * Only use the first configuration found.
	 */
out:
	early_memunmap(mpf, sizeof(*mpf));
}

static void __init smp_reserve_memory(struct mpf_intel *mpf)
{
	memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr));
}

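/*
 * Scan a physical memory range, 16 bytes at a time, for the MP floating
 * pointer structure: "_MP_" signature, length of one 16-byte paragraph,
 * valid checksum and spec revision 1 or 4.  On a hit, remember its
 * address and reserve both the structure and the MP config table it
 * points to.
 */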
static int __init smp_scan_config(unsigned long base, unsigned long length)
{
	unsigned int *bp;
	struct mpf_intel *mpf;
	int ret = 0;

	apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n",
		    base, base + length - 1);
	BUILD_BUG_ON(sizeof(*mpf) != 16);

	while (length > 0) {
		bp = early_memremap(base, length);
		mpf = (struct mpf_intel *)bp;
		if ((*bp == SMP_MAGIC_IDENT) &&
		    (mpf->length == 1) &&
		    !mpf_checksum((unsigned char *)bp, 16) &&
		    ((mpf->specification == 1)
		     || (mpf->specification == 4))) {
#ifdef CONFIG_X86_LOCAL_APIC
			smp_found_config = 1;
#endif
			mpf_base = base;
			mpf_found = true;

			pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n",
				base, base + sizeof(*mpf) - 1);

			memblock_reserve(base, sizeof(*mpf));
			if (mpf->physptr)
				smp_reserve_memory(mpf);

			ret = 1;
		}
		early_memunmap(bp, length);

		if (ret)
			break;

		base += 16;
		length -= 16;
	}
	return ret;
}

void __init default_find_smp_config(void)
{
	unsigned int address;

	/*
	 * FIXME: Linux assumes you have 640K of base RAM...
	 * this continues the error...
	 *
	 * 1) Scan the bottom 1K for a signature
	 * 2) Scan the top 1K of base RAM
	 * 3) Scan the 64K of BIOS
	 */
	if (smp_scan_config(0x0, 0x400) ||
	    smp_scan_config(639 * 0x400, 0x400) ||
	    smp_scan_config(0xF0000, 0x10000))
		return;
	/*
	 * If it is an SMP machine we should know now, unless the
	 * configuration is in an EISA bus machine with an
	 * extended BIOS data area (EBDA).
	 *
	 * There is a real-mode segmented pointer to the 4K EBDA area
	 * at 0x40E; calculate and scan it here.
	 *
	 * NOTE! There are Linux loaders that will corrupt the EBDA
	 * area, and as such this kind of SMP config may be less
	 * trustworthy, simply because the SMP table may have been
	 * stomped on during early boot. These loaders are buggy and
	 * should be fixed.
	 *
	 * The MP 1.4 spec states that only the first 1K of the 4K EBDA
	 * should be scanned.
	 */

	address = get_bios_ebda();
	if (address)
		smp_scan_config(address, 0x400);
}

#ifdef CONFIG_X86_IO_APIC
static u8 __initdata irq_used[MAX_IRQ_SOURCES];

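/*
 * Find the mp_irqs[] slot that matches a level/active-low INT entry from
 * the table.  Returns the slot index on a match, 0 for entries that are
 * left alone (not mp_INT or not level/active-low), -1 if no match exists
 * and -2 if the matching slot was already claimed.
 */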
static int  __init get_MP_intsrc_index(struct mpc_intsrc *m)
{
	int i;

	if (m->irqtype != mp_INT)
		return 0;

	if (m->irqflag != (MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW))
		return 0;

	/* not legacy */

	for (i = 0; i < mp_irq_entries; i++) {
		if (mp_irqs[i].irqtype != mp_INT)
			continue;

		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
					   MP_IRQPOL_ACTIVE_LOW))
			continue;

		if (mp_irqs[i].srcbus != m->srcbus)
			continue;
		if (mp_irqs[i].srcbusirq != m->srcbusirq)
			continue;
		if (irq_used[i]) {
			/* already claimed */
			return -2;
		}
		irq_used[i] = 1;
		return i;
	}

	/* not found */
	return -1;
}

#define SPARE_SLOT_NUM 20

static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];

static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
{
	int i;

	apic_printk(APIC_VERBOSE, "OLD ");
	print_mp_irq_info(m);

	i = get_MP_intsrc_index(m);
	if (i > 0) {
		memcpy(m, &mp_irqs[i], sizeof(*m));
		apic_printk(APIC_VERBOSE, "NEW ");
		print_mp_irq_info(&mp_irqs[i]);
		return;
	}
	if (!i) {
		/* legacy, do nothing */
		return;
	}
	if (*nr_m_spare < SPARE_SLOT_NUM) {
		/*
		 * Not found (-1) or already claimed (-2): this table entry is
		 * stale, so remember the slot and reuse it later for an
		 * mp_irqs[] entry that is missing from the table.
		 */
		m_spare[*nr_m_spare] = m;
		*nr_m_spare += 1;
	}
}

static int __init
check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count)
{
	if (!mpc_new_phys || count <= mpc_new_length) {
		WARN(1, "update_mptable: No spare slots (length: %x)\n", count);
		return -1;
	}

	return 0;
}
#else /* CONFIG_X86_IO_APIC */
static
inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
#endif /* CONFIG_X86_IO_APIC */

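/*
 * Rewrite the INT entries of the MP config table so they match what the
 * kernel actually has in mp_irqs[]: stale entries are replaced in place,
 * missing ones are appended (into the spare slots or the newly allocated
 * table), and the table checksum is recomputed.
 */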
static int  __init replace_intsrc_all(struct mpc_table *mpc,
					unsigned long mpc_new_phys,
					unsigned long mpc_new_length)
{
#ifdef CONFIG_X86_IO_APIC
	int i;
#endif
	int count = sizeof(*mpc);
	int nr_m_spare = 0;
	unsigned char *mpt = ((unsigned char *)mpc) + count;

	pr_info("mpc_length %x\n", mpc->length);
	while (count < mpc->length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
			break;
		case MP_BUS:
			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
			break;
		case MP_IOAPIC:
			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
			break;
		case MP_INTSRC:
			check_irq_src((struct mpc_intsrc *)mpt, &nr_m_spare);
			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
			break;
		case MP_LINTSRC:
			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
			break;
		default:
			/* wrong mptable */
			smp_dump_mptable(mpc, mpt);
			goto out;
		}
	}

#ifdef CONFIG_X86_IO_APIC
	for (i = 0; i < mp_irq_entries; i++) {
		if (irq_used[i])
			continue;

		if (mp_irqs[i].irqtype != mp_INT)
			continue;

		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
					   MP_IRQPOL_ACTIVE_LOW))
			continue;

		if (nr_m_spare > 0) {
			apic_printk(APIC_VERBOSE, "*NEW* found\n");
			nr_m_spare--;
			memcpy(m_spare[nr_m_spare], &mp_irqs[i], sizeof(mp_irqs[i]));
			m_spare[nr_m_spare] = NULL;
		} else {
			struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
			count += sizeof(struct mpc_intsrc);
			if (check_slot(mpc_new_phys, mpc_new_length, count) < 0)
				goto out;
			memcpy(m, &mp_irqs[i], sizeof(*m));
			mpc->length = count;
			mpt += sizeof(struct mpc_intsrc);
		}
		print_mp_irq_info(&mp_irqs[i]);
	}
#endif
out:
	/* update checksum */
	mpc->checksum = 0;
	mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length);

	return 0;
}

int enable_update_mptable;

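/*
 * "update_mptable" on the kernel command line makes the kernel rewrite
 * the BIOS MP table at late_initcall time so that it matches the IRQ
 * routing the kernel actually set up; with PCI it also forces
 * pci_routeirq so every PCI interrupt gets routed (and thus recorded).
 */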
static int __init update_mptable_setup(char *str)
{
	enable_update_mptable = 1;
#ifdef CONFIG_PCI
	pci_routeirq = 1;
#endif
	return 0;
}
early_param("update_mptable", update_mptable_setup);

static unsigned long __initdata mpc_new_phys;
static unsigned long mpc_new_length __initdata = 4096;

/* alloc_mptable or alloc_mptable=4k */
static int __initdata alloc_mptable;
static int __init parse_alloc_mptable_opt(char *p)
{
	enable_update_mptable = 1;
#ifdef CONFIG_PCI
	pci_routeirq = 1;
#endif
	alloc_mptable = 1;
	if (!p)
		return 0;
	mpc_new_length = memparse(p, &p);
	return 0;
}
early_param("alloc_mptable", parse_alloc_mptable_opt);

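/*
 * With "alloc_mptable", reserve room for a relocated MP config table
 * while early memblock allocations are still possible; update_mp_table()
 * copies the table there if it cannot be patched in place.
 */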
void __init e820__memblock_alloc_reserved_mpc_new(void)
{
	if (enable_update_mptable && alloc_mptable)
		mpc_new_phys = e820__memblock_alloc_reserved(mpc_new_length, 4);
}

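/*
 * Late initcall: map the floating pointer and config table again, decide
 * whether the table can be patched in place or has to be copied into the
 * area reserved above, and then rewrite its interrupt entries.
 */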
static int __init update_mp_table(void)
{
	char str[16];
	char oem[10];
	struct mpf_intel *mpf;
	struct mpc_table *mpc, *mpc_new;
	unsigned long size;

	if (!enable_update_mptable)
		return 0;

	if (!mpf_found)
		return 0;

	mpf = early_memremap(mpf_base, sizeof(*mpf));
	if (!mpf) {
		pr_err("MPTABLE: mpf early_memremap() failed\n");
		return 0;
	}

	/*
	 * Now see if we need to go further.
	 */
	if (mpf->feature1)
		goto do_unmap_mpf;

	if (!mpf->physptr)
		goto do_unmap_mpf;

	size = get_mpc_size(mpf->physptr);
	mpc = early_memremap(mpf->physptr, size);
	if (!mpc) {
		pr_err("MPTABLE: mpc early_memremap() failed\n");
		goto do_unmap_mpf;
	}

	if (!smp_check_mpc(mpc, oem, str))
		goto do_unmap_mpc;

	pr_info("mpf: %llx\n", (u64)mpf_base);
	pr_info("physptr: %x\n", mpf->physptr);

	if (mpc_new_phys && mpc->length > mpc_new_length) {
		mpc_new_phys = 0;
		pr_info("mpc_new_length is %ld, please use alloc_mptable=8k\n",
			mpc_new_length);
	}

	if (!mpc_new_phys) {
		unsigned char old, new;
		/*
		 * Check whether the table is writable in place: poke the
		 * checksum byte and see if the change sticks.
		 */
		mpc->checksum = 0;
		old = mpf_checksum((unsigned char *)mpc, mpc->length);
		mpc->checksum = 0xff;
		new = mpf_checksum((unsigned char *)mpc, mpc->length);
		if (old == new) {
			pr_info("mpc is readonly, please try alloc_mptable instead\n");
			goto do_unmap_mpc;
		}
		pr_info("use in-position replacing\n");
	} else {
		mpc_new = early_memremap(mpc_new_phys, mpc_new_length);
		if (!mpc_new) {
			pr_err("MPTABLE: new mpc early_memremap() failed\n");
			goto do_unmap_mpc;
		}
		mpf->physptr = mpc_new_phys;
		memcpy(mpc_new, mpc, mpc->length);
		early_memunmap(mpc, size);
		mpc = mpc_new;
		size = mpc_new_length;
		/*
		 * Check whether the physptr update above actually stuck; if
		 * the floating pointer lives in ROM it did not, so relocate
		 * the floating pointer structure itself into low memory.
		 */
		if (mpc_new_phys - mpf->physptr) {
			struct mpf_intel *mpf_new;
			/* steal 16 bytes from [0, 1k) */
			mpf_new = early_memremap(0x400 - 16, sizeof(*mpf_new));
			if (!mpf_new) {
				pr_err("MPTABLE: new mpf early_memremap() failed\n");
				goto do_unmap_mpc;
			}
			pr_info("mpf new: %x\n", 0x400 - 16);
			memcpy(mpf_new, mpf, 16);
			early_memunmap(mpf, sizeof(*mpf));
			mpf = mpf_new;
			mpf->physptr = mpc_new_phys;
		}
		mpf->checksum = 0;
		mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
		pr_info("physptr new: %x\n", mpf->physptr);
	}

	/*
	 * Only replace entries of type mp_INT with
	 * MP_IRQTRIG_LEVEL|MP_IRQPOL_ACTIVE_LOW that are already in mp_irqs
	 * (stored by ... and mp_config_acpi_gsi); pci=routeirq may be needed
	 * for full coverage.
	 */
	replace_intsrc_all(mpc, mpc_new_phys, mpc_new_length);

do_unmap_mpc:
	early_memunmap(mpc, size);

do_unmap_mpf:
	early_memunmap(mpf, sizeof(*mpf));

	return 0;
}

late_initcall(update_mp_table);