1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Intel Multiprocessor Specification 1.1 and 1.4 4 * compliant MP-table parsing routines. 5 * 6 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk> 7 * (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com> 8 * (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de> 9 */ 10 11 #include <linux/mm.h> 12 #include <linux/init.h> 13 #include <linux/delay.h> 14 #include <linux/bootmem.h> 15 #include <linux/memblock.h> 16 #include <linux/kernel_stat.h> 17 #include <linux/mc146818rtc.h> 18 #include <linux/bitops.h> 19 #include <linux/acpi.h> 20 #include <linux/smp.h> 21 #include <linux/pci.h> 22 23 #include <asm/irqdomain.h> 24 #include <asm/mtrr.h> 25 #include <asm/mpspec.h> 26 #include <asm/pgalloc.h> 27 #include <asm/io_apic.h> 28 #include <asm/proto.h> 29 #include <asm/bios_ebda.h> 30 #include <asm/e820/api.h> 31 #include <asm/setup.h> 32 #include <asm/smp.h> 33 34 #include <asm/apic.h> 35 /* 36 * Checksum an MP configuration block. 37 */ 38 39 static int __init mpf_checksum(unsigned char *mp, int len) 40 { 41 int sum = 0; 42 43 while (len--) 44 sum += *mp++; 45 46 return sum & 0xFF; 47 } 48 49 int __init default_mpc_apic_id(struct mpc_cpu *m) 50 { 51 return m->apicid; 52 } 53 54 static void __init MP_processor_info(struct mpc_cpu *m) 55 { 56 int apicid; 57 char *bootup_cpu = ""; 58 59 if (!(m->cpuflag & CPU_ENABLED)) { 60 disabled_cpus++; 61 return; 62 } 63 64 apicid = x86_init.mpparse.mpc_apic_id(m); 65 66 if (m->cpuflag & CPU_BOOTPROCESSOR) { 67 bootup_cpu = " (Bootup-CPU)"; 68 boot_cpu_physical_apicid = m->apicid; 69 } 70 71 pr_info("Processor #%d%s\n", m->apicid, bootup_cpu); 72 generic_processor_info(apicid, m->apicver); 73 } 74 75 #ifdef CONFIG_X86_IO_APIC 76 void __init default_mpc_oem_bus_info(struct mpc_bus *m, char *str) 77 { 78 memcpy(str, m->bustype, 6); 79 str[6] = 0; 80 apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str); 81 } 82 83 static void __init MP_bus_info(struct mpc_bus *m) 84 { 85 char str[7]; 86 87 x86_init.mpparse.mpc_oem_bus_info(m, str); 88 89 #if MAX_MP_BUSSES < 256 90 if (m->busid >= MAX_MP_BUSSES) { 91 pr_warn("MP table busid value (%d) for bustype %s is too large, max. supported is %d\n", 92 m->busid, str, MAX_MP_BUSSES - 1); 93 return; 94 } 95 #endif 96 97 set_bit(m->busid, mp_bus_not_pci); 98 if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) { 99 #ifdef CONFIG_EISA 100 mp_bus_id_to_type[m->busid] = MP_BUS_ISA; 101 #endif 102 } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) { 103 if (x86_init.mpparse.mpc_oem_pci_bus) 104 x86_init.mpparse.mpc_oem_pci_bus(m); 105 106 clear_bit(m->busid, mp_bus_not_pci); 107 #ifdef CONFIG_EISA 108 mp_bus_id_to_type[m->busid] = MP_BUS_PCI; 109 } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) { 110 mp_bus_id_to_type[m->busid] = MP_BUS_EISA; 111 #endif 112 } else 113 pr_warn("Unknown bustype %s - ignoring\n", str); 114 } 115 116 static void __init MP_ioapic_info(struct mpc_ioapic *m) 117 { 118 struct ioapic_domain_cfg cfg = { 119 .type = IOAPIC_DOMAIN_LEGACY, 120 .ops = &mp_ioapic_irqdomain_ops, 121 }; 122 123 if (m->flags & MPC_APIC_USABLE) 124 mp_register_ioapic(m->apicid, m->apicaddr, gsi_top, &cfg); 125 } 126 127 static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq) 128 { 129 apic_printk(APIC_VERBOSE, 130 "Int: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC INT %02x\n", 131 mp_irq->irqtype, mp_irq->irqflag & 3, 132 (mp_irq->irqflag >> 2) & 3, mp_irq->srcbus, 133 mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq); 134 } 135 136 #else /* CONFIG_X86_IO_APIC */ 137 static inline void __init MP_bus_info(struct mpc_bus *m) {} 138 static inline void __init MP_ioapic_info(struct mpc_ioapic *m) {} 139 #endif /* CONFIG_X86_IO_APIC */ 140 141 static void __init MP_lintsrc_info(struct mpc_lintsrc *m) 142 { 143 apic_printk(APIC_VERBOSE, 144 "Lint: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC LINT %02x\n", 145 m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbusid, 146 m->srcbusirq, m->destapic, m->destapiclint); 147 } 148 149 /* 150 * Read/parse the MPC 151 */ 152 static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str) 153 { 154 155 if (memcmp(mpc->signature, MPC_SIGNATURE, 4)) { 156 pr_err("MPTABLE: bad signature [%c%c%c%c]!\n", 157 mpc->signature[0], mpc->signature[1], 158 mpc->signature[2], mpc->signature[3]); 159 return 0; 160 } 161 if (mpf_checksum((unsigned char *)mpc, mpc->length)) { 162 pr_err("MPTABLE: checksum error!\n"); 163 return 0; 164 } 165 if (mpc->spec != 0x01 && mpc->spec != 0x04) { 166 pr_err("MPTABLE: bad table version (%d)!!\n", mpc->spec); 167 return 0; 168 } 169 if (!mpc->lapic) { 170 pr_err("MPTABLE: null local APIC address!\n"); 171 return 0; 172 } 173 memcpy(oem, mpc->oem, 8); 174 oem[8] = 0; 175 pr_info("MPTABLE: OEM ID: %s\n", oem); 176 177 memcpy(str, mpc->productid, 12); 178 str[12] = 0; 179 180 pr_info("MPTABLE: Product ID: %s\n", str); 181 182 pr_info("MPTABLE: APIC at: 0x%X\n", mpc->lapic); 183 184 return 1; 185 } 186 187 static void skip_entry(unsigned char **ptr, int *count, int size) 188 { 189 *ptr += size; 190 *count += size; 191 } 192 193 static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt) 194 { 195 pr_err("Your mptable is wrong, contact your HW vendor!\n"); 196 pr_cont("type %x\n", *mpt); 197 print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_ADDRESS, 16, 198 1, mpc, mpc->length, 1); 199 } 200 201 void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { } 202 203 static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) 204 { 205 char str[16]; 206 char oem[10]; 207 208 int count = sizeof(*mpc); 209 unsigned char *mpt = ((unsigned char *)mpc) + count; 210 211 if (!smp_check_mpc(mpc, oem, str)) 212 return 0; 213 214 /* Initialize the lapic mapping */ 215 if (!acpi_lapic) 216 register_lapic_address(mpc->lapic); 217 218 if (early) 219 return 1; 220 221 if (mpc->oemptr) 222 x86_init.mpparse.smp_read_mpc_oem(mpc); 223 224 /* 225 * Now process the configuration blocks. 226 */ 227 x86_init.mpparse.mpc_record(0); 228 229 while (count < mpc->length) { 230 switch (*mpt) { 231 case MP_PROCESSOR: 232 /* ACPI may have already provided this data */ 233 if (!acpi_lapic) 234 MP_processor_info((struct mpc_cpu *)mpt); 235 skip_entry(&mpt, &count, sizeof(struct mpc_cpu)); 236 break; 237 case MP_BUS: 238 MP_bus_info((struct mpc_bus *)mpt); 239 skip_entry(&mpt, &count, sizeof(struct mpc_bus)); 240 break; 241 case MP_IOAPIC: 242 MP_ioapic_info((struct mpc_ioapic *)mpt); 243 skip_entry(&mpt, &count, sizeof(struct mpc_ioapic)); 244 break; 245 case MP_INTSRC: 246 mp_save_irq((struct mpc_intsrc *)mpt); 247 skip_entry(&mpt, &count, sizeof(struct mpc_intsrc)); 248 break; 249 case MP_LINTSRC: 250 MP_lintsrc_info((struct mpc_lintsrc *)mpt); 251 skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc)); 252 break; 253 default: 254 /* wrong mptable */ 255 smp_dump_mptable(mpc, mpt); 256 count = mpc->length; 257 break; 258 } 259 x86_init.mpparse.mpc_record(1); 260 } 261 262 if (!num_processors) 263 pr_err("MPTABLE: no processors registered!\n"); 264 return num_processors; 265 } 266 267 #ifdef CONFIG_X86_IO_APIC 268 269 static int __init ELCR_trigger(unsigned int irq) 270 { 271 unsigned int port; 272 273 port = 0x4d0 + (irq >> 3); 274 return (inb(port) >> (irq & 7)) & 1; 275 } 276 277 static void __init construct_default_ioirq_mptable(int mpc_default_type) 278 { 279 struct mpc_intsrc intsrc; 280 int i; 281 int ELCR_fallback = 0; 282 283 intsrc.type = MP_INTSRC; 284 intsrc.irqflag = 0; /* conforming */ 285 intsrc.srcbus = 0; 286 intsrc.dstapic = mpc_ioapic_id(0); 287 288 intsrc.irqtype = mp_INT; 289 290 /* 291 * If true, we have an ISA/PCI system with no IRQ entries 292 * in the MP table. To prevent the PCI interrupts from being set up 293 * incorrectly, we try to use the ELCR. The sanity check to see if 294 * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can 295 * never be level sensitive, so we simply see if the ELCR agrees. 296 * If it does, we assume it's valid. 297 */ 298 if (mpc_default_type == 5) { 299 pr_info("ISA/PCI bus type with no IRQ information... falling back to ELCR\n"); 300 301 if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || 302 ELCR_trigger(13)) 303 pr_err("ELCR contains invalid data... not using ELCR\n"); 304 else { 305 pr_info("Using ELCR to identify PCI interrupts\n"); 306 ELCR_fallback = 1; 307 } 308 } 309 310 for (i = 0; i < 16; i++) { 311 switch (mpc_default_type) { 312 case 2: 313 if (i == 0 || i == 13) 314 continue; /* IRQ0 & IRQ13 not connected */ 315 /* fall through */ 316 default: 317 if (i == 2) 318 continue; /* IRQ2 is never connected */ 319 } 320 321 if (ELCR_fallback) { 322 /* 323 * If the ELCR indicates a level-sensitive interrupt, we 324 * copy that information over to the MP table in the 325 * irqflag field (level sensitive, active high polarity). 326 */ 327 if (ELCR_trigger(i)) 328 intsrc.irqflag = 13; 329 else 330 intsrc.irqflag = 0; 331 } 332 333 intsrc.srcbusirq = i; 334 intsrc.dstirq = i ? i : 2; /* IRQ0 to INTIN2 */ 335 mp_save_irq(&intsrc); 336 } 337 338 intsrc.irqtype = mp_ExtINT; 339 intsrc.srcbusirq = 0; 340 intsrc.dstirq = 0; /* 8259A to INTIN0 */ 341 mp_save_irq(&intsrc); 342 } 343 344 345 static void __init construct_ioapic_table(int mpc_default_type) 346 { 347 struct mpc_ioapic ioapic; 348 struct mpc_bus bus; 349 350 bus.type = MP_BUS; 351 bus.busid = 0; 352 switch (mpc_default_type) { 353 default: 354 pr_err("???\nUnknown standard configuration %d\n", 355 mpc_default_type); 356 /* fall through */ 357 case 1: 358 case 5: 359 memcpy(bus.bustype, "ISA ", 6); 360 break; 361 case 2: 362 case 6: 363 case 3: 364 memcpy(bus.bustype, "EISA ", 6); 365 break; 366 } 367 MP_bus_info(&bus); 368 if (mpc_default_type > 4) { 369 bus.busid = 1; 370 memcpy(bus.bustype, "PCI ", 6); 371 MP_bus_info(&bus); 372 } 373 374 ioapic.type = MP_IOAPIC; 375 ioapic.apicid = 2; 376 ioapic.apicver = mpc_default_type > 4 ? 0x10 : 0x01; 377 ioapic.flags = MPC_APIC_USABLE; 378 ioapic.apicaddr = IO_APIC_DEFAULT_PHYS_BASE; 379 MP_ioapic_info(&ioapic); 380 381 /* 382 * We set up most of the low 16 IO-APIC pins according to MPS rules. 383 */ 384 construct_default_ioirq_mptable(mpc_default_type); 385 } 386 #else 387 static inline void __init construct_ioapic_table(int mpc_default_type) { } 388 #endif 389 390 static inline void __init construct_default_ISA_mptable(int mpc_default_type) 391 { 392 struct mpc_cpu processor; 393 struct mpc_lintsrc lintsrc; 394 int linttypes[2] = { mp_ExtINT, mp_NMI }; 395 int i; 396 397 /* 398 * local APIC has default address 399 */ 400 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; 401 402 /* 403 * 2 CPUs, numbered 0 & 1. 404 */ 405 processor.type = MP_PROCESSOR; 406 /* Either an integrated APIC or a discrete 82489DX. */ 407 processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01; 408 processor.cpuflag = CPU_ENABLED; 409 processor.cpufeature = (boot_cpu_data.x86 << 8) | 410 (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask; 411 processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX]; 412 processor.reserved[0] = 0; 413 processor.reserved[1] = 0; 414 for (i = 0; i < 2; i++) { 415 processor.apicid = i; 416 MP_processor_info(&processor); 417 } 418 419 construct_ioapic_table(mpc_default_type); 420 421 lintsrc.type = MP_LINTSRC; 422 lintsrc.irqflag = 0; /* conforming */ 423 lintsrc.srcbusid = 0; 424 lintsrc.srcbusirq = 0; 425 lintsrc.destapic = MP_APIC_ALL; 426 for (i = 0; i < 2; i++) { 427 lintsrc.irqtype = linttypes[i]; 428 lintsrc.destapiclint = i; 429 MP_lintsrc_info(&lintsrc); 430 } 431 } 432 433 static unsigned long mpf_base; 434 435 static unsigned long __init get_mpc_size(unsigned long physptr) 436 { 437 struct mpc_table *mpc; 438 unsigned long size; 439 440 mpc = early_memremap(physptr, PAGE_SIZE); 441 size = mpc->length; 442 early_memunmap(mpc, PAGE_SIZE); 443 apic_printk(APIC_VERBOSE, " mpc: %lx-%lx\n", physptr, physptr + size); 444 445 return size; 446 } 447 448 static int __init check_physptr(struct mpf_intel *mpf, unsigned int early) 449 { 450 struct mpc_table *mpc; 451 unsigned long size; 452 453 size = get_mpc_size(mpf->physptr); 454 mpc = early_memremap(mpf->physptr, size); 455 456 /* 457 * Read the physical hardware table. Anything here will 458 * override the defaults. 459 */ 460 if (!smp_read_mpc(mpc, early)) { 461 #ifdef CONFIG_X86_LOCAL_APIC 462 smp_found_config = 0; 463 #endif 464 pr_err("BIOS bug, MP table errors detected!...\n"); 465 pr_cont("... disabling SMP support. (tell your hw vendor)\n"); 466 early_memunmap(mpc, size); 467 return -1; 468 } 469 early_memunmap(mpc, size); 470 471 if (early) 472 return -1; 473 474 #ifdef CONFIG_X86_IO_APIC 475 /* 476 * If there are no explicit MP IRQ entries, then we are 477 * broken. We set up most of the low 16 IO-APIC pins to 478 * ISA defaults and hope it will work. 479 */ 480 if (!mp_irq_entries) { 481 struct mpc_bus bus; 482 483 pr_err("BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n"); 484 485 bus.type = MP_BUS; 486 bus.busid = 0; 487 memcpy(bus.bustype, "ISA ", 6); 488 MP_bus_info(&bus); 489 490 construct_default_ioirq_mptable(0); 491 } 492 #endif 493 494 return 0; 495 } 496 497 /* 498 * Scan the memory blocks for an SMP configuration block. 499 */ 500 void __init default_get_smp_config(unsigned int early) 501 { 502 struct mpf_intel *mpf; 503 504 if (!smp_found_config) 505 return; 506 507 if (!mpf_base) 508 return; 509 510 if (acpi_lapic && early) 511 return; 512 513 /* 514 * MPS doesn't support hyperthreading, aka only have 515 * thread 0 apic id in MPS table 516 */ 517 if (acpi_lapic && acpi_ioapic) 518 return; 519 520 mpf = early_memremap(mpf_base, sizeof(*mpf)); 521 if (!mpf) { 522 pr_err("MPTABLE: error mapping MP table\n"); 523 return; 524 } 525 526 pr_info("Intel MultiProcessor Specification v1.%d\n", 527 mpf->specification); 528 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32) 529 if (mpf->feature2 & (1 << 7)) { 530 pr_info(" IMCR and PIC compatibility mode.\n"); 531 pic_mode = 1; 532 } else { 533 pr_info(" Virtual Wire compatibility mode.\n"); 534 pic_mode = 0; 535 } 536 #endif 537 /* 538 * Now see if we need to read further. 539 */ 540 if (mpf->feature1) { 541 if (early) { 542 /* 543 * local APIC has default address 544 */ 545 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; 546 return; 547 } 548 549 pr_info("Default MP configuration #%d\n", mpf->feature1); 550 construct_default_ISA_mptable(mpf->feature1); 551 552 } else if (mpf->physptr) { 553 if (check_physptr(mpf, early)) { 554 early_memunmap(mpf, sizeof(*mpf)); 555 return; 556 } 557 } else 558 BUG(); 559 560 if (!early) 561 pr_info("Processors: %d\n", num_processors); 562 /* 563 * Only use the first configuration found. 564 */ 565 566 early_memunmap(mpf, sizeof(*mpf)); 567 } 568 569 static void __init smp_reserve_memory(struct mpf_intel *mpf) 570 { 571 memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr)); 572 } 573 574 static int __init smp_scan_config(unsigned long base, unsigned long length) 575 { 576 unsigned int *bp; 577 struct mpf_intel *mpf; 578 int ret = 0; 579 580 apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n", 581 base, base + length - 1); 582 BUILD_BUG_ON(sizeof(*mpf) != 16); 583 584 while (length > 0) { 585 bp = early_memremap(base, length); 586 mpf = (struct mpf_intel *)bp; 587 if ((*bp == SMP_MAGIC_IDENT) && 588 (mpf->length == 1) && 589 !mpf_checksum((unsigned char *)bp, 16) && 590 ((mpf->specification == 1) 591 || (mpf->specification == 4))) { 592 #ifdef CONFIG_X86_LOCAL_APIC 593 smp_found_config = 1; 594 #endif 595 mpf_base = base; 596 597 pr_info("found SMP MP-table at [mem %#010lx-%#010lx] mapped at [%p]\n", 598 base, base + sizeof(*mpf) - 1, mpf); 599 600 memblock_reserve(base, sizeof(*mpf)); 601 if (mpf->physptr) 602 smp_reserve_memory(mpf); 603 604 ret = 1; 605 } 606 early_memunmap(bp, length); 607 608 if (ret) 609 break; 610 611 base += 16; 612 length -= 16; 613 } 614 return ret; 615 } 616 617 void __init default_find_smp_config(void) 618 { 619 unsigned int address; 620 621 /* 622 * FIXME: Linux assumes you have 640K of base ram.. 623 * this continues the error... 624 * 625 * 1) Scan the bottom 1K for a signature 626 * 2) Scan the top 1K of base RAM 627 * 3) Scan the 64K of bios 628 */ 629 if (smp_scan_config(0x0, 0x400) || 630 smp_scan_config(639 * 0x400, 0x400) || 631 smp_scan_config(0xF0000, 0x10000)) 632 return; 633 /* 634 * If it is an SMP machine we should know now, unless the 635 * configuration is in an EISA bus machine with an 636 * extended bios data area. 637 * 638 * there is a real-mode segmented pointer pointing to the 639 * 4K EBDA area at 0x40E, calculate and scan it here. 640 * 641 * NOTE! There are Linux loaders that will corrupt the EBDA 642 * area, and as such this kind of SMP config may be less 643 * trustworthy, simply because the SMP table may have been 644 * stomped on during early boot. These loaders are buggy and 645 * should be fixed. 646 * 647 * MP1.4 SPEC states to only scan first 1K of 4K EBDA. 648 */ 649 650 address = get_bios_ebda(); 651 if (address) 652 smp_scan_config(address, 0x400); 653 } 654 655 #ifdef CONFIG_X86_IO_APIC 656 static u8 __initdata irq_used[MAX_IRQ_SOURCES]; 657 658 static int __init get_MP_intsrc_index(struct mpc_intsrc *m) 659 { 660 int i; 661 662 if (m->irqtype != mp_INT) 663 return 0; 664 665 if (m->irqflag != 0x0f) 666 return 0; 667 668 /* not legacy */ 669 670 for (i = 0; i < mp_irq_entries; i++) { 671 if (mp_irqs[i].irqtype != mp_INT) 672 continue; 673 674 if (mp_irqs[i].irqflag != 0x0f) 675 continue; 676 677 if (mp_irqs[i].srcbus != m->srcbus) 678 continue; 679 if (mp_irqs[i].srcbusirq != m->srcbusirq) 680 continue; 681 if (irq_used[i]) { 682 /* already claimed */ 683 return -2; 684 } 685 irq_used[i] = 1; 686 return i; 687 } 688 689 /* not found */ 690 return -1; 691 } 692 693 #define SPARE_SLOT_NUM 20 694 695 static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM]; 696 697 static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) 698 { 699 int i; 700 701 apic_printk(APIC_VERBOSE, "OLD "); 702 print_mp_irq_info(m); 703 704 i = get_MP_intsrc_index(m); 705 if (i > 0) { 706 memcpy(m, &mp_irqs[i], sizeof(*m)); 707 apic_printk(APIC_VERBOSE, "NEW "); 708 print_mp_irq_info(&mp_irqs[i]); 709 return; 710 } 711 if (!i) { 712 /* legacy, do nothing */ 713 return; 714 } 715 if (*nr_m_spare < SPARE_SLOT_NUM) { 716 /* 717 * not found (-1), or duplicated (-2) are invalid entries, 718 * we need to use the slot later 719 */ 720 m_spare[*nr_m_spare] = m; 721 *nr_m_spare += 1; 722 } 723 } 724 725 static int __init 726 check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count) 727 { 728 if (!mpc_new_phys || count <= mpc_new_length) { 729 WARN(1, "update_mptable: No spare slots (length: %x)\n", count); 730 return -1; 731 } 732 733 return 0; 734 } 735 #else /* CONFIG_X86_IO_APIC */ 736 static 737 inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {} 738 #endif /* CONFIG_X86_IO_APIC */ 739 740 static int __init replace_intsrc_all(struct mpc_table *mpc, 741 unsigned long mpc_new_phys, 742 unsigned long mpc_new_length) 743 { 744 #ifdef CONFIG_X86_IO_APIC 745 int i; 746 #endif 747 int count = sizeof(*mpc); 748 int nr_m_spare = 0; 749 unsigned char *mpt = ((unsigned char *)mpc) + count; 750 751 pr_info("mpc_length %x\n", mpc->length); 752 while (count < mpc->length) { 753 switch (*mpt) { 754 case MP_PROCESSOR: 755 skip_entry(&mpt, &count, sizeof(struct mpc_cpu)); 756 break; 757 case MP_BUS: 758 skip_entry(&mpt, &count, sizeof(struct mpc_bus)); 759 break; 760 case MP_IOAPIC: 761 skip_entry(&mpt, &count, sizeof(struct mpc_ioapic)); 762 break; 763 case MP_INTSRC: 764 check_irq_src((struct mpc_intsrc *)mpt, &nr_m_spare); 765 skip_entry(&mpt, &count, sizeof(struct mpc_intsrc)); 766 break; 767 case MP_LINTSRC: 768 skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc)); 769 break; 770 default: 771 /* wrong mptable */ 772 smp_dump_mptable(mpc, mpt); 773 goto out; 774 } 775 } 776 777 #ifdef CONFIG_X86_IO_APIC 778 for (i = 0; i < mp_irq_entries; i++) { 779 if (irq_used[i]) 780 continue; 781 782 if (mp_irqs[i].irqtype != mp_INT) 783 continue; 784 785 if (mp_irqs[i].irqflag != 0x0f) 786 continue; 787 788 if (nr_m_spare > 0) { 789 apic_printk(APIC_VERBOSE, "*NEW* found\n"); 790 nr_m_spare--; 791 memcpy(m_spare[nr_m_spare], &mp_irqs[i], sizeof(mp_irqs[i])); 792 m_spare[nr_m_spare] = NULL; 793 } else { 794 struct mpc_intsrc *m = (struct mpc_intsrc *)mpt; 795 count += sizeof(struct mpc_intsrc); 796 if (check_slot(mpc_new_phys, mpc_new_length, count) < 0) 797 goto out; 798 memcpy(m, &mp_irqs[i], sizeof(*m)); 799 mpc->length = count; 800 mpt += sizeof(struct mpc_intsrc); 801 } 802 print_mp_irq_info(&mp_irqs[i]); 803 } 804 #endif 805 out: 806 /* update checksum */ 807 mpc->checksum = 0; 808 mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length); 809 810 return 0; 811 } 812 813 int enable_update_mptable; 814 815 static int __init update_mptable_setup(char *str) 816 { 817 enable_update_mptable = 1; 818 #ifdef CONFIG_PCI 819 pci_routeirq = 1; 820 #endif 821 return 0; 822 } 823 early_param("update_mptable", update_mptable_setup); 824 825 static unsigned long __initdata mpc_new_phys; 826 static unsigned long mpc_new_length __initdata = 4096; 827 828 /* alloc_mptable or alloc_mptable=4k */ 829 static int __initdata alloc_mptable; 830 static int __init parse_alloc_mptable_opt(char *p) 831 { 832 enable_update_mptable = 1; 833 #ifdef CONFIG_PCI 834 pci_routeirq = 1; 835 #endif 836 alloc_mptable = 1; 837 if (!p) 838 return 0; 839 mpc_new_length = memparse(p, &p); 840 return 0; 841 } 842 early_param("alloc_mptable", parse_alloc_mptable_opt); 843 844 void __init e820__memblock_alloc_reserved_mpc_new(void) 845 { 846 if (enable_update_mptable && alloc_mptable) 847 mpc_new_phys = e820__memblock_alloc_reserved(mpc_new_length, 4); 848 } 849 850 static int __init update_mp_table(void) 851 { 852 char str[16]; 853 char oem[10]; 854 struct mpf_intel *mpf; 855 struct mpc_table *mpc, *mpc_new; 856 unsigned long size; 857 858 if (!enable_update_mptable) 859 return 0; 860 861 if (!mpf_base) 862 return 0; 863 864 mpf = early_memremap(mpf_base, sizeof(*mpf)); 865 if (!mpf) { 866 pr_err("MPTABLE: mpf early_memremap() failed\n"); 867 return 0; 868 } 869 870 /* 871 * Now see if we need to go further. 872 */ 873 if (mpf->feature1) 874 goto do_unmap_mpf; 875 876 if (!mpf->physptr) 877 goto do_unmap_mpf; 878 879 size = get_mpc_size(mpf->physptr); 880 mpc = early_memremap(mpf->physptr, size); 881 if (!mpc) { 882 pr_err("MPTABLE: mpc early_memremap() failed\n"); 883 goto do_unmap_mpf; 884 } 885 886 if (!smp_check_mpc(mpc, oem, str)) 887 goto do_unmap_mpc; 888 889 pr_info("mpf: %llx\n", (u64)mpf_base); 890 pr_info("physptr: %x\n", mpf->physptr); 891 892 if (mpc_new_phys && mpc->length > mpc_new_length) { 893 mpc_new_phys = 0; 894 pr_info("mpc_new_length is %ld, please use alloc_mptable=8k\n", 895 mpc_new_length); 896 } 897 898 if (!mpc_new_phys) { 899 unsigned char old, new; 900 /* check if we can change the position */ 901 mpc->checksum = 0; 902 old = mpf_checksum((unsigned char *)mpc, mpc->length); 903 mpc->checksum = 0xff; 904 new = mpf_checksum((unsigned char *)mpc, mpc->length); 905 if (old == new) { 906 pr_info("mpc is readonly, please try alloc_mptable instead\n"); 907 goto do_unmap_mpc; 908 } 909 pr_info("use in-position replacing\n"); 910 } else { 911 mpc_new = early_memremap(mpc_new_phys, mpc_new_length); 912 if (!mpc_new) { 913 pr_err("MPTABLE: new mpc early_memremap() failed\n"); 914 goto do_unmap_mpc; 915 } 916 mpf->physptr = mpc_new_phys; 917 memcpy(mpc_new, mpc, mpc->length); 918 early_memunmap(mpc, size); 919 mpc = mpc_new; 920 size = mpc_new_length; 921 /* check if we can modify that */ 922 if (mpc_new_phys - mpf->physptr) { 923 struct mpf_intel *mpf_new; 924 /* steal 16 bytes from [0, 1k) */ 925 mpf_new = early_memremap(0x400 - 16, sizeof(*mpf_new)); 926 if (!mpf_new) { 927 pr_err("MPTABLE: new mpf early_memremap() failed\n"); 928 goto do_unmap_mpc; 929 } 930 pr_info("mpf new: %x\n", 0x400 - 16); 931 memcpy(mpf_new, mpf, 16); 932 early_memunmap(mpf, sizeof(*mpf)); 933 mpf = mpf_new; 934 mpf->physptr = mpc_new_phys; 935 } 936 mpf->checksum = 0; 937 mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16); 938 pr_info("physptr new: %x\n", mpf->physptr); 939 } 940 941 /* 942 * only replace the one with mp_INT and 943 * MP_IRQ_TRIGGER_LEVEL|MP_IRQ_POLARITY_LOW, 944 * already in mp_irqs , stored by ... and mp_config_acpi_gsi, 945 * may need pci=routeirq for all coverage 946 */ 947 replace_intsrc_all(mpc, mpc_new_phys, mpc_new_length); 948 949 do_unmap_mpc: 950 early_memunmap(mpc, size); 951 952 do_unmap_mpf: 953 early_memunmap(mpf, sizeof(*mpf)); 954 955 return 0; 956 } 957 958 late_initcall(update_mp_table); 959