/*
 * Intel Multiprocessor Specification 1.1 and 1.4
 * compliant MP-table parsing routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 * (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 * (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
 */

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/smp.h>
#include <linux/pci.h>

#include <asm/irqdomain.h>
#include <asm/mtrr.h>
#include <asm/mpspec.h>
#include <asm/pgalloc.h>
#include <asm/io_apic.h>
#include <asm/proto.h>
#include <asm/bios_ebda.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
#include <asm/smp.h>

#include <asm/apic.h>
/*
 * Checksum an MP configuration block.
 */

static int __init mpf_checksum(unsigned char *mp, int len)
{
	int sum = 0;

	while (len--)
		sum += *mp++;

	return sum & 0xFF;
}

int __init default_mpc_apic_id(struct mpc_cpu *m)
{
	return m->apicid;
}

static void __init MP_processor_info(struct mpc_cpu *m)
{
	int apicid;
	char *bootup_cpu = "";

	if (!(m->cpuflag & CPU_ENABLED)) {
		disabled_cpus++;
		return;
	}

	apicid = x86_init.mpparse.mpc_apic_id(m);

	if (m->cpuflag & CPU_BOOTPROCESSOR) {
		bootup_cpu = " (Bootup-CPU)";
		boot_cpu_physical_apicid = m->apicid;
	}

	pr_info("Processor #%d%s\n", m->apicid, bootup_cpu);
	generic_processor_info(apicid, m->apicver);
}

#ifdef CONFIG_X86_IO_APIC
void __init default_mpc_oem_bus_info(struct mpc_bus *m, char *str)
{
	memcpy(str, m->bustype, 6);
	str[6] = 0;
	apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str);
}

static void __init MP_bus_info(struct mpc_bus *m)
{
	char str[7];

	x86_init.mpparse.mpc_oem_bus_info(m, str);

#if MAX_MP_BUSSES < 256
	if (m->busid >= MAX_MP_BUSSES) {
		pr_warn("MP table busid value (%d) for bustype %s is too large, max. supported is %d\n",
			m->busid, str, MAX_MP_BUSSES - 1);
		return;
	}
#endif

	set_bit(m->busid, mp_bus_not_pci);
	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
#ifdef CONFIG_EISA
		mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
#endif
	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
		if (x86_init.mpparse.mpc_oem_pci_bus)
			x86_init.mpparse.mpc_oem_pci_bus(m);

		clear_bit(m->busid, mp_bus_not_pci);
#ifdef CONFIG_EISA
		mp_bus_id_to_type[m->busid] = MP_BUS_PCI;
	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
		mp_bus_id_to_type[m->busid] = MP_BUS_EISA;
#endif
	} else
		pr_warn("Unknown bustype %s - ignoring\n", str);
}

static void __init MP_ioapic_info(struct mpc_ioapic *m)
{
	struct ioapic_domain_cfg cfg = {
		.type = IOAPIC_DOMAIN_LEGACY,
		.ops = &mp_ioapic_irqdomain_ops,
	};

	if (m->flags & MPC_APIC_USABLE)
		mp_register_ioapic(m->apicid, m->apicaddr, gsi_top, &cfg);
}

static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
{
	apic_printk(APIC_VERBOSE,
		"Int: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC INT %02x\n",
		mp_irq->irqtype, mp_irq->irqflag & 3,
		(mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
		mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
}

#else /* CONFIG_X86_IO_APIC */
static inline void __init MP_bus_info(struct mpc_bus *m) {}
static inline void __init MP_ioapic_info(struct mpc_ioapic *m) {}
#endif /* CONFIG_X86_IO_APIC */

static void __init MP_lintsrc_info(struct mpc_lintsrc *m)
{
	apic_printk(APIC_VERBOSE,
		"Lint: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC LINT %02x\n",
		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbusid,
		m->srcbusirq, m->destapic, m->destapiclint);
}

/*
 * Read/parse the MPC
 */
static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str)
{

	if (memcmp(mpc->signature, MPC_SIGNATURE, 4)) {
		pr_err("MPTABLE: bad signature [%c%c%c%c]!\n",
		       mpc->signature[0], mpc->signature[1],
		       mpc->signature[2], mpc->signature[3]);
		return 0;
	}
	if (mpf_checksum((unsigned char *)mpc, mpc->length)) {
		pr_err("MPTABLE: checksum error!\n");
		return 0;
	}
	if (mpc->spec != 0x01 && mpc->spec != 0x04) {
		pr_err("MPTABLE: bad table version (%d)!!\n", mpc->spec);
		return 0;
	}
	if (!mpc->lapic) {
		pr_err("MPTABLE: null local APIC address!\n");
		return 0;
	}
	memcpy(oem, mpc->oem, 8);
	oem[8] = 0;
	pr_info("MPTABLE: OEM ID: %s\n", oem);

	memcpy(str, mpc->productid, 12);
	str[12] = 0;

	pr_info("MPTABLE: Product ID: %s\n", str);

	pr_info("MPTABLE: APIC at: 0x%X\n", mpc->lapic);

	return 1;
}

static void skip_entry(unsigned char **ptr, int *count, int size)
{
	*ptr += size;
	*count += size;
}

static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
{
	pr_err("Your mptable is wrong, contact your HW vendor!\n");
	pr_cont("type %x\n", *mpt);
	print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_ADDRESS, 16,
			1, mpc, mpc->length, 1);
}

void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { }

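/*
 * Walk the MPC table that follows the header: it is a sequence of
 * variable-sized entries (processor, bus, I/O APIC, interrupt source,
 * local interrupt source), each starting with a one-byte type code.
 * In the 'early' pass only the local APIC address is registered; the
 * individual entries are parsed in the later pass.
 */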
static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
{
	char str[16];
	char oem[10];

	int count = sizeof(*mpc);
	unsigned char *mpt = ((unsigned char *)mpc) + count;

	if (!smp_check_mpc(mpc, oem, str))
		return 0;

	/* Initialize the lapic mapping */
	if (!acpi_lapic)
		register_lapic_address(mpc->lapic);

	if (early)
		return 1;

	if (mpc->oemptr)
		x86_init.mpparse.smp_read_mpc_oem(mpc);

	/*
	 * Now process the configuration blocks.
	 */
	x86_init.mpparse.mpc_record(0);

	while (count < mpc->length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			/* ACPI may have already provided this data */
			if (!acpi_lapic)
				MP_processor_info((struct mpc_cpu *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
			break;
		case MP_BUS:
			MP_bus_info((struct mpc_bus *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
			break;
		case MP_IOAPIC:
			MP_ioapic_info((struct mpc_ioapic *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
			break;
		case MP_INTSRC:
			mp_save_irq((struct mpc_intsrc *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
			break;
		case MP_LINTSRC:
			MP_lintsrc_info((struct mpc_lintsrc *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
			break;
		default:
			/* wrong mptable */
			smp_dump_mptable(mpc, mpt);
			count = mpc->length;
			break;
		}
		x86_init.mpparse.mpc_record(1);
	}

	if (!num_processors)
		pr_err("MPTABLE: no processors registered!\n");
	return num_processors;
}

#ifdef CONFIG_X86_IO_APIC

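/*
 * The EISA/PCI Edge/Level Control Registers live at I/O ports 0x4d0
 * (IRQ 0-7) and 0x4d1 (IRQ 8-15); a set bit means the corresponding
 * ISA IRQ line is level triggered, a clear bit means edge triggered.
 */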
static int __init ELCR_trigger(unsigned int irq)
{
	unsigned int port;

	port = 0x4d0 + (irq >> 3);
	return (inb(port) >> (irq & 7)) & 1;
}

static void __init construct_default_ioirq_mptable(int mpc_default_type)
{
	struct mpc_intsrc intsrc;
	int i;
	int ELCR_fallback = 0;

	intsrc.type = MP_INTSRC;
	intsrc.irqflag = 0;		/* conforming */
	intsrc.srcbus = 0;
	intsrc.dstapic = mpc_ioapic_id(0);

	intsrc.irqtype = mp_INT;

	/*
	 * If true, we have an ISA/PCI system with no IRQ entries
	 * in the MP table. To prevent the PCI interrupts from being set up
	 * incorrectly, we try to use the ELCR. The sanity check to see if
	 * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
	 * never be level sensitive, so we simply see if the ELCR agrees.
	 * If it does, we assume it's valid.
	 */
	if (mpc_default_type == 5) {
		pr_info("ISA/PCI bus type with no IRQ information... falling back to ELCR\n");

		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) ||
		    ELCR_trigger(13))
			pr_err("ELCR contains invalid data... not using ELCR\n");
		else {
			pr_info("Using ELCR to identify PCI interrupts\n");
			ELCR_fallback = 1;
		}
	}

	for (i = 0; i < 16; i++) {
		switch (mpc_default_type) {
		case 2:
			if (i == 0 || i == 13)
				continue;	/* IRQ0 & IRQ13 not connected */
			/* fall through */
		default:
			if (i == 2)
				continue;	/* IRQ2 is never connected */
		}

		if (ELCR_fallback) {
			/*
			 * If the ELCR indicates a level-sensitive interrupt, we
			 * copy that information over to the MP table in the
			 * irqflag field (level sensitive, active high polarity).
			 */
			if (ELCR_trigger(i))
				intsrc.irqflag = 13;
			else
				intsrc.irqflag = 0;
		}

		intsrc.srcbusirq = i;
		intsrc.dstirq = i ? i : 2;	/* IRQ0 to INTIN2 */
		mp_save_irq(&intsrc);
	}

	intsrc.irqtype = mp_ExtINT;
	intsrc.srcbusirq = 0;
	intsrc.dstirq = 0;		/* 8259A to INTIN0 */
	mp_save_irq(&intsrc);
}


static void __init construct_ioapic_table(int mpc_default_type)
{
	struct mpc_ioapic ioapic;
	struct mpc_bus bus;

	bus.type = MP_BUS;
	bus.busid = 0;
	switch (mpc_default_type) {
	default:
		pr_err("???\nUnknown standard configuration %d\n",
		       mpc_default_type);
		/* fall through */
	case 1:
	case 5:
		memcpy(bus.bustype, "ISA   ", 6);
		break;
	case 2:
	case 6:
	case 3:
		memcpy(bus.bustype, "EISA  ", 6);
		break;
	}
	MP_bus_info(&bus);
	if (mpc_default_type > 4) {
		bus.busid = 1;
		memcpy(bus.bustype, "PCI   ", 6);
		MP_bus_info(&bus);
	}

	ioapic.type = MP_IOAPIC;
	ioapic.apicid = 2;
	ioapic.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	ioapic.flags = MPC_APIC_USABLE;
	ioapic.apicaddr = IO_APIC_DEFAULT_PHYS_BASE;
	MP_ioapic_info(&ioapic);

	/*
	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
	 */
	construct_default_ioirq_mptable(mpc_default_type);
}
#else
static inline void __init construct_ioapic_table(int mpc_default_type) { }
#endif

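/*
 * When the MP floating pointer's feature byte 1 names one of the MP
 * specification's default configurations, the BIOS provides no MPC
 * table.  Synthesize the equivalent entries instead: two CPUs, the
 * default bus(es), one I/O APIC and the standard local interrupt
 * lines.
 */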
static inline void __init construct_default_ISA_mptable(int mpc_default_type)
{
	struct mpc_cpu processor;
	struct mpc_lintsrc lintsrc;
	int linttypes[2] = { mp_ExtINT, mp_NMI };
	int i;

	/*
	 * local APIC has default address
	 */
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

	/*
	 * 2 CPUs, numbered 0 & 1.
	 */
	processor.type = MP_PROCESSOR;
	/* Either an integrated APIC or a discrete 82489DX. */
	processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	processor.cpuflag = CPU_ENABLED;
	processor.cpufeature = (boot_cpu_data.x86 << 8) |
	    (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
	processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
	processor.reserved[0] = 0;
	processor.reserved[1] = 0;
	for (i = 0; i < 2; i++) {
		processor.apicid = i;
		MP_processor_info(&processor);
	}

	construct_ioapic_table(mpc_default_type);

	lintsrc.type = MP_LINTSRC;
	lintsrc.irqflag = 0;		/* conforming */
	lintsrc.srcbusid = 0;
	lintsrc.srcbusirq = 0;
	lintsrc.destapic = MP_APIC_ALL;
	for (i = 0; i < 2; i++) {
		lintsrc.irqtype = linttypes[i];
		lintsrc.destapiclint = i;
		MP_lintsrc_info(&lintsrc);
	}
}

static unsigned long mpf_base;

static unsigned long __init get_mpc_size(unsigned long physptr)
{
	struct mpc_table *mpc;
	unsigned long size;

	mpc = early_memremap(physptr, PAGE_SIZE);
	size = mpc->length;
	early_memunmap(mpc, PAGE_SIZE);
	apic_printk(APIC_VERBOSE, " mpc: %lx-%lx\n", physptr, physptr + size);

	return size;
}

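/*
 * mpf->physptr is the physical address of the MPC table.  Map it,
 * parse it, and fall back to the default ISA interrupt routing when
 * the BIOS supplied no interrupt source entries at all.
 */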
static int __init check_physptr(struct mpf_intel *mpf, unsigned int early)
{
	struct mpc_table *mpc;
	unsigned long size;

	size = get_mpc_size(mpf->physptr);
	mpc = early_memremap(mpf->physptr, size);

	/*
	 * Read the physical hardware table. Anything here will
	 * override the defaults.
	 */
	if (!smp_read_mpc(mpc, early)) {
#ifdef CONFIG_X86_LOCAL_APIC
		smp_found_config = 0;
#endif
		pr_err("BIOS bug, MP table errors detected!...\n");
		pr_cont("... disabling SMP support. (tell your hw vendor)\n");
		early_memunmap(mpc, size);
		return -1;
	}
	early_memunmap(mpc, size);

	if (early)
		return -1;

#ifdef CONFIG_X86_IO_APIC
	/*
	 * If there are no explicit MP IRQ entries, then we are
	 * broken. We set up most of the low 16 IO-APIC pins to
	 * ISA defaults and hope it will work.
	 */
	if (!mp_irq_entries) {
		struct mpc_bus bus;

		pr_err("BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");

		bus.type = MP_BUS;
		bus.busid = 0;
		memcpy(bus.bustype, "ISA   ", 6);
		MP_bus_info(&bus);

		construct_default_ioirq_mptable(0);
	}
#endif

	return 0;
}

/*
 * Scan the memory blocks for an SMP configuration block.
 */
void __init default_get_smp_config(unsigned int early)
{
	struct mpf_intel *mpf;

	if (!smp_found_config)
		return;

	if (!mpf_base)
		return;

	if (acpi_lapic && early)
		return;

	/*
	 * MPS doesn't support hyperthreading, aka only have
	 * thread 0 apic id in MPS table
	 */
	if (acpi_lapic && acpi_ioapic)
		return;

	mpf = early_memremap(mpf_base, sizeof(*mpf));
	if (!mpf) {
		pr_err("MPTABLE: error mapping MP table\n");
		return;
	}

	pr_info("Intel MultiProcessor Specification v1.%d\n",
		mpf->specification);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
	if (mpf->feature2 & (1 << 7)) {
		pr_info(" IMCR and PIC compatibility mode.\n");
		pic_mode = 1;
	} else {
		pr_info(" Virtual Wire compatibility mode.\n");
		pic_mode = 0;
	}
#endif
	/*
	 * Now see if we need to read further.
	 */
	if (mpf->feature1) {
		if (early) {
			/*
			 * local APIC has default address
			 */
			mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
			early_memunmap(mpf, sizeof(*mpf));
			return;
		}

		pr_info("Default MP configuration #%d\n", mpf->feature1);
		construct_default_ISA_mptable(mpf->feature1);

	} else if (mpf->physptr) {
		if (check_physptr(mpf, early)) {
			early_memunmap(mpf, sizeof(*mpf));
			return;
		}
	} else
		BUG();

	if (!early)
		pr_info("Processors: %d\n", num_processors);
	/*
	 * Only use the first configuration found.
	 */

	early_memunmap(mpf, sizeof(*mpf));
}

static void __init smp_reserve_memory(struct mpf_intel *mpf)
{
	memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr));
}

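/*
 * Look for the 16-byte MP floating pointer structure on 16-byte
 * boundaries: the "_MP_" signature (SMP_MAGIC_IDENT), a length of one
 * 16-byte paragraph, a specification revision of 1.1 or 1.4 and a
 * checksum that makes all 16 bytes sum to zero.
 */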
static int __init smp_scan_config(unsigned long base, unsigned long length)
{
	unsigned int *bp;
	struct mpf_intel *mpf;
	int ret = 0;

	apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n",
		    base, base + length - 1);
	BUILD_BUG_ON(sizeof(*mpf) != 16);

	while (length > 0) {
		bp = early_memremap(base, length);
		mpf = (struct mpf_intel *)bp;
		if ((*bp == SMP_MAGIC_IDENT) &&
		    (mpf->length == 1) &&
		    !mpf_checksum((unsigned char *)bp, 16) &&
		    ((mpf->specification == 1)
		     || (mpf->specification == 4))) {
#ifdef CONFIG_X86_LOCAL_APIC
			smp_found_config = 1;
#endif
			mpf_base = base;

			pr_info("found SMP MP-table at [mem %#010lx-%#010lx] mapped at [%p]\n",
				base, base + sizeof(*mpf) - 1, mpf);

			memblock_reserve(base, sizeof(*mpf));
			if (mpf->physptr)
				smp_reserve_memory(mpf);

			ret = 1;
		}
		early_memunmap(bp, length);

		if (ret)
			break;

		base += 16;
		length -= 16;
	}
	return ret;
}

void __init default_find_smp_config(void)
{
	unsigned int address;

	/*
	 * FIXME: Linux assumes you have 640K of base ram..
	 * this continues the error...
	 *
	 * 1) Scan the bottom 1K for a signature
	 * 2) Scan the top 1K of base RAM
	 * 3) Scan the 64K of bios
	 */
	if (smp_scan_config(0x0, 0x400) ||
	    smp_scan_config(639 * 0x400, 0x400) ||
	    smp_scan_config(0xF0000, 0x10000))
		return;
	/*
	 * If it is an SMP machine we should know now, unless the
	 * configuration is in an EISA bus machine with an
	 * extended bios data area.
	 *
	 * there is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E, calculate and scan it here.
	 *
	 * NOTE! There are Linux loaders that will corrupt the EBDA
	 * area, and as such this kind of SMP config may be less
	 * trustworthy, simply because the SMP table may have been
	 * stomped on during early boot. These loaders are buggy and
	 * should be fixed.
	 *
	 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
	 */

	address = get_bios_ebda();
	if (address)
		smp_scan_config(address, 0x400);
}

#ifdef CONFIG_X86_IO_APIC
static u8 __initdata irq_used[MAX_IRQ_SOURCES];

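/*
 * In an interrupt source entry the low two bits of irqflag encode the
 * polarity and the next two bits the trigger mode; 0x0f therefore
 * means active low, level triggered - the way PCI interrupts are
 * normally listed - while 0x00 means "conforms to the bus default".
 * Only such non-legacy entries are candidates for replacement.
 */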
static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
{
	int i;

	if (m->irqtype != mp_INT)
		return 0;

	if (m->irqflag != 0x0f)
		return 0;

	/* not legacy */

	for (i = 0; i < mp_irq_entries; i++) {
		if (mp_irqs[i].irqtype != mp_INT)
			continue;

		if (mp_irqs[i].irqflag != 0x0f)
			continue;

		if (mp_irqs[i].srcbus != m->srcbus)
			continue;
		if (mp_irqs[i].srcbusirq != m->srcbusirq)
			continue;
		if (irq_used[i]) {
			/* already claimed */
			return -2;
		}
		irq_used[i] = 1;
		return i;
	}

	/* not found */
	return -1;
}

#define SPARE_SLOT_NUM 20

static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];

static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
{
	int i;

	apic_printk(APIC_VERBOSE, "OLD ");
	print_mp_irq_info(m);

	i = get_MP_intsrc_index(m);
	if (i > 0) {
		memcpy(m, &mp_irqs[i], sizeof(*m));
		apic_printk(APIC_VERBOSE, "NEW ");
		print_mp_irq_info(&mp_irqs[i]);
		return;
	}
	if (!i) {
		/* legacy, do nothing */
		return;
	}
	if (*nr_m_spare < SPARE_SLOT_NUM) {
		/*
		 * not found (-1), or duplicated (-2) are invalid entries,
		 * we need to use the slot later
		 */
		m_spare[*nr_m_spare] = m;
		*nr_m_spare += 1;
	}
}

static int __init
check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count)
{
	if (!mpc_new_phys || count <= mpc_new_length) {
		WARN(1, "update_mptable: No spare slots (length: %x)\n", count);
		return -1;
	}

	return 0;
}
#else /* CONFIG_X86_IO_APIC */
static
inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
#endif /* CONFIG_X86_IO_APIC */

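/*
 * Rewrite the interrupt source entries of an MPC table from the
 * kernel's mp_irqs[] array: first walk the existing entries and note
 * which ones no longer match (spare slots), then overwrite those
 * spares or append new entries at the end, and finally recompute the
 * table checksum.
 */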
static int __init replace_intsrc_all(struct mpc_table *mpc,
				     unsigned long mpc_new_phys,
				     unsigned long mpc_new_length)
{
#ifdef CONFIG_X86_IO_APIC
	int i;
#endif
	int count = sizeof(*mpc);
	int nr_m_spare = 0;
	unsigned char *mpt = ((unsigned char *)mpc) + count;

	pr_info("mpc_length %x\n", mpc->length);
	while (count < mpc->length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
			break;
		case MP_BUS:
			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
			break;
		case MP_IOAPIC:
			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
			break;
		case MP_INTSRC:
			check_irq_src((struct mpc_intsrc *)mpt, &nr_m_spare);
			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
			break;
		case MP_LINTSRC:
			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
			break;
		default:
			/* wrong mptable */
			smp_dump_mptable(mpc, mpt);
			goto out;
		}
	}

#ifdef CONFIG_X86_IO_APIC
	for (i = 0; i < mp_irq_entries; i++) {
		if (irq_used[i])
			continue;

		if (mp_irqs[i].irqtype != mp_INT)
			continue;

		if (mp_irqs[i].irqflag != 0x0f)
			continue;

		if (nr_m_spare > 0) {
			apic_printk(APIC_VERBOSE, "*NEW* found\n");
			nr_m_spare--;
			memcpy(m_spare[nr_m_spare], &mp_irqs[i], sizeof(mp_irqs[i]));
			m_spare[nr_m_spare] = NULL;
		} else {
			struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
			count += sizeof(struct mpc_intsrc);
			if (check_slot(mpc_new_phys, mpc_new_length, count) < 0)
				goto out;
			memcpy(m, &mp_irqs[i], sizeof(*m));
			mpc->length = count;
			mpt += sizeof(struct mpc_intsrc);
		}
		print_mp_irq_info(&mp_irqs[i]);
	}
#endif
out:
	/* update checksum */
	mpc->checksum = 0;
	mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length);

	return 0;
}

int enable_update_mptable;

static int __init update_mptable_setup(char *str)
{
	enable_update_mptable = 1;
#ifdef CONFIG_PCI
	pci_routeirq = 1;
#endif
	return 0;
}
early_param("update_mptable", update_mptable_setup);

static unsigned long __initdata mpc_new_phys;
static unsigned long mpc_new_length __initdata = 4096;

/* alloc_mptable or alloc_mptable=4k */
static int __initdata alloc_mptable;
static int __init parse_alloc_mptable_opt(char *p)
{
	enable_update_mptable = 1;
#ifdef CONFIG_PCI
	pci_routeirq = 1;
#endif
	alloc_mptable = 1;
	if (!p)
		return 0;
	mpc_new_length = memparse(p, &p);
	return 0;
}
early_param("alloc_mptable", parse_alloc_mptable_opt);

void __init e820__memblock_alloc_reserved_mpc_new(void)
{
	if (enable_update_mptable && alloc_mptable)
		mpc_new_phys = e820__memblock_alloc_reserved(mpc_new_length, 4);
}

static int __init update_mp_table(void)
{
	char str[16];
	char oem[10];
	struct mpf_intel *mpf;
	struct mpc_table *mpc, *mpc_new;
	unsigned long size;

	if (!enable_update_mptable)
		return 0;

	if (!mpf_base)
		return 0;

	mpf = early_memremap(mpf_base, sizeof(*mpf));
	if (!mpf) {
		pr_err("MPTABLE: mpf early_memremap() failed\n");
		return 0;
	}

	/*
	 * Now see if we need to go further.
	 */
	if (mpf->feature1)
		goto do_unmap_mpf;

	if (!mpf->physptr)
		goto do_unmap_mpf;

	size = get_mpc_size(mpf->physptr);
	mpc = early_memremap(mpf->physptr, size);
	if (!mpc) {
		pr_err("MPTABLE: mpc early_memremap() failed\n");
		goto do_unmap_mpf;
	}

	if (!smp_check_mpc(mpc, oem, str))
		goto do_unmap_mpc;

	pr_info("mpf: %llx\n", (u64)mpf_base);
	pr_info("physptr: %x\n", mpf->physptr);

	if (mpc_new_phys && mpc->length > mpc_new_length) {
		mpc_new_phys = 0;
		pr_info("mpc_new_length is %ld, please use alloc_mptable=8k\n",
			mpc_new_length);
	}

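	/*
	 * Probe whether the table can be patched in place: store two
	 * different values in the checksum byte and re-sum the table.
	 * If both sums match, the writes did not stick, so the table
	 * lives in ROM and a RAM copy (alloc_mptable) is needed.
	 */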
	if (!mpc_new_phys) {
		unsigned char old, new;
		/* check if we can change the position */
		mpc->checksum = 0;
		old = mpf_checksum((unsigned char *)mpc, mpc->length);
		mpc->checksum = 0xff;
		new = mpf_checksum((unsigned char *)mpc, mpc->length);
		if (old == new) {
			pr_info("mpc is readonly, please try alloc_mptable instead\n");
			goto do_unmap_mpc;
		}
		pr_info("use in-position replacing\n");
	} else {
		mpc_new = early_memremap(mpc_new_phys, mpc_new_length);
		if (!mpc_new) {
			pr_err("MPTABLE: new mpc early_memremap() failed\n");
			goto do_unmap_mpc;
		}
		mpf->physptr = mpc_new_phys;
		memcpy(mpc_new, mpc, mpc->length);
		early_memunmap(mpc, size);
		mpc = mpc_new;
		size = mpc_new_length;
		/* check if we can modify that */
		if (mpc_new_phys - mpf->physptr) {
			struct mpf_intel *mpf_new;
			/* steal 16 bytes from [0, 1k) */
			mpf_new = early_memremap(0x400 - 16, sizeof(*mpf_new));
			if (!mpf_new) {
				pr_err("MPTABLE: new mpf early_memremap() failed\n");
				goto do_unmap_mpc;
			}
			pr_info("mpf new: %x\n", 0x400 - 16);
			memcpy(mpf_new, mpf, 16);
			early_memunmap(mpf, sizeof(*mpf));
			mpf = mpf_new;
			mpf->physptr = mpc_new_phys;
		}
		mpf->checksum = 0;
		mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
		pr_info("physptr new: %x\n", mpf->physptr);
	}

	/*
	 * only replace the one with mp_INT and
	 * MP_IRQ_TRIGGER_LEVEL|MP_IRQ_POLARITY_LOW,
	 * already in mp_irqs , stored by ... and mp_config_acpi_gsi,
	 * may need pci=routeirq for all coverage
	 */
	replace_intsrc_all(mpc, mpc_new_phys, mpc_new_length);

do_unmap_mpc:
	early_memunmap(mpc, size);

do_unmap_mpf:
	early_memunmap(mpf, sizeof(*mpf));

	return 0;
}

late_initcall(update_mp_table);