/*
 *	Intel Multiprocessor Specification 1.1 and 1.4
 *	compliant MP-table parsing routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 *	(c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 *	(c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
 */

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/pci.h>

#include <asm/irqdomain.h>
#include <asm/mtrr.h>
#include <asm/mpspec.h>
#include <asm/pgalloc.h>
#include <asm/io_apic.h>
#include <asm/proto.h>
#include <asm/bios_ebda.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/smp.h>

#include <asm/apic.h>
/*
 * Checksum an MP configuration block.
 */

static int __init mpf_checksum(unsigned char *mp, int len)
{
	int sum = 0;

	while (len--)
		sum += *mp++;

	return sum & 0xFF;
}

int __init default_mpc_apic_id(struct mpc_cpu *m)
{
	return m->apicid;
}

static void __init MP_processor_info(struct mpc_cpu *m)
{
	int apicid;
	char *bootup_cpu = "";

	if (!(m->cpuflag & CPU_ENABLED)) {
		disabled_cpus++;
		return;
	}

	apicid = x86_init.mpparse.mpc_apic_id(m);

	if (m->cpuflag & CPU_BOOTPROCESSOR) {
		bootup_cpu = " (Bootup-CPU)";
		boot_cpu_physical_apicid = m->apicid;
	}

	pr_info("Processor #%d%s\n", m->apicid, bootup_cpu);
	generic_processor_info(apicid, m->apicver);
}

#ifdef CONFIG_X86_IO_APIC
void __init default_mpc_oem_bus_info(struct mpc_bus *m, char *str)
{
	memcpy(str, m->bustype, 6);
	str[6] = 0;
	apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str);
}

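/*
 * MP_bus_info() below classifies each bus entry by its 6-character,
 * space-padded type string ("ISA", "PCI", "EISA", ...).  The net effect,
 * roughly sketched (the real code also fills in mp_bus_id_to_type[]
 * when CONFIG_EISA is set):
 *
 *	set_bit(busid, mp_bus_not_pci);			// assume non-PCI first
 *	if (bustype is "PCI")
 *		clear_bit(busid, mp_bus_not_pci);	// it is PCI after all
 *
 * so mp_bus_not_pci ends up as a bitmap of busses whose interrupts are
 * not routed PCI-style.
 */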
static void __init MP_bus_info(struct mpc_bus *m)
{
	char str[7];

	x86_init.mpparse.mpc_oem_bus_info(m, str);

#if MAX_MP_BUSSES < 256
	if (m->busid >= MAX_MP_BUSSES) {
		pr_warn("MP table busid value (%d) for bustype %s is too large, max. supported is %d\n",
			m->busid, str, MAX_MP_BUSSES - 1);
		return;
	}
#endif

	set_bit(m->busid, mp_bus_not_pci);
	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
#ifdef CONFIG_EISA
		mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
#endif
	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
		if (x86_init.mpparse.mpc_oem_pci_bus)
			x86_init.mpparse.mpc_oem_pci_bus(m);

		clear_bit(m->busid, mp_bus_not_pci);
#ifdef CONFIG_EISA
		mp_bus_id_to_type[m->busid] = MP_BUS_PCI;
	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
		mp_bus_id_to_type[m->busid] = MP_BUS_EISA;
#endif
	} else
		pr_warn("Unknown bustype %s - ignoring\n", str);
}

static void __init MP_ioapic_info(struct mpc_ioapic *m)
{
	struct ioapic_domain_cfg cfg = {
		.type = IOAPIC_DOMAIN_LEGACY,
		.ops = &mp_ioapic_irqdomain_ops,
	};

	if (m->flags & MPC_APIC_USABLE)
		mp_register_ioapic(m->apicid, m->apicaddr, gsi_top, &cfg);
}

static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
{
	apic_printk(APIC_VERBOSE,
		"Int: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC INT %02x\n",
		mp_irq->irqtype, mp_irq->irqflag & 3,
		(mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
		mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
}

#else /* CONFIG_X86_IO_APIC */
static inline void __init MP_bus_info(struct mpc_bus *m) {}
static inline void __init MP_ioapic_info(struct mpc_ioapic *m) {}
#endif /* CONFIG_X86_IO_APIC */

static void __init MP_lintsrc_info(struct mpc_lintsrc *m)
{
	apic_printk(APIC_VERBOSE,
		"Lint: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC LINT %02x\n",
		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbusid,
		m->srcbusirq, m->destapic, m->destapiclint);
}

/*
 * Read/parse the MPC
 */
static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str)
{

	if (memcmp(mpc->signature, MPC_SIGNATURE, 4)) {
		pr_err("MPTABLE: bad signature [%c%c%c%c]!\n",
		       mpc->signature[0], mpc->signature[1],
		       mpc->signature[2], mpc->signature[3]);
		return 0;
	}
	if (mpf_checksum((unsigned char *)mpc, mpc->length)) {
		pr_err("MPTABLE: checksum error!\n");
		return 0;
	}
	if (mpc->spec != 0x01 && mpc->spec != 0x04) {
		pr_err("MPTABLE: bad table version (%d)!!\n", mpc->spec);
		return 0;
	}
	if (!mpc->lapic) {
		pr_err("MPTABLE: null local APIC address!\n");
		return 0;
	}
	memcpy(oem, mpc->oem, 8);
	oem[8] = 0;
	pr_info("MPTABLE: OEM ID: %s\n", oem);

	memcpy(str, mpc->productid, 12);
	str[12] = 0;

	pr_info("MPTABLE: Product ID: %s\n", str);

	pr_info("MPTABLE: APIC at: 0x%X\n", mpc->lapic);

	return 1;
}

static void skip_entry(unsigned char **ptr, int *count, int size)
{
	*ptr += size;
	*count += size;
}

static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
{
	pr_err("Your mptable is wrong, contact your HW vendor!\n");
	pr_cont("type %x\n", *mpt);
	print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_ADDRESS, 16,
			1, mpc, mpc->length, 1);
}

void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { }

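/*
 * Layout of the configuration table walked by smp_read_mpc(), per the
 * MP 1.4 spec: a struct mpc_table header immediately followed by
 * mpc->length - sizeof(header) bytes of variable entries.  The first
 * byte of every entry is its type, which also fixes its size:
 *
 *	MP_PROCESSOR	sizeof(struct mpc_cpu)		(20 bytes)
 *	MP_BUS		sizeof(struct mpc_bus)		( 8 bytes)
 *	MP_IOAPIC	sizeof(struct mpc_ioapic)	( 8 bytes)
 *	MP_INTSRC	sizeof(struct mpc_intsrc)	( 8 bytes)
 *	MP_LINTSRC	sizeof(struct mpc_lintsrc)	( 8 bytes)
 *
 * The byte sizes above are the spec's; the parser itself only relies on
 * the sizeof() values passed to skip_entry().
 */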
static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
{
	char str[16];
	char oem[10];

	int count = sizeof(*mpc);
	unsigned char *mpt = ((unsigned char *)mpc) + count;

	if (!smp_check_mpc(mpc, oem, str))
		return 0;

	/* Initialize the lapic mapping */
	if (!acpi_lapic)
		register_lapic_address(mpc->lapic);

	if (early)
		return 1;

	if (mpc->oemptr)
		x86_init.mpparse.smp_read_mpc_oem(mpc);

	/*
	 * Now process the configuration blocks.
	 */
	x86_init.mpparse.mpc_record(0);

	while (count < mpc->length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			/* ACPI may have already provided this data */
			if (!acpi_lapic)
				MP_processor_info((struct mpc_cpu *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
			break;
		case MP_BUS:
			MP_bus_info((struct mpc_bus *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
			break;
		case MP_IOAPIC:
			MP_ioapic_info((struct mpc_ioapic *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
			break;
		case MP_INTSRC:
			mp_save_irq((struct mpc_intsrc *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
			break;
		case MP_LINTSRC:
			MP_lintsrc_info((struct mpc_lintsrc *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
			break;
		default:
			/* wrong mptable */
			smp_dump_mptable(mpc, mpt);
			count = mpc->length;
			break;
		}
		x86_init.mpparse.mpc_record(1);
	}

	if (!num_processors)
		pr_err("MPTABLE: no processors registered!\n");
	return num_processors;
}

#ifdef CONFIG_X86_IO_APIC

static int __init ELCR_trigger(unsigned int irq)
{
	unsigned int port;

	port = 0x4d0 + (irq >> 3);
	return (inb(port) >> (irq & 7)) & 1;
}

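/*
 * The two ELCR registers at I/O ports 0x4d0/0x4d1 form a 16-bit mask
 * with one bit per ISA IRQ: a set bit means the line is level-triggered.
 * ELCR_trigger() above does the address math directly, e.g. for IRQ 9:
 *
 *	port = 0x4d0 + (9 >> 3) = 0x4d1;	bit = 9 & 7 = 1;
 *
 * When the ELCR fallback is used below, the result is folded into the
 * MP-table irqflag encoding also decoded by print_mp_irq_info():
 * bits 1:0 are the polarity, bits 3:2 the trigger mode, so the value
 * 13 (0b1101) means level-triggered, active-high.
 */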
static void __init construct_default_ioirq_mptable(int mpc_default_type)
{
	struct mpc_intsrc intsrc;
	int i;
	int ELCR_fallback = 0;

	intsrc.type = MP_INTSRC;
	intsrc.irqflag = 0;			/* conforming */
	intsrc.srcbus = 0;
	intsrc.dstapic = mpc_ioapic_id(0);

	intsrc.irqtype = mp_INT;

	/*
	 * If true, we have an ISA/PCI system with no IRQ entries
	 * in the MP table. To prevent the PCI interrupts from being set up
	 * incorrectly, we try to use the ELCR. The sanity check to see if
	 * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
	 * never be level sensitive, so we simply see if the ELCR agrees.
	 * If it does, we assume it's valid.
	 */
	if (mpc_default_type == 5) {
		pr_info("ISA/PCI bus type with no IRQ information... falling back to ELCR\n");

		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) ||
		    ELCR_trigger(13))
			pr_err("ELCR contains invalid data... not using ELCR\n");
		else {
			pr_info("Using ELCR to identify PCI interrupts\n");
			ELCR_fallback = 1;
		}
	}

	for (i = 0; i < 16; i++) {
		switch (mpc_default_type) {
		case 2:
			if (i == 0 || i == 13)
				continue;	/* IRQ0 & IRQ13 not connected */
			/* fall through */
		default:
			if (i == 2)
				continue;	/* IRQ2 is never connected */
		}

		if (ELCR_fallback) {
			/*
			 * If the ELCR indicates a level-sensitive interrupt, we
			 * copy that information over to the MP table in the
			 * irqflag field (level sensitive, active high polarity).
			 */
			if (ELCR_trigger(i))
				intsrc.irqflag = 13;
			else
				intsrc.irqflag = 0;
		}

		intsrc.srcbusirq = i;
		intsrc.dstirq = i ? i : 2;	/* IRQ0 to INTIN2 */
		mp_save_irq(&intsrc);
	}

	intsrc.irqtype = mp_ExtINT;
	intsrc.srcbusirq = 0;
	intsrc.dstirq = 0;			/* 8259A to INTIN0 */
	mp_save_irq(&intsrc);
}


static void __init construct_ioapic_table(int mpc_default_type)
{
	struct mpc_ioapic ioapic;
	struct mpc_bus bus;

	bus.type = MP_BUS;
	bus.busid = 0;
	switch (mpc_default_type) {
	default:
		pr_err("???\nUnknown standard configuration %d\n",
		       mpc_default_type);
		/* fall through */
	case 1:
	case 5:
		memcpy(bus.bustype, "ISA   ", 6);
		break;
	case 2:
	case 6:
	case 3:
		memcpy(bus.bustype, "EISA  ", 6);
		break;
	}
	MP_bus_info(&bus);
	if (mpc_default_type > 4) {
		bus.busid = 1;
		memcpy(bus.bustype, "PCI   ", 6);
		MP_bus_info(&bus);
	}

	ioapic.type	= MP_IOAPIC;
	ioapic.apicid	= 2;
	ioapic.apicver	= mpc_default_type > 4 ? 0x10 : 0x01;
	ioapic.flags	= MPC_APIC_USABLE;
	ioapic.apicaddr	= IO_APIC_DEFAULT_PHYS_BASE;
	MP_ioapic_info(&ioapic);

	/*
	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
	 */
	construct_default_ioirq_mptable(mpc_default_type);
}
#else
static inline void __init construct_ioapic_table(int mpc_default_type) { }
#endif

static inline void __init construct_default_ISA_mptable(int mpc_default_type)
{
	struct mpc_cpu processor;
	struct mpc_lintsrc lintsrc;
	int linttypes[2] = { mp_ExtINT, mp_NMI };
	int i;

	/*
	 * local APIC has default address
	 */
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

	/*
	 * 2 CPUs, numbered 0 & 1.
	 */
	processor.type = MP_PROCESSOR;
	/* Either an integrated APIC or a discrete 82489DX. */
	processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	processor.cpuflag = CPU_ENABLED;
	processor.cpufeature = (boot_cpu_data.x86 << 8) |
	    (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
	processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
	processor.reserved[0] = 0;
	processor.reserved[1] = 0;
	for (i = 0; i < 2; i++) {
		processor.apicid = i;
		MP_processor_info(&processor);
	}

	construct_ioapic_table(mpc_default_type);

	lintsrc.type = MP_LINTSRC;
	lintsrc.irqflag = 0;		/* conforming */
	lintsrc.srcbusid = 0;
	lintsrc.srcbusirq = 0;
	lintsrc.destapic = MP_APIC_ALL;
	for (i = 0; i < 2; i++) {
		lintsrc.irqtype = linttypes[i];
		lintsrc.destapiclint = i;
		MP_lintsrc_info(&lintsrc);
	}
}

static struct mpf_intel *mpf_found;

static unsigned long __init get_mpc_size(unsigned long physptr)
{
	struct mpc_table *mpc;
	unsigned long size;

	mpc = early_ioremap(physptr, PAGE_SIZE);
	size = mpc->length;
	early_iounmap(mpc, PAGE_SIZE);
	apic_printk(APIC_VERBOSE, " mpc: %lx-%lx\n", physptr, physptr + size);

	return size;
}

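/*
 * Note on the double early_ioremap() dance in check_physptr() below:
 * the length of the configuration table is only known once its header
 * has been read, so get_mpc_size() first maps a single page to fetch
 * mpc->length, and check_physptr() then remaps exactly that many bytes
 * before handing the table to smp_read_mpc().
 */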
static int __init check_physptr(struct mpf_intel *mpf, unsigned int early)
{
	struct mpc_table *mpc;
	unsigned long size;

	size = get_mpc_size(mpf->physptr);
	mpc = early_ioremap(mpf->physptr, size);
	/*
	 * Read the physical hardware table. Anything here will
	 * override the defaults.
	 */
	if (!smp_read_mpc(mpc, early)) {
#ifdef CONFIG_X86_LOCAL_APIC
		smp_found_config = 0;
#endif
		pr_err("BIOS bug, MP table errors detected!...\n");
		pr_cont("... disabling SMP support. (tell your hw vendor)\n");
		early_iounmap(mpc, size);
		return -1;
	}
	early_iounmap(mpc, size);

	if (early)
		return -1;

#ifdef CONFIG_X86_IO_APIC
	/*
	 * If there are no explicit MP IRQ entries, then we are
	 * broken. We set up most of the low 16 IO-APIC pins to
	 * ISA defaults and hope it will work.
	 */
	if (!mp_irq_entries) {
		struct mpc_bus bus;

		pr_err("BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");

		bus.type = MP_BUS;
		bus.busid = 0;
		memcpy(bus.bustype, "ISA   ", 6);
		MP_bus_info(&bus);

		construct_default_ioirq_mptable(0);
	}
#endif

	return 0;
}

/*
 * Scan the memory blocks for an SMP configuration block.
 */
void __init default_get_smp_config(unsigned int early)
{
	struct mpf_intel *mpf = mpf_found;

	if (!mpf)
		return;

	if (acpi_lapic && early)
		return;

	/*
	 * MPS doesn't support hyperthreading, aka only have
	 * thread 0 apic id in MPS table
	 */
	if (acpi_lapic && acpi_ioapic)
		return;

	pr_info("Intel MultiProcessor Specification v1.%d\n",
		mpf->specification);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
	if (mpf->feature2 & (1 << 7)) {
		pr_info("    IMCR and PIC compatibility mode.\n");
		pic_mode = 1;
	} else {
		pr_info("    Virtual Wire compatibility mode.\n");
		pic_mode = 0;
	}
#endif
	/*
	 * Now see if we need to read further.
	 */
	if (mpf->feature1 != 0) {
		if (early) {
			/*
			 * local APIC has default address
			 */
			mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
			return;
		}

		pr_info("Default MP configuration #%d\n", mpf->feature1);
		construct_default_ISA_mptable(mpf->feature1);

	} else if (mpf->physptr) {
		if (check_physptr(mpf, early))
			return;
	} else
		BUG();

	if (!early)
		pr_info("Processors: %d\n", num_processors);
	/*
	 * Only use the first configuration found.
	 */
}

static void __init smp_reserve_memory(struct mpf_intel *mpf)
{
	memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr));
}

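/*
 * What smp_scan_config() below is looking for is the MP floating
 * pointer structure (struct mpf_intel): per the spec a 16-byte,
 * paragraph-aligned block that starts with the "_MP_" signature
 * (SMP_MAGIC_IDENT), has a length field of 1 (its size in 16-byte
 * units), a spec revision of 1 or 4, a checksum that makes all 16
 * bytes sum to zero, and optionally a physical pointer to the full
 * configuration table.  The scanner therefore steps through the
 * candidate area 16 bytes at a time (bp += 4 on an unsigned int
 * pointer) and applies exactly these checks.
 */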
static int __init smp_scan_config(unsigned long base, unsigned long length)
{
	unsigned int *bp = phys_to_virt(base);
	struct mpf_intel *mpf;
	unsigned long mem;

	apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n",
		    base, base + length - 1);
	BUILD_BUG_ON(sizeof(*mpf) != 16);

	while (length > 0) {
		mpf = (struct mpf_intel *)bp;
		if ((*bp == SMP_MAGIC_IDENT) &&
		    (mpf->length == 1) &&
		    !mpf_checksum((unsigned char *)bp, 16) &&
		    ((mpf->specification == 1)
		     || (mpf->specification == 4))) {
#ifdef CONFIG_X86_LOCAL_APIC
			smp_found_config = 1;
#endif
			mpf_found = mpf;

			pr_info("found SMP MP-table at [mem %#010llx-%#010llx] mapped at [%p]\n",
				(unsigned long long) virt_to_phys(mpf),
				(unsigned long long) virt_to_phys(mpf) +
				sizeof(*mpf) - 1, mpf);

			mem = virt_to_phys(mpf);
			memblock_reserve(mem, sizeof(*mpf));
			if (mpf->physptr)
				smp_reserve_memory(mpf);

			return 1;
		}
		bp += 4;
		length -= 16;
	}
	return 0;
}

void __init default_find_smp_config(void)
{
	unsigned int address;

	/*
	 * FIXME: Linux assumes you have 640K of base ram..
	 * this continues the error...
	 *
	 * 1) Scan the bottom 1K for a signature
	 * 2) Scan the top 1K of base RAM
	 * 3) Scan the 64K of bios
	 */
	if (smp_scan_config(0x0, 0x400) ||
	    smp_scan_config(639 * 0x400, 0x400) ||
	    smp_scan_config(0xF0000, 0x10000))
		return;
	/*
	 * If it is an SMP machine we should know now, unless the
	 * configuration is in an EISA bus machine with an
	 * extended bios data area.
	 *
	 * there is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E, calculate and scan it here.
	 *
	 * NOTE! There are Linux loaders that will corrupt the EBDA
	 * area, and as such this kind of SMP config may be less
	 * trustworthy, simply because the SMP table may have been
	 * stomped on during early boot. These loaders are buggy and
	 * should be fixed.
	 *
	 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
	 */

	address = get_bios_ebda();
	if (address)
		smp_scan_config(address, 0x400);
}

#ifdef CONFIG_X86_IO_APIC
static u8 __initdata irq_used[MAX_IRQ_SOURCES];

static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
{
	int i;

	if (m->irqtype != mp_INT)
		return 0;

	if (m->irqflag != 0x0f)
		return 0;

	/* not legacy */

	for (i = 0; i < mp_irq_entries; i++) {
		if (mp_irqs[i].irqtype != mp_INT)
			continue;

		if (mp_irqs[i].irqflag != 0x0f)
			continue;

		if (mp_irqs[i].srcbus != m->srcbus)
			continue;
		if (mp_irqs[i].srcbusirq != m->srcbusirq)
			continue;
		if (irq_used[i]) {
			/* already claimed */
			return -2;
		}
		irq_used[i] = 1;
		return i;
	}

	/* not found */
	return -1;
}

#define SPARE_SLOT_NUM 20

static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];

static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
{
	int i;

	apic_printk(APIC_VERBOSE, "OLD ");
	print_mp_irq_info(m);

	i = get_MP_intsrc_index(m);
	if (i > 0) {
		memcpy(m, &mp_irqs[i], sizeof(*m));
		apic_printk(APIC_VERBOSE, "NEW ");
		print_mp_irq_info(&mp_irqs[i]);
		return;
	}
	if (!i) {
		/* legacy, do nothing */
		return;
	}
	if (*nr_m_spare < SPARE_SLOT_NUM) {
		/*
		 * not found (-1), or duplicated (-2) are invalid entries,
		 * we need to use the slot later
		 */
		m_spare[*nr_m_spare] = m;
		*nr_m_spare += 1;
	}
}

static int __init
check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count)
{
	if (!mpc_new_phys || count <= mpc_new_length) {
		WARN(1, "update_mptable: No spare slots (length: %x)\n", count);
		return -1;
	}

	return 0;
}
#else /* CONFIG_X86_IO_APIC */
static
inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
#endif /* CONFIG_X86_IO_APIC */

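/*
 * replace_intsrc_all() below refreshes the MP-table interrupt entries
 * from the kernel's own mp_irqs[] array in two passes:
 *
 *  1) walk the existing table; a non-legacy mp_INT entry (irqflag ==
 *     0x0f, i.e. level-triggered/active-low) is refreshed from its
 *     matching mp_irqs[] entry, while unmatched or duplicated entries
 *     are parked in m_spare[] for reuse;
 *  2) every mp_irqs[] entry not claimed in pass 1 is written into a
 *     parked spare slot, or appended at the end of the table when room
 *     was reserved via alloc_mptable (check_slot() guards the append);
 *     the table checksum is recomputed at the end.
 */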
static int __init replace_intsrc_all(struct mpc_table *mpc,
				     unsigned long mpc_new_phys,
				     unsigned long mpc_new_length)
{
#ifdef CONFIG_X86_IO_APIC
	int i;
#endif
	int count = sizeof(*mpc);
	int nr_m_spare = 0;
	unsigned char *mpt = ((unsigned char *)mpc) + count;

	pr_info("mpc_length %x\n", mpc->length);
	while (count < mpc->length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
			break;
		case MP_BUS:
			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
			break;
		case MP_IOAPIC:
			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
			break;
		case MP_INTSRC:
			check_irq_src((struct mpc_intsrc *)mpt, &nr_m_spare);
			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
			break;
		case MP_LINTSRC:
			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
			break;
		default:
			/* wrong mptable */
			smp_dump_mptable(mpc, mpt);
			goto out;
		}
	}

#ifdef CONFIG_X86_IO_APIC
	for (i = 0; i < mp_irq_entries; i++) {
		if (irq_used[i])
			continue;

		if (mp_irqs[i].irqtype != mp_INT)
			continue;

		if (mp_irqs[i].irqflag != 0x0f)
			continue;

		if (nr_m_spare > 0) {
			apic_printk(APIC_VERBOSE, "*NEW* found\n");
			nr_m_spare--;
			memcpy(m_spare[nr_m_spare], &mp_irqs[i], sizeof(mp_irqs[i]));
			m_spare[nr_m_spare] = NULL;
		} else {
			struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
			count += sizeof(struct mpc_intsrc);
			if (check_slot(mpc_new_phys, mpc_new_length, count) < 0)
				goto out;
			memcpy(m, &mp_irqs[i], sizeof(*m));
			mpc->length = count;
			mpt += sizeof(struct mpc_intsrc);
		}
		print_mp_irq_info(&mp_irqs[i]);
	}
#endif
out:
	/* update checksum */
	mpc->checksum = 0;
	mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length);

	return 0;
}

int enable_update_mptable;

static int __init update_mptable_setup(char *str)
{
	enable_update_mptable = 1;
#ifdef CONFIG_PCI
	pci_routeirq = 1;
#endif
	return 0;
}
early_param("update_mptable", update_mptable_setup);

static unsigned long __initdata mpc_new_phys;
static unsigned long mpc_new_length __initdata = 4096;

/* alloc_mptable or alloc_mptable=4k */
static int __initdata alloc_mptable;
static int __init parse_alloc_mptable_opt(char *p)
{
	enable_update_mptable = 1;
#ifdef CONFIG_PCI
	pci_routeirq = 1;
#endif
	alloc_mptable = 1;
	if (!p)
		return 0;
	mpc_new_length = memparse(p, &p);
	return 0;
}
early_param("alloc_mptable", parse_alloc_mptable_opt);

void __init early_reserve_e820_mpc_new(void)
{
	if (enable_update_mptable && alloc_mptable)
		mpc_new_phys = early_reserve_e820(mpc_new_length, 4);
}

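/*
 * update_mp_table() runs as a late initcall when "update_mptable" was
 * given on the command line.  It first probes whether the table can be
 * patched in place: the checksum byte is written twice (0, then 0xff)
 * and the resulting sums compared - if they do not differ, the table
 * sits in read-only memory and the user is told to boot with
 * "alloc_mptable" instead, which lets early_reserve_e820_mpc_new()
 * reserve a fresh region (4k by default, e.g. "alloc_mptable=8k") into
 * which the table is copied, with the floating pointer redirected to
 * the copy.
 */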
static int __init update_mp_table(void)
{
	char str[16];
	char oem[10];
	struct mpf_intel *mpf;
	struct mpc_table *mpc, *mpc_new;

	if (!enable_update_mptable)
		return 0;

	mpf = mpf_found;
	if (!mpf)
		return 0;

	/*
	 * Now see if we need to go further.
	 */
	if (mpf->feature1 != 0)
		return 0;

	if (!mpf->physptr)
		return 0;

	mpc = phys_to_virt(mpf->physptr);

	if (!smp_check_mpc(mpc, oem, str))
		return 0;

	pr_info("mpf: %llx\n", (u64)virt_to_phys(mpf));
	pr_info("physptr: %x\n", mpf->physptr);

	if (mpc_new_phys && mpc->length > mpc_new_length) {
		mpc_new_phys = 0;
		pr_info("mpc_new_length is %ld, please use alloc_mptable=8k\n",
			mpc_new_length);
	}

	if (!mpc_new_phys) {
		unsigned char old, new;
		/* check if we can change the position */
		mpc->checksum = 0;
		old = mpf_checksum((unsigned char *)mpc, mpc->length);
		mpc->checksum = 0xff;
		new = mpf_checksum((unsigned char *)mpc, mpc->length);
		if (old == new) {
			pr_info("mpc is readonly, please try alloc_mptable instead\n");
			return 0;
		}
		pr_info("use in-position replacing\n");
	} else {
		mpf->physptr = mpc_new_phys;
		mpc_new = phys_to_virt(mpc_new_phys);
		memcpy(mpc_new, mpc, mpc->length);
		mpc = mpc_new;
		/* check if we can modify that */
		if (mpc_new_phys - mpf->physptr) {
			struct mpf_intel *mpf_new;
			/* steal 16 bytes from [0, 1k) */
			pr_info("mpf new: %x\n", 0x400 - 16);
			mpf_new = phys_to_virt(0x400 - 16);
			memcpy(mpf_new, mpf, 16);
			mpf = mpf_new;
			mpf->physptr = mpc_new_phys;
		}
		mpf->checksum = 0;
		mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
		pr_info("physptr new: %x\n", mpf->physptr);
	}

	/*
	 * only replace the one with mp_INT and
	 *	MP_IRQ_TRIGGER_LEVEL|MP_IRQ_POLARITY_LOW,
	 * already in mp_irqs , stored by ... and mp_config_acpi_gsi,
	 * may need pci=routeirq for all coverage
	 */
	replace_intsrc_all(mpc, mpc_new_phys, mpc_new_length);

	return 0;
}

late_initcall(update_mp_table);