1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * acpi.c - Architecture-Specific Low-Level ACPI Support 4 * 5 * Copyright (C) 1999 VA Linux Systems 6 * Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com> 7 * Copyright (C) 2000, 2002-2003 Hewlett-Packard Co. 8 * David Mosberger-Tang <davidm@hpl.hp.com> 9 * Copyright (C) 2000 Intel Corp. 10 * Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com> 11 * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 12 * Copyright (C) 2001 Jenna Hall <jenna.s.hall@intel.com> 13 * Copyright (C) 2001 Takayoshi Kochi <t-kochi@bq.jp.nec.com> 14 * Copyright (C) 2002 Erich Focht <efocht@ess.nec.de> 15 * Copyright (C) 2004 Ashok Raj <ashok.raj@intel.com> 16 */ 17 18 #include <linux/module.h> 19 #include <linux/init.h> 20 #include <linux/kernel.h> 21 #include <linux/sched.h> 22 #include <linux/smp.h> 23 #include <linux/string.h> 24 #include <linux/types.h> 25 #include <linux/irq.h> 26 #include <linux/acpi.h> 27 #include <linux/efi.h> 28 #include <linux/mmzone.h> 29 #include <linux/nodemask.h> 30 #include <linux/slab.h> 31 #include <acpi/processor.h> 32 #include <asm/io.h> 33 #include <asm/iosapic.h> 34 #include <asm/machvec.h> 35 #include <asm/page.h> 36 #include <asm/numa.h> 37 #include <asm/sal.h> 38 #include <asm/cyclone.h> 39 40 #define PREFIX "ACPI: " 41 42 int acpi_lapic; 43 unsigned int acpi_cpei_override; 44 unsigned int acpi_cpei_phys_cpuid; 45 46 unsigned long acpi_wakeup_address = 0; 47 48 #ifdef CONFIG_IA64_GENERIC 49 static unsigned long __init acpi_find_rsdp(void) 50 { 51 unsigned long rsdp_phys = 0; 52 53 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) 54 rsdp_phys = efi.acpi20; 55 else if (efi.acpi != EFI_INVALID_TABLE_ADDR) 56 printk(KERN_WARNING PREFIX 57 "v1.0/r0.71 tables no longer supported\n"); 58 return rsdp_phys; 59 } 60 61 const char __init * 62 acpi_get_sysname(void) 63 { 64 unsigned long rsdp_phys; 65 struct acpi_table_rsdp *rsdp; 66 struct acpi_table_xsdt *xsdt; 67 struct acpi_table_header *hdr; 68 #ifdef CONFIG_INTEL_IOMMU 69 u64 i, nentries; 70 #endif 71 72 rsdp_phys = acpi_find_rsdp(); 73 if (!rsdp_phys) { 74 printk(KERN_ERR 75 "ACPI 2.0 RSDP not found, default to \"dig\"\n"); 76 return "dig"; 77 } 78 79 rsdp = (struct acpi_table_rsdp *)__va(rsdp_phys); 80 if (strncmp(rsdp->signature, ACPI_SIG_RSDP, sizeof(ACPI_SIG_RSDP) - 1)) { 81 printk(KERN_ERR 82 "ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n"); 83 return "dig"; 84 } 85 86 xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_physical_address); 87 hdr = &xsdt->header; 88 if (strncmp(hdr->signature, ACPI_SIG_XSDT, sizeof(ACPI_SIG_XSDT) - 1)) { 89 printk(KERN_ERR 90 "ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n"); 91 return "dig"; 92 } 93 94 if (!strcmp(hdr->oem_id, "HP")) { 95 return "hpzx1"; 96 } else if (!strcmp(hdr->oem_id, "SGI")) { 97 if (!strcmp(hdr->oem_table_id + 4, "UV")) 98 return "uv"; 99 else 100 return "sn2"; 101 } 102 103 #ifdef CONFIG_INTEL_IOMMU 104 /* Look for Intel IOMMU */ 105 nentries = (hdr->length - sizeof(*hdr)) / 106 sizeof(xsdt->table_offset_entry[0]); 107 for (i = 0; i < nentries; i++) { 108 hdr = __va(xsdt->table_offset_entry[i]); 109 if (strncmp(hdr->signature, ACPI_SIG_DMAR, 110 sizeof(ACPI_SIG_DMAR) - 1) == 0) 111 return "dig_vtd"; 112 } 113 #endif 114 115 return "dig"; 116 } 117 #endif /* CONFIG_IA64_GENERIC */ 118 119 #define ACPI_MAX_PLATFORM_INTERRUPTS 256 120 121 /* Array to record platform interrupt vectors for generic interrupt routing. */ 122 int platform_intr_list[ACPI_MAX_PLATFORM_INTERRUPTS] = { 123 [0 ... ACPI_MAX_PLATFORM_INTERRUPTS - 1] = -1 124 }; 125 126 enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_IOSAPIC; 127 128 /* 129 * Interrupt routing API for device drivers. Provides interrupt vector for 130 * a generic platform event. Currently only CPEI is implemented. 131 */ 132 int acpi_request_vector(u32 int_type) 133 { 134 int vector = -1; 135 136 if (int_type < ACPI_MAX_PLATFORM_INTERRUPTS) { 137 /* corrected platform error interrupt */ 138 vector = platform_intr_list[int_type]; 139 } else 140 printk(KERN_ERR 141 "acpi_request_vector(): invalid interrupt type\n"); 142 return vector; 143 } 144 145 void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size) 146 { 147 return __va(phys); 148 } 149 150 void __init __acpi_unmap_table(void __iomem *map, unsigned long size) 151 { 152 } 153 154 /* -------------------------------------------------------------------------- 155 Boot-time Table Parsing 156 -------------------------------------------------------------------------- */ 157 158 static int available_cpus __initdata; 159 struct acpi_table_madt *acpi_madt __initdata; 160 static u8 has_8259; 161 162 static int __init 163 acpi_parse_lapic_addr_ovr(union acpi_subtable_headers * header, 164 const unsigned long end) 165 { 166 struct acpi_madt_local_apic_override *lapic; 167 168 lapic = (struct acpi_madt_local_apic_override *)header; 169 170 if (BAD_MADT_ENTRY(lapic, end)) 171 return -EINVAL; 172 173 if (lapic->address) { 174 iounmap(ipi_base_addr); 175 ipi_base_addr = ioremap(lapic->address, 0); 176 } 177 return 0; 178 } 179 180 static int __init 181 acpi_parse_lsapic(union acpi_subtable_headers *header, const unsigned long end) 182 { 183 struct acpi_madt_local_sapic *lsapic; 184 185 lsapic = (struct acpi_madt_local_sapic *)header; 186 187 /*Skip BAD_MADT_ENTRY check, as lsapic size could vary */ 188 189 if (lsapic->lapic_flags & ACPI_MADT_ENABLED) { 190 #ifdef CONFIG_SMP 191 smp_boot_data.cpu_phys_id[available_cpus] = 192 (lsapic->id << 8) | lsapic->eid; 193 #endif 194 ++available_cpus; 195 } 196 197 total_cpus++; 198 return 0; 199 } 200 201 static int __init 202 acpi_parse_lapic_nmi(union acpi_subtable_headers * header, const unsigned long end) 203 { 204 struct acpi_madt_local_apic_nmi *lacpi_nmi; 205 206 lacpi_nmi = (struct acpi_madt_local_apic_nmi *)header; 207 208 if (BAD_MADT_ENTRY(lacpi_nmi, end)) 209 return -EINVAL; 210 211 /* TBD: Support lapic_nmi entries */ 212 return 0; 213 } 214 215 static int __init 216 acpi_parse_iosapic(union acpi_subtable_headers * header, const unsigned long end) 217 { 218 struct acpi_madt_io_sapic *iosapic; 219 220 iosapic = (struct acpi_madt_io_sapic *)header; 221 222 if (BAD_MADT_ENTRY(iosapic, end)) 223 return -EINVAL; 224 225 return iosapic_init(iosapic->address, iosapic->global_irq_base); 226 } 227 228 static unsigned int __initdata acpi_madt_rev; 229 230 static int __init 231 acpi_parse_plat_int_src(union acpi_subtable_headers * header, 232 const unsigned long end) 233 { 234 struct acpi_madt_interrupt_source *plintsrc; 235 int vector; 236 237 plintsrc = (struct acpi_madt_interrupt_source *)header; 238 239 if (BAD_MADT_ENTRY(plintsrc, end)) 240 return -EINVAL; 241 242 /* 243 * Get vector assignment for this interrupt, set attributes, 244 * and program the IOSAPIC routing table. 245 */ 246 vector = iosapic_register_platform_intr(plintsrc->type, 247 plintsrc->global_irq, 248 plintsrc->io_sapic_vector, 249 plintsrc->eid, 250 plintsrc->id, 251 ((plintsrc->inti_flags & ACPI_MADT_POLARITY_MASK) == 252 ACPI_MADT_POLARITY_ACTIVE_HIGH) ? 253 IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW, 254 ((plintsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) == 255 ACPI_MADT_TRIGGER_EDGE) ? 256 IOSAPIC_EDGE : IOSAPIC_LEVEL); 257 258 platform_intr_list[plintsrc->type] = vector; 259 if (acpi_madt_rev > 1) { 260 acpi_cpei_override = plintsrc->flags & ACPI_MADT_CPEI_OVERRIDE; 261 } 262 263 /* 264 * Save the physical id, so we can check when its being removed 265 */ 266 acpi_cpei_phys_cpuid = ((plintsrc->id << 8) | (plintsrc->eid)) & 0xffff; 267 268 return 0; 269 } 270 271 #ifdef CONFIG_HOTPLUG_CPU 272 unsigned int can_cpei_retarget(void) 273 { 274 extern int cpe_vector; 275 extern unsigned int force_cpei_retarget; 276 277 /* 278 * Only if CPEI is supported and the override flag 279 * is present, otherwise return that its re-targettable 280 * if we are in polling mode. 281 */ 282 if (cpe_vector > 0) { 283 if (acpi_cpei_override || force_cpei_retarget) 284 return 1; 285 else 286 return 0; 287 } 288 return 1; 289 } 290 291 unsigned int is_cpu_cpei_target(unsigned int cpu) 292 { 293 unsigned int logical_id; 294 295 logical_id = cpu_logical_id(acpi_cpei_phys_cpuid); 296 297 if (logical_id == cpu) 298 return 1; 299 else 300 return 0; 301 } 302 303 void set_cpei_target_cpu(unsigned int cpu) 304 { 305 acpi_cpei_phys_cpuid = cpu_physical_id(cpu); 306 } 307 #endif 308 309 unsigned int get_cpei_target_cpu(void) 310 { 311 return acpi_cpei_phys_cpuid; 312 } 313 314 static int __init 315 acpi_parse_int_src_ovr(union acpi_subtable_headers * header, 316 const unsigned long end) 317 { 318 struct acpi_madt_interrupt_override *p; 319 320 p = (struct acpi_madt_interrupt_override *)header; 321 322 if (BAD_MADT_ENTRY(p, end)) 323 return -EINVAL; 324 325 iosapic_override_isa_irq(p->source_irq, p->global_irq, 326 ((p->inti_flags & ACPI_MADT_POLARITY_MASK) == 327 ACPI_MADT_POLARITY_ACTIVE_LOW) ? 328 IOSAPIC_POL_LOW : IOSAPIC_POL_HIGH, 329 ((p->inti_flags & ACPI_MADT_TRIGGER_MASK) == 330 ACPI_MADT_TRIGGER_LEVEL) ? 331 IOSAPIC_LEVEL : IOSAPIC_EDGE); 332 return 0; 333 } 334 335 static int __init 336 acpi_parse_nmi_src(union acpi_subtable_headers * header, const unsigned long end) 337 { 338 struct acpi_madt_nmi_source *nmi_src; 339 340 nmi_src = (struct acpi_madt_nmi_source *)header; 341 342 if (BAD_MADT_ENTRY(nmi_src, end)) 343 return -EINVAL; 344 345 /* TBD: Support nimsrc entries */ 346 return 0; 347 } 348 349 static void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) 350 { 351 if (!strncmp(oem_id, "IBM", 3) && (!strncmp(oem_table_id, "SERMOW", 6))) { 352 353 /* 354 * Unfortunately ITC_DRIFT is not yet part of the 355 * official SAL spec, so the ITC_DRIFT bit is not 356 * set by the BIOS on this hardware. 357 */ 358 sal_platform_features |= IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT; 359 360 cyclone_setup(); 361 } 362 } 363 364 static int __init acpi_parse_madt(struct acpi_table_header *table) 365 { 366 acpi_madt = (struct acpi_table_madt *)table; 367 368 acpi_madt_rev = acpi_madt->header.revision; 369 370 /* remember the value for reference after free_initmem() */ 371 #ifdef CONFIG_ITANIUM 372 has_8259 = 1; /* Firmware on old Itanium systems is broken */ 373 #else 374 has_8259 = acpi_madt->flags & ACPI_MADT_PCAT_COMPAT; 375 #endif 376 iosapic_system_init(has_8259); 377 378 /* Get base address of IPI Message Block */ 379 380 if (acpi_madt->address) 381 ipi_base_addr = ioremap(acpi_madt->address, 0); 382 383 printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr); 384 385 acpi_madt_oem_check(acpi_madt->header.oem_id, 386 acpi_madt->header.oem_table_id); 387 388 return 0; 389 } 390 391 #ifdef CONFIG_ACPI_NUMA 392 393 #undef SLIT_DEBUG 394 395 #define PXM_FLAG_LEN ((MAX_PXM_DOMAINS + 1)/32) 396 397 static int __initdata srat_num_cpus; /* number of cpus */ 398 static u32 pxm_flag[PXM_FLAG_LEN]; 399 #define pxm_bit_set(bit) (set_bit(bit,(void *)pxm_flag)) 400 #define pxm_bit_test(bit) (test_bit(bit,(void *)pxm_flag)) 401 static struct acpi_table_slit __initdata *slit_table; 402 cpumask_t early_cpu_possible_map = CPU_MASK_NONE; 403 404 static int __init 405 get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa) 406 { 407 int pxm; 408 409 pxm = pa->proximity_domain_lo; 410 if (ia64_platform_is("sn2") || acpi_srat_revision >= 2) 411 pxm += pa->proximity_domain_hi[0] << 8; 412 return pxm; 413 } 414 415 static int __init 416 get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma) 417 { 418 int pxm; 419 420 pxm = ma->proximity_domain; 421 if (!ia64_platform_is("sn2") && acpi_srat_revision <= 1) 422 pxm &= 0xff; 423 424 return pxm; 425 } 426 427 /* 428 * ACPI 2.0 SLIT (System Locality Information Table) 429 * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf 430 */ 431 void __init acpi_numa_slit_init(struct acpi_table_slit *slit) 432 { 433 u32 len; 434 435 len = sizeof(struct acpi_table_header) + 8 436 + slit->locality_count * slit->locality_count; 437 if (slit->header.length != len) { 438 printk(KERN_ERR 439 "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n", 440 len, slit->header.length); 441 return; 442 } 443 slit_table = slit; 444 } 445 446 void __init 447 acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) 448 { 449 int pxm; 450 451 if (!(pa->flags & ACPI_SRAT_CPU_ENABLED)) 452 return; 453 454 if (srat_num_cpus >= ARRAY_SIZE(node_cpuid)) { 455 printk_once(KERN_WARNING 456 "node_cpuid[%ld] is too small, may not be able to use all cpus\n", 457 ARRAY_SIZE(node_cpuid)); 458 return; 459 } 460 pxm = get_processor_proximity_domain(pa); 461 462 /* record this node in proximity bitmap */ 463 pxm_bit_set(pxm); 464 465 node_cpuid[srat_num_cpus].phys_id = 466 (pa->apic_id << 8) | (pa->local_sapic_eid); 467 /* nid should be overridden as logical node id later */ 468 node_cpuid[srat_num_cpus].nid = pxm; 469 cpumask_set_cpu(srat_num_cpus, &early_cpu_possible_map); 470 srat_num_cpus++; 471 } 472 473 int __init 474 acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) 475 { 476 unsigned long paddr, size; 477 int pxm; 478 struct node_memblk_s *p, *q, *pend; 479 480 pxm = get_memory_proximity_domain(ma); 481 482 /* fill node memory chunk structure */ 483 paddr = ma->base_address; 484 size = ma->length; 485 486 /* Ignore disabled entries */ 487 if (!(ma->flags & ACPI_SRAT_MEM_ENABLED)) 488 return -1; 489 490 if (num_node_memblks >= NR_NODE_MEMBLKS) { 491 pr_err("NUMA: too many memblk ranges\n"); 492 return -EINVAL; 493 } 494 495 /* record this node in proximity bitmap */ 496 pxm_bit_set(pxm); 497 498 /* Insertion sort based on base address */ 499 pend = &node_memblk[num_node_memblks]; 500 for (p = &node_memblk[0]; p < pend; p++) { 501 if (paddr < p->start_paddr) 502 break; 503 } 504 if (p < pend) { 505 for (q = pend - 1; q >= p; q--) 506 *(q + 1) = *q; 507 } 508 p->start_paddr = paddr; 509 p->size = size; 510 p->nid = pxm; 511 num_node_memblks++; 512 return 0; 513 } 514 515 void __init acpi_numa_fixup(void) 516 { 517 int i, j, node_from, node_to; 518 519 /* If there's no SRAT, fix the phys_id and mark node 0 online */ 520 if (srat_num_cpus == 0) { 521 node_set_online(0); 522 node_cpuid[0].phys_id = hard_smp_processor_id(); 523 return; 524 } 525 526 /* 527 * MCD - This can probably be dropped now. No need for pxm ID to node ID 528 * mapping with sparse node numbering iff MAX_PXM_DOMAINS <= MAX_NUMNODES. 529 */ 530 nodes_clear(node_online_map); 531 for (i = 0; i < MAX_PXM_DOMAINS; i++) { 532 if (pxm_bit_test(i)) { 533 int nid = acpi_map_pxm_to_node(i); 534 node_set_online(nid); 535 } 536 } 537 538 /* set logical node id in memory chunk structure */ 539 for (i = 0; i < num_node_memblks; i++) 540 node_memblk[i].nid = pxm_to_node(node_memblk[i].nid); 541 542 /* assign memory bank numbers for each chunk on each node */ 543 for_each_online_node(i) { 544 int bank; 545 546 bank = 0; 547 for (j = 0; j < num_node_memblks; j++) 548 if (node_memblk[j].nid == i) 549 node_memblk[j].bank = bank++; 550 } 551 552 /* set logical node id in cpu structure */ 553 for_each_possible_early_cpu(i) 554 node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid); 555 556 printk(KERN_INFO "Number of logical nodes in system = %d\n", 557 num_online_nodes()); 558 printk(KERN_INFO "Number of memory chunks in system = %d\n", 559 num_node_memblks); 560 561 if (!slit_table) { 562 for (i = 0; i < MAX_NUMNODES; i++) 563 for (j = 0; j < MAX_NUMNODES; j++) 564 slit_distance(i, j) = i == j ? 565 LOCAL_DISTANCE : REMOTE_DISTANCE; 566 return; 567 } 568 569 memset(numa_slit, -1, sizeof(numa_slit)); 570 for (i = 0; i < slit_table->locality_count; i++) { 571 if (!pxm_bit_test(i)) 572 continue; 573 node_from = pxm_to_node(i); 574 for (j = 0; j < slit_table->locality_count; j++) { 575 if (!pxm_bit_test(j)) 576 continue; 577 node_to = pxm_to_node(j); 578 slit_distance(node_from, node_to) = 579 slit_table->entry[i * slit_table->locality_count + j]; 580 } 581 } 582 583 #ifdef SLIT_DEBUG 584 printk("ACPI 2.0 SLIT locality table:\n"); 585 for_each_online_node(i) { 586 for_each_online_node(j) 587 printk("%03d ", node_distance(i, j)); 588 printk("\n"); 589 } 590 #endif 591 } 592 #endif /* CONFIG_ACPI_NUMA */ 593 594 /* 595 * success: return IRQ number (>=0) 596 * failure: return < 0 597 */ 598 int acpi_register_gsi(struct device *dev, u32 gsi, int triggering, int polarity) 599 { 600 if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM) 601 return gsi; 602 603 if (has_8259 && gsi < 16) 604 return isa_irq_to_vector(gsi); 605 606 return iosapic_register_intr(gsi, 607 (polarity == 608 ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH : 609 IOSAPIC_POL_LOW, 610 (triggering == 611 ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE : 612 IOSAPIC_LEVEL); 613 } 614 EXPORT_SYMBOL_GPL(acpi_register_gsi); 615 616 void acpi_unregister_gsi(u32 gsi) 617 { 618 if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM) 619 return; 620 621 if (has_8259 && gsi < 16) 622 return; 623 624 iosapic_unregister_intr(gsi); 625 } 626 EXPORT_SYMBOL_GPL(acpi_unregister_gsi); 627 628 static int __init acpi_parse_fadt(struct acpi_table_header *table) 629 { 630 struct acpi_table_header *fadt_header; 631 struct acpi_table_fadt *fadt; 632 633 fadt_header = (struct acpi_table_header *)table; 634 if (fadt_header->revision != 3) 635 return -ENODEV; /* Only deal with ACPI 2.0 FADT */ 636 637 fadt = (struct acpi_table_fadt *)fadt_header; 638 639 acpi_register_gsi(NULL, fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE, 640 ACPI_ACTIVE_LOW); 641 return 0; 642 } 643 644 int __init early_acpi_boot_init(void) 645 { 646 int ret; 647 648 /* 649 * do a partial walk of MADT to determine how many CPUs 650 * we have including offline CPUs 651 */ 652 if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) { 653 printk(KERN_ERR PREFIX "Can't find MADT\n"); 654 return 0; 655 } 656 657 ret = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC, 658 acpi_parse_lsapic, NR_CPUS); 659 if (ret < 1) 660 printk(KERN_ERR PREFIX 661 "Error parsing MADT - no LAPIC entries\n"); 662 else 663 acpi_lapic = 1; 664 665 #ifdef CONFIG_SMP 666 if (available_cpus == 0) { 667 printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n"); 668 printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id()); 669 smp_boot_data.cpu_phys_id[available_cpus] = 670 hard_smp_processor_id(); 671 available_cpus = 1; /* We've got at least one of these, no? */ 672 } 673 smp_boot_data.cpu_count = available_cpus; 674 #endif 675 /* Make boot-up look pretty */ 676 printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, 677 total_cpus); 678 679 return 0; 680 } 681 682 int __init acpi_boot_init(void) 683 { 684 685 /* 686 * MADT 687 * ---- 688 * Parse the Multiple APIC Description Table (MADT), if exists. 689 * Note that this table provides platform SMP configuration 690 * information -- the successor to MPS tables. 691 */ 692 693 if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) { 694 printk(KERN_ERR PREFIX "Can't find MADT\n"); 695 goto skip_madt; 696 } 697 698 /* Local APIC */ 699 700 if (acpi_table_parse_madt 701 (ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, acpi_parse_lapic_addr_ovr, 0) < 0) 702 printk(KERN_ERR PREFIX 703 "Error parsing LAPIC address override entry\n"); 704 705 if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0) 706 < 0) 707 printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n"); 708 709 /* I/O APIC */ 710 711 if (acpi_table_parse_madt 712 (ACPI_MADT_TYPE_IO_SAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1) { 713 if (!ia64_platform_is("sn2")) 714 printk(KERN_ERR PREFIX 715 "Error parsing MADT - no IOSAPIC entries\n"); 716 } 717 718 /* System-Level Interrupt Routing */ 719 720 if (acpi_table_parse_madt 721 (ACPI_MADT_TYPE_INTERRUPT_SOURCE, acpi_parse_plat_int_src, 722 ACPI_MAX_PLATFORM_INTERRUPTS) < 0) 723 printk(KERN_ERR PREFIX 724 "Error parsing platform interrupt source entry\n"); 725 726 if (acpi_table_parse_madt 727 (ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr, 0) < 0) 728 printk(KERN_ERR PREFIX 729 "Error parsing interrupt source overrides entry\n"); 730 731 if (acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src, 0) < 0) 732 printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n"); 733 skip_madt: 734 735 /* 736 * FADT says whether a legacy keyboard controller is present. 737 * The FADT also contains an SCI_INT line, by which the system 738 * gets interrupts such as power and sleep buttons. If it's not 739 * on a Legacy interrupt, it needs to be setup. 740 */ 741 if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt)) 742 printk(KERN_ERR PREFIX "Can't find FADT\n"); 743 744 #ifdef CONFIG_ACPI_NUMA 745 #ifdef CONFIG_SMP 746 if (srat_num_cpus == 0) { 747 int cpu, i = 1; 748 for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++) 749 if (smp_boot_data.cpu_phys_id[cpu] != 750 hard_smp_processor_id()) 751 node_cpuid[i++].phys_id = 752 smp_boot_data.cpu_phys_id[cpu]; 753 } 754 #endif 755 build_cpu_to_node_map(); 756 #endif 757 return 0; 758 } 759 760 int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) 761 { 762 int tmp; 763 764 if (has_8259 && gsi < 16) 765 *irq = isa_irq_to_vector(gsi); 766 else { 767 tmp = gsi_to_irq(gsi); 768 if (tmp == -1) 769 return -1; 770 *irq = tmp; 771 } 772 return 0; 773 } 774 775 int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi) 776 { 777 if (isa_irq >= 16) 778 return -1; 779 *gsi = isa_irq; 780 return 0; 781 } 782 783 /* 784 * ACPI based hotplug CPU support 785 */ 786 #ifdef CONFIG_ACPI_HOTPLUG_CPU 787 int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) 788 { 789 #ifdef CONFIG_ACPI_NUMA 790 /* 791 * We don't have cpu-only-node hotadd. But if the system equips 792 * SRAT table, pxm is already found and node is ready. 793 * So, just pxm_to_nid(pxm) is OK. 794 * This code here is for the system which doesn't have full SRAT 795 * table for possible cpus. 796 */ 797 node_cpuid[cpu].phys_id = physid; 798 node_cpuid[cpu].nid = acpi_get_node(handle); 799 #endif 800 return 0; 801 } 802 803 int additional_cpus __initdata = -1; 804 805 static __init int setup_additional_cpus(char *s) 806 { 807 if (s) 808 additional_cpus = simple_strtol(s, NULL, 0); 809 810 return 0; 811 } 812 813 early_param("additional_cpus", setup_additional_cpus); 814 815 /* 816 * cpu_possible_mask should be static, it cannot change as CPUs 817 * are onlined, or offlined. The reason is per-cpu data-structures 818 * are allocated by some modules at init time, and dont expect to 819 * do this dynamically on cpu arrival/departure. 820 * cpu_present_mask on the other hand can change dynamically. 821 * In case when cpu_hotplug is not compiled, then we resort to current 822 * behaviour, which is cpu_possible == cpu_present. 823 * - Ashok Raj 824 * 825 * Three ways to find out the number of additional hotplug CPUs: 826 * - If the BIOS specified disabled CPUs in ACPI/mptables use that. 827 * - The user can overwrite it with additional_cpus=NUM 828 * - Otherwise don't reserve additional CPUs. 829 */ 830 __init void prefill_possible_map(void) 831 { 832 int i; 833 int possible, disabled_cpus; 834 835 disabled_cpus = total_cpus - available_cpus; 836 837 if (additional_cpus == -1) { 838 if (disabled_cpus > 0) 839 additional_cpus = disabled_cpus; 840 else 841 additional_cpus = 0; 842 } 843 844 possible = available_cpus + additional_cpus; 845 846 if (possible > nr_cpu_ids) 847 possible = nr_cpu_ids; 848 849 printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", 850 possible, max((possible - available_cpus), 0)); 851 852 for (i = 0; i < possible; i++) 853 set_cpu_possible(i, true); 854 } 855 856 static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) 857 { 858 cpumask_t tmp_map; 859 int cpu; 860 861 cpumask_complement(&tmp_map, cpu_present_mask); 862 cpu = cpumask_first(&tmp_map); 863 if (cpu >= nr_cpu_ids) 864 return -EINVAL; 865 866 acpi_map_cpu2node(handle, cpu, physid); 867 868 set_cpu_present(cpu, true); 869 ia64_cpu_to_sapicid[cpu] = physid; 870 871 acpi_processor_set_pdc(handle); 872 873 *pcpu = cpu; 874 return (0); 875 } 876 877 /* wrapper to silence section mismatch warning */ 878 int __ref acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, 879 int *pcpu) 880 { 881 return _acpi_map_lsapic(handle, physid, pcpu); 882 } 883 EXPORT_SYMBOL(acpi_map_cpu); 884 885 int acpi_unmap_cpu(int cpu) 886 { 887 ia64_cpu_to_sapicid[cpu] = -1; 888 set_cpu_present(cpu, false); 889 890 #ifdef CONFIG_ACPI_NUMA 891 /* NUMA specific cleanup's */ 892 #endif 893 894 return (0); 895 } 896 EXPORT_SYMBOL(acpi_unmap_cpu); 897 #endif /* CONFIG_ACPI_HOTPLUG_CPU */ 898 899 #ifdef CONFIG_ACPI_NUMA 900 static acpi_status acpi_map_iosapic(acpi_handle handle, u32 depth, 901 void *context, void **ret) 902 { 903 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 904 union acpi_object *obj; 905 struct acpi_madt_io_sapic *iosapic; 906 unsigned int gsi_base; 907 int node; 908 909 /* Only care about objects w/ a method that returns the MADT */ 910 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) 911 return AE_OK; 912 913 if (!buffer.length || !buffer.pointer) 914 return AE_OK; 915 916 obj = buffer.pointer; 917 if (obj->type != ACPI_TYPE_BUFFER || 918 obj->buffer.length < sizeof(*iosapic)) { 919 kfree(buffer.pointer); 920 return AE_OK; 921 } 922 923 iosapic = (struct acpi_madt_io_sapic *)obj->buffer.pointer; 924 925 if (iosapic->header.type != ACPI_MADT_TYPE_IO_SAPIC) { 926 kfree(buffer.pointer); 927 return AE_OK; 928 } 929 930 gsi_base = iosapic->global_irq_base; 931 932 kfree(buffer.pointer); 933 934 /* OK, it's an IOSAPIC MADT entry; associate it with a node */ 935 node = acpi_get_node(handle); 936 if (node == NUMA_NO_NODE || !node_online(node) || 937 cpumask_empty(cpumask_of_node(node))) 938 return AE_OK; 939 940 /* We know a gsi to node mapping! */ 941 map_iosapic_to_node(gsi_base, node); 942 return AE_OK; 943 } 944 945 static int __init 946 acpi_map_iosapics (void) 947 { 948 acpi_get_devices(NULL, acpi_map_iosapic, NULL, NULL); 949 return 0; 950 } 951 952 fs_initcall(acpi_map_iosapics); 953 #endif /* CONFIG_ACPI_NUMA */ 954 955 int __ref acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base) 956 { 957 int err; 958 959 if ((err = iosapic_init(phys_addr, gsi_base))) 960 return err; 961 962 #ifdef CONFIG_ACPI_NUMA 963 acpi_map_iosapic(handle, 0, NULL, NULL); 964 #endif /* CONFIG_ACPI_NUMA */ 965 966 return 0; 967 } 968 969 EXPORT_SYMBOL(acpi_register_ioapic); 970 971 int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base) 972 { 973 return iosapic_remove(gsi_base); 974 } 975 976 EXPORT_SYMBOL(acpi_unregister_ioapic); 977 978 /* 979 * acpi_suspend_lowlevel() - save kernel state and suspend. 980 * 981 * TBD when when IA64 starts to support suspend... 982 */ 983 int acpi_suspend_lowlevel(void) { return 0; } 984