1 /* 2 * acpi.c - Architecture-Specific Low-Level ACPI Support 3 * 4 * Copyright (C) 1999 VA Linux Systems 5 * Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com> 6 * Copyright (C) 2000, 2002-2003 Hewlett-Packard Co. 7 * David Mosberger-Tang <davidm@hpl.hp.com> 8 * Copyright (C) 2000 Intel Corp. 9 * Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com> 10 * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 11 * Copyright (C) 2001 Jenna Hall <jenna.s.hall@intel.com> 12 * Copyright (C) 2001 Takayoshi Kochi <t-kochi@bq.jp.nec.com> 13 * Copyright (C) 2002 Erich Focht <efocht@ess.nec.de> 14 * Copyright (C) 2004 Ashok Raj <ashok.raj@intel.com> 15 * 16 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 17 * 18 * This program is free software; you can redistribute it and/or modify 19 * it under the terms of the GNU General Public License as published by 20 * the Free Software Foundation; either version 2 of the License, or 21 * (at your option) any later version. 22 * 23 * This program is distributed in the hope that it will be useful, 24 * but WITHOUT ANY WARRANTY; without even the implied warranty of 25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 26 * GNU General Public License for more details. 27 * 28 * You should have received a copy of the GNU General Public License 29 * along with this program; if not, write to the Free Software 30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 31 * 32 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 33 */ 34 35 #include <linux/module.h> 36 #include <linux/init.h> 37 #include <linux/kernel.h> 38 #include <linux/sched.h> 39 #include <linux/smp.h> 40 #include <linux/string.h> 41 #include <linux/types.h> 42 #include <linux/irq.h> 43 #include <linux/acpi.h> 44 #include <linux/efi.h> 45 #include <linux/mmzone.h> 46 #include <linux/nodemask.h> 47 #include <linux/slab.h> 48 #include <acpi/processor.h> 49 #include <asm/io.h> 50 #include <asm/iosapic.h> 51 #include <asm/machvec.h> 52 #include <asm/page.h> 53 #include <asm/numa.h> 54 #include <asm/sal.h> 55 #include <asm/cyclone.h> 56 57 #define PREFIX "ACPI: " 58 59 unsigned int acpi_cpei_override; 60 unsigned int acpi_cpei_phys_cpuid; 61 62 unsigned long acpi_wakeup_address = 0; 63 64 #ifdef CONFIG_IA64_GENERIC 65 static unsigned long __init acpi_find_rsdp(void) 66 { 67 unsigned long rsdp_phys = 0; 68 69 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) 70 rsdp_phys = efi.acpi20; 71 else if (efi.acpi != EFI_INVALID_TABLE_ADDR) 72 printk(KERN_WARNING PREFIX 73 "v1.0/r0.71 tables no longer supported\n"); 74 return rsdp_phys; 75 } 76 77 const char __init * 78 acpi_get_sysname(void) 79 { 80 unsigned long rsdp_phys; 81 struct acpi_table_rsdp *rsdp; 82 struct acpi_table_xsdt *xsdt; 83 struct acpi_table_header *hdr; 84 #ifdef CONFIG_INTEL_IOMMU 85 u64 i, nentries; 86 #endif 87 88 rsdp_phys = acpi_find_rsdp(); 89 if (!rsdp_phys) { 90 printk(KERN_ERR 91 "ACPI 2.0 RSDP not found, default to \"dig\"\n"); 92 return "dig"; 93 } 94 95 rsdp = (struct acpi_table_rsdp *)__va(rsdp_phys); 96 if (strncmp(rsdp->signature, ACPI_SIG_RSDP, sizeof(ACPI_SIG_RSDP) - 1)) { 97 printk(KERN_ERR 98 "ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n"); 99 return "dig"; 100 } 101 102 xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_physical_address); 103 hdr = &xsdt->header; 104 if (strncmp(hdr->signature, ACPI_SIG_XSDT, sizeof(ACPI_SIG_XSDT) - 1)) { 105 printk(KERN_ERR 106 "ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n"); 107 return "dig"; 108 } 109 110 if (!strcmp(hdr->oem_id, "HP")) { 111 return "hpzx1"; 112 } else if (!strcmp(hdr->oem_id, "SGI")) { 113 if (!strcmp(hdr->oem_table_id + 4, "UV")) 114 return "uv"; 115 else 116 return "sn2"; 117 } 118 119 #ifdef CONFIG_INTEL_IOMMU 120 /* Look for Intel IOMMU */ 121 nentries = (hdr->length - sizeof(*hdr)) / 122 sizeof(xsdt->table_offset_entry[0]); 123 for (i = 0; i < nentries; i++) { 124 hdr = __va(xsdt->table_offset_entry[i]); 125 if (strncmp(hdr->signature, ACPI_SIG_DMAR, 126 sizeof(ACPI_SIG_DMAR) - 1) == 0) 127 return "dig_vtd"; 128 } 129 #endif 130 131 return "dig"; 132 } 133 #endif /* CONFIG_IA64_GENERIC */ 134 135 #define ACPI_MAX_PLATFORM_INTERRUPTS 256 136 137 /* Array to record platform interrupt vectors for generic interrupt routing. */ 138 int platform_intr_list[ACPI_MAX_PLATFORM_INTERRUPTS] = { 139 [0 ... ACPI_MAX_PLATFORM_INTERRUPTS - 1] = -1 140 }; 141 142 enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_IOSAPIC; 143 144 /* 145 * Interrupt routing API for device drivers. Provides interrupt vector for 146 * a generic platform event. Currently only CPEI is implemented. 147 */ 148 int acpi_request_vector(u32 int_type) 149 { 150 int vector = -1; 151 152 if (int_type < ACPI_MAX_PLATFORM_INTERRUPTS) { 153 /* corrected platform error interrupt */ 154 vector = platform_intr_list[int_type]; 155 } else 156 printk(KERN_ERR 157 "acpi_request_vector(): invalid interrupt type\n"); 158 return vector; 159 } 160 161 char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size) 162 { 163 return __va(phys_addr); 164 } 165 166 void __init __acpi_unmap_table(char *map, unsigned long size) 167 { 168 } 169 170 /* -------------------------------------------------------------------------- 171 Boot-time Table Parsing 172 -------------------------------------------------------------------------- */ 173 174 static int available_cpus __initdata; 175 struct acpi_table_madt *acpi_madt __initdata; 176 static u8 has_8259; 177 178 static int __init 179 acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header, 180 const unsigned long end) 181 { 182 struct acpi_madt_local_apic_override *lapic; 183 184 lapic = (struct acpi_madt_local_apic_override *)header; 185 186 if (BAD_MADT_ENTRY(lapic, end)) 187 return -EINVAL; 188 189 if (lapic->address) { 190 iounmap(ipi_base_addr); 191 ipi_base_addr = ioremap(lapic->address, 0); 192 } 193 return 0; 194 } 195 196 static int __init 197 acpi_parse_lsapic(struct acpi_subtable_header * header, const unsigned long end) 198 { 199 struct acpi_madt_local_sapic *lsapic; 200 201 lsapic = (struct acpi_madt_local_sapic *)header; 202 203 /*Skip BAD_MADT_ENTRY check, as lsapic size could vary */ 204 205 if (lsapic->lapic_flags & ACPI_MADT_ENABLED) { 206 #ifdef CONFIG_SMP 207 smp_boot_data.cpu_phys_id[available_cpus] = 208 (lsapic->id << 8) | lsapic->eid; 209 #endif 210 ++available_cpus; 211 } 212 213 total_cpus++; 214 return 0; 215 } 216 217 static int __init 218 acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end) 219 { 220 struct acpi_madt_local_apic_nmi *lacpi_nmi; 221 222 lacpi_nmi = (struct acpi_madt_local_apic_nmi *)header; 223 224 if (BAD_MADT_ENTRY(lacpi_nmi, end)) 225 return -EINVAL; 226 227 /* TBD: Support lapic_nmi entries */ 228 return 0; 229 } 230 231 static int __init 232 acpi_parse_iosapic(struct acpi_subtable_header * header, const unsigned long end) 233 { 234 struct acpi_madt_io_sapic *iosapic; 235 236 iosapic = (struct acpi_madt_io_sapic *)header; 237 238 if (BAD_MADT_ENTRY(iosapic, end)) 239 return -EINVAL; 240 241 return iosapic_init(iosapic->address, iosapic->global_irq_base); 242 } 243 244 static unsigned int __initdata acpi_madt_rev; 245 246 static int __init 247 acpi_parse_plat_int_src(struct acpi_subtable_header * header, 248 const unsigned long end) 249 { 250 struct acpi_madt_interrupt_source *plintsrc; 251 int vector; 252 253 plintsrc = (struct acpi_madt_interrupt_source *)header; 254 255 if (BAD_MADT_ENTRY(plintsrc, end)) 256 return -EINVAL; 257 258 /* 259 * Get vector assignment for this interrupt, set attributes, 260 * and program the IOSAPIC routing table. 261 */ 262 vector = iosapic_register_platform_intr(plintsrc->type, 263 plintsrc->global_irq, 264 plintsrc->io_sapic_vector, 265 plintsrc->eid, 266 plintsrc->id, 267 ((plintsrc->inti_flags & ACPI_MADT_POLARITY_MASK) == 268 ACPI_MADT_POLARITY_ACTIVE_HIGH) ? 269 IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW, 270 ((plintsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) == 271 ACPI_MADT_TRIGGER_EDGE) ? 272 IOSAPIC_EDGE : IOSAPIC_LEVEL); 273 274 platform_intr_list[plintsrc->type] = vector; 275 if (acpi_madt_rev > 1) { 276 acpi_cpei_override = plintsrc->flags & ACPI_MADT_CPEI_OVERRIDE; 277 } 278 279 /* 280 * Save the physical id, so we can check when its being removed 281 */ 282 acpi_cpei_phys_cpuid = ((plintsrc->id << 8) | (plintsrc->eid)) & 0xffff; 283 284 return 0; 285 } 286 287 #ifdef CONFIG_HOTPLUG_CPU 288 unsigned int can_cpei_retarget(void) 289 { 290 extern int cpe_vector; 291 extern unsigned int force_cpei_retarget; 292 293 /* 294 * Only if CPEI is supported and the override flag 295 * is present, otherwise return that its re-targettable 296 * if we are in polling mode. 297 */ 298 if (cpe_vector > 0) { 299 if (acpi_cpei_override || force_cpei_retarget) 300 return 1; 301 else 302 return 0; 303 } 304 return 1; 305 } 306 307 unsigned int is_cpu_cpei_target(unsigned int cpu) 308 { 309 unsigned int logical_id; 310 311 logical_id = cpu_logical_id(acpi_cpei_phys_cpuid); 312 313 if (logical_id == cpu) 314 return 1; 315 else 316 return 0; 317 } 318 319 void set_cpei_target_cpu(unsigned int cpu) 320 { 321 acpi_cpei_phys_cpuid = cpu_physical_id(cpu); 322 } 323 #endif 324 325 unsigned int get_cpei_target_cpu(void) 326 { 327 return acpi_cpei_phys_cpuid; 328 } 329 330 static int __init 331 acpi_parse_int_src_ovr(struct acpi_subtable_header * header, 332 const unsigned long end) 333 { 334 struct acpi_madt_interrupt_override *p; 335 336 p = (struct acpi_madt_interrupt_override *)header; 337 338 if (BAD_MADT_ENTRY(p, end)) 339 return -EINVAL; 340 341 iosapic_override_isa_irq(p->source_irq, p->global_irq, 342 ((p->inti_flags & ACPI_MADT_POLARITY_MASK) == 343 ACPI_MADT_POLARITY_ACTIVE_LOW) ? 344 IOSAPIC_POL_LOW : IOSAPIC_POL_HIGH, 345 ((p->inti_flags & ACPI_MADT_TRIGGER_MASK) == 346 ACPI_MADT_TRIGGER_LEVEL) ? 347 IOSAPIC_LEVEL : IOSAPIC_EDGE); 348 return 0; 349 } 350 351 static int __init 352 acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end) 353 { 354 struct acpi_madt_nmi_source *nmi_src; 355 356 nmi_src = (struct acpi_madt_nmi_source *)header; 357 358 if (BAD_MADT_ENTRY(nmi_src, end)) 359 return -EINVAL; 360 361 /* TBD: Support nimsrc entries */ 362 return 0; 363 } 364 365 static void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) 366 { 367 if (!strncmp(oem_id, "IBM", 3) && (!strncmp(oem_table_id, "SERMOW", 6))) { 368 369 /* 370 * Unfortunately ITC_DRIFT is not yet part of the 371 * official SAL spec, so the ITC_DRIFT bit is not 372 * set by the BIOS on this hardware. 373 */ 374 sal_platform_features |= IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT; 375 376 cyclone_setup(); 377 } 378 } 379 380 static int __init acpi_parse_madt(struct acpi_table_header *table) 381 { 382 if (!table) 383 return -EINVAL; 384 385 acpi_madt = (struct acpi_table_madt *)table; 386 387 acpi_madt_rev = acpi_madt->header.revision; 388 389 /* remember the value for reference after free_initmem() */ 390 #ifdef CONFIG_ITANIUM 391 has_8259 = 1; /* Firmware on old Itanium systems is broken */ 392 #else 393 has_8259 = acpi_madt->flags & ACPI_MADT_PCAT_COMPAT; 394 #endif 395 iosapic_system_init(has_8259); 396 397 /* Get base address of IPI Message Block */ 398 399 if (acpi_madt->address) 400 ipi_base_addr = ioremap(acpi_madt->address, 0); 401 402 printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr); 403 404 acpi_madt_oem_check(acpi_madt->header.oem_id, 405 acpi_madt->header.oem_table_id); 406 407 return 0; 408 } 409 410 #ifdef CONFIG_ACPI_NUMA 411 412 #undef SLIT_DEBUG 413 414 #define PXM_FLAG_LEN ((MAX_PXM_DOMAINS + 1)/32) 415 416 static int __initdata srat_num_cpus; /* number of cpus */ 417 static u32 pxm_flag[PXM_FLAG_LEN]; 418 #define pxm_bit_set(bit) (set_bit(bit,(void *)pxm_flag)) 419 #define pxm_bit_test(bit) (test_bit(bit,(void *)pxm_flag)) 420 static struct acpi_table_slit __initdata *slit_table; 421 cpumask_t early_cpu_possible_map = CPU_MASK_NONE; 422 423 static int __init 424 get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa) 425 { 426 int pxm; 427 428 pxm = pa->proximity_domain_lo; 429 if (ia64_platform_is("sn2") || acpi_srat_revision >= 2) 430 pxm += pa->proximity_domain_hi[0] << 8; 431 return pxm; 432 } 433 434 static int __init 435 get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma) 436 { 437 int pxm; 438 439 pxm = ma->proximity_domain; 440 if (!ia64_platform_is("sn2") && acpi_srat_revision <= 1) 441 pxm &= 0xff; 442 443 return pxm; 444 } 445 446 /* 447 * ACPI 2.0 SLIT (System Locality Information Table) 448 * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf 449 */ 450 void __init acpi_numa_slit_init(struct acpi_table_slit *slit) 451 { 452 u32 len; 453 454 len = sizeof(struct acpi_table_header) + 8 455 + slit->locality_count * slit->locality_count; 456 if (slit->header.length != len) { 457 printk(KERN_ERR 458 "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n", 459 len, slit->header.length); 460 return; 461 } 462 slit_table = slit; 463 } 464 465 void __init 466 acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) 467 { 468 int pxm; 469 470 if (!(pa->flags & ACPI_SRAT_CPU_ENABLED)) 471 return; 472 473 if (srat_num_cpus >= ARRAY_SIZE(node_cpuid)) { 474 printk_once(KERN_WARNING 475 "node_cpuid[%ld] is too small, may not be able to use all cpus\n", 476 ARRAY_SIZE(node_cpuid)); 477 return; 478 } 479 pxm = get_processor_proximity_domain(pa); 480 481 /* record this node in proximity bitmap */ 482 pxm_bit_set(pxm); 483 484 node_cpuid[srat_num_cpus].phys_id = 485 (pa->apic_id << 8) | (pa->local_sapic_eid); 486 /* nid should be overridden as logical node id later */ 487 node_cpuid[srat_num_cpus].nid = pxm; 488 cpu_set(srat_num_cpus, early_cpu_possible_map); 489 srat_num_cpus++; 490 } 491 492 int __init 493 acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) 494 { 495 unsigned long paddr, size; 496 int pxm; 497 struct node_memblk_s *p, *q, *pend; 498 499 pxm = get_memory_proximity_domain(ma); 500 501 /* fill node memory chunk structure */ 502 paddr = ma->base_address; 503 size = ma->length; 504 505 /* Ignore disabled entries */ 506 if (!(ma->flags & ACPI_SRAT_MEM_ENABLED)) 507 return -1; 508 509 /* record this node in proximity bitmap */ 510 pxm_bit_set(pxm); 511 512 /* Insertion sort based on base address */ 513 pend = &node_memblk[num_node_memblks]; 514 for (p = &node_memblk[0]; p < pend; p++) { 515 if (paddr < p->start_paddr) 516 break; 517 } 518 if (p < pend) { 519 for (q = pend - 1; q >= p; q--) 520 *(q + 1) = *q; 521 } 522 p->start_paddr = paddr; 523 p->size = size; 524 p->nid = pxm; 525 num_node_memblks++; 526 return 0; 527 } 528 529 void __init acpi_numa_arch_fixup(void) 530 { 531 int i, j, node_from, node_to; 532 533 /* If there's no SRAT, fix the phys_id and mark node 0 online */ 534 if (srat_num_cpus == 0) { 535 node_set_online(0); 536 node_cpuid[0].phys_id = hard_smp_processor_id(); 537 return; 538 } 539 540 /* 541 * MCD - This can probably be dropped now. No need for pxm ID to node ID 542 * mapping with sparse node numbering iff MAX_PXM_DOMAINS <= MAX_NUMNODES. 543 */ 544 nodes_clear(node_online_map); 545 for (i = 0; i < MAX_PXM_DOMAINS; i++) { 546 if (pxm_bit_test(i)) { 547 int nid = acpi_map_pxm_to_node(i); 548 node_set_online(nid); 549 } 550 } 551 552 /* set logical node id in memory chunk structure */ 553 for (i = 0; i < num_node_memblks; i++) 554 node_memblk[i].nid = pxm_to_node(node_memblk[i].nid); 555 556 /* assign memory bank numbers for each chunk on each node */ 557 for_each_online_node(i) { 558 int bank; 559 560 bank = 0; 561 for (j = 0; j < num_node_memblks; j++) 562 if (node_memblk[j].nid == i) 563 node_memblk[j].bank = bank++; 564 } 565 566 /* set logical node id in cpu structure */ 567 for_each_possible_early_cpu(i) 568 node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid); 569 570 printk(KERN_INFO "Number of logical nodes in system = %d\n", 571 num_online_nodes()); 572 printk(KERN_INFO "Number of memory chunks in system = %d\n", 573 num_node_memblks); 574 575 if (!slit_table) { 576 for (i = 0; i < MAX_NUMNODES; i++) 577 for (j = 0; j < MAX_NUMNODES; j++) 578 node_distance(i, j) = i == j ? LOCAL_DISTANCE : 579 REMOTE_DISTANCE; 580 return; 581 } 582 583 memset(numa_slit, -1, sizeof(numa_slit)); 584 for (i = 0; i < slit_table->locality_count; i++) { 585 if (!pxm_bit_test(i)) 586 continue; 587 node_from = pxm_to_node(i); 588 for (j = 0; j < slit_table->locality_count; j++) { 589 if (!pxm_bit_test(j)) 590 continue; 591 node_to = pxm_to_node(j); 592 node_distance(node_from, node_to) = 593 slit_table->entry[i * slit_table->locality_count + j]; 594 } 595 } 596 597 #ifdef SLIT_DEBUG 598 printk("ACPI 2.0 SLIT locality table:\n"); 599 for_each_online_node(i) { 600 for_each_online_node(j) 601 printk("%03d ", node_distance(i, j)); 602 printk("\n"); 603 } 604 #endif 605 } 606 #endif /* CONFIG_ACPI_NUMA */ 607 608 /* 609 * success: return IRQ number (>=0) 610 * failure: return < 0 611 */ 612 int acpi_register_gsi(struct device *dev, u32 gsi, int triggering, int polarity) 613 { 614 if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM) 615 return gsi; 616 617 if (has_8259 && gsi < 16) 618 return isa_irq_to_vector(gsi); 619 620 return iosapic_register_intr(gsi, 621 (polarity == 622 ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH : 623 IOSAPIC_POL_LOW, 624 (triggering == 625 ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE : 626 IOSAPIC_LEVEL); 627 } 628 EXPORT_SYMBOL_GPL(acpi_register_gsi); 629 630 void acpi_unregister_gsi(u32 gsi) 631 { 632 if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM) 633 return; 634 635 if (has_8259 && gsi < 16) 636 return; 637 638 iosapic_unregister_intr(gsi); 639 } 640 EXPORT_SYMBOL_GPL(acpi_unregister_gsi); 641 642 static int __init acpi_parse_fadt(struct acpi_table_header *table) 643 { 644 struct acpi_table_header *fadt_header; 645 struct acpi_table_fadt *fadt; 646 647 if (!table) 648 return -EINVAL; 649 650 fadt_header = (struct acpi_table_header *)table; 651 if (fadt_header->revision != 3) 652 return -ENODEV; /* Only deal with ACPI 2.0 FADT */ 653 654 fadt = (struct acpi_table_fadt *)fadt_header; 655 656 acpi_register_gsi(NULL, fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE, 657 ACPI_ACTIVE_LOW); 658 return 0; 659 } 660 661 int __init early_acpi_boot_init(void) 662 { 663 int ret; 664 665 /* 666 * do a partial walk of MADT to determine how many CPUs 667 * we have including offline CPUs 668 */ 669 if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) { 670 printk(KERN_ERR PREFIX "Can't find MADT\n"); 671 return 0; 672 } 673 674 ret = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC, 675 acpi_parse_lsapic, NR_CPUS); 676 if (ret < 1) 677 printk(KERN_ERR PREFIX 678 "Error parsing MADT - no LAPIC entries\n"); 679 680 #ifdef CONFIG_SMP 681 if (available_cpus == 0) { 682 printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n"); 683 printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id()); 684 smp_boot_data.cpu_phys_id[available_cpus] = 685 hard_smp_processor_id(); 686 available_cpus = 1; /* We've got at least one of these, no? */ 687 } 688 smp_boot_data.cpu_count = available_cpus; 689 #endif 690 /* Make boot-up look pretty */ 691 printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, 692 total_cpus); 693 694 return 0; 695 } 696 697 int __init acpi_boot_init(void) 698 { 699 700 /* 701 * MADT 702 * ---- 703 * Parse the Multiple APIC Description Table (MADT), if exists. 704 * Note that this table provides platform SMP configuration 705 * information -- the successor to MPS tables. 706 */ 707 708 if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) { 709 printk(KERN_ERR PREFIX "Can't find MADT\n"); 710 goto skip_madt; 711 } 712 713 /* Local APIC */ 714 715 if (acpi_table_parse_madt 716 (ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, acpi_parse_lapic_addr_ovr, 0) < 0) 717 printk(KERN_ERR PREFIX 718 "Error parsing LAPIC address override entry\n"); 719 720 if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0) 721 < 0) 722 printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n"); 723 724 /* I/O APIC */ 725 726 if (acpi_table_parse_madt 727 (ACPI_MADT_TYPE_IO_SAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1) { 728 if (!ia64_platform_is("sn2")) 729 printk(KERN_ERR PREFIX 730 "Error parsing MADT - no IOSAPIC entries\n"); 731 } 732 733 /* System-Level Interrupt Routing */ 734 735 if (acpi_table_parse_madt 736 (ACPI_MADT_TYPE_INTERRUPT_SOURCE, acpi_parse_plat_int_src, 737 ACPI_MAX_PLATFORM_INTERRUPTS) < 0) 738 printk(KERN_ERR PREFIX 739 "Error parsing platform interrupt source entry\n"); 740 741 if (acpi_table_parse_madt 742 (ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr, 0) < 0) 743 printk(KERN_ERR PREFIX 744 "Error parsing interrupt source overrides entry\n"); 745 746 if (acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src, 0) < 0) 747 printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n"); 748 skip_madt: 749 750 /* 751 * FADT says whether a legacy keyboard controller is present. 752 * The FADT also contains an SCI_INT line, by which the system 753 * gets interrupts such as power and sleep buttons. If it's not 754 * on a Legacy interrupt, it needs to be setup. 755 */ 756 if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt)) 757 printk(KERN_ERR PREFIX "Can't find FADT\n"); 758 759 #ifdef CONFIG_ACPI_NUMA 760 #ifdef CONFIG_SMP 761 if (srat_num_cpus == 0) { 762 int cpu, i = 1; 763 for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++) 764 if (smp_boot_data.cpu_phys_id[cpu] != 765 hard_smp_processor_id()) 766 node_cpuid[i++].phys_id = 767 smp_boot_data.cpu_phys_id[cpu]; 768 } 769 #endif 770 build_cpu_to_node_map(); 771 #endif 772 return 0; 773 } 774 775 int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) 776 { 777 int tmp; 778 779 if (has_8259 && gsi < 16) 780 *irq = isa_irq_to_vector(gsi); 781 else { 782 tmp = gsi_to_irq(gsi); 783 if (tmp == -1) 784 return -1; 785 *irq = tmp; 786 } 787 return 0; 788 } 789 790 int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi) 791 { 792 if (isa_irq >= 16) 793 return -1; 794 *gsi = isa_irq; 795 return 0; 796 } 797 798 /* 799 * ACPI based hotplug CPU support 800 */ 801 #ifdef CONFIG_ACPI_HOTPLUG_CPU 802 static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) 803 { 804 #ifdef CONFIG_ACPI_NUMA 805 /* 806 * We don't have cpu-only-node hotadd. But if the system equips 807 * SRAT table, pxm is already found and node is ready. 808 * So, just pxm_to_nid(pxm) is OK. 809 * This code here is for the system which doesn't have full SRAT 810 * table for possible cpus. 811 */ 812 node_cpuid[cpu].phys_id = physid; 813 node_cpuid[cpu].nid = acpi_get_node(handle); 814 #endif 815 return 0; 816 } 817 818 int additional_cpus __initdata = -1; 819 820 static __init int setup_additional_cpus(char *s) 821 { 822 if (s) 823 additional_cpus = simple_strtol(s, NULL, 0); 824 825 return 0; 826 } 827 828 early_param("additional_cpus", setup_additional_cpus); 829 830 /* 831 * cpu_possible_mask should be static, it cannot change as CPUs 832 * are onlined, or offlined. The reason is per-cpu data-structures 833 * are allocated by some modules at init time, and dont expect to 834 * do this dynamically on cpu arrival/departure. 835 * cpu_present_mask on the other hand can change dynamically. 836 * In case when cpu_hotplug is not compiled, then we resort to current 837 * behaviour, which is cpu_possible == cpu_present. 838 * - Ashok Raj 839 * 840 * Three ways to find out the number of additional hotplug CPUs: 841 * - If the BIOS specified disabled CPUs in ACPI/mptables use that. 842 * - The user can overwrite it with additional_cpus=NUM 843 * - Otherwise don't reserve additional CPUs. 844 */ 845 __init void prefill_possible_map(void) 846 { 847 int i; 848 int possible, disabled_cpus; 849 850 disabled_cpus = total_cpus - available_cpus; 851 852 if (additional_cpus == -1) { 853 if (disabled_cpus > 0) 854 additional_cpus = disabled_cpus; 855 else 856 additional_cpus = 0; 857 } 858 859 possible = available_cpus + additional_cpus; 860 861 if (possible > nr_cpu_ids) 862 possible = nr_cpu_ids; 863 864 printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", 865 possible, max((possible - available_cpus), 0)); 866 867 for (i = 0; i < possible; i++) 868 set_cpu_possible(i, true); 869 } 870 871 static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) 872 { 873 cpumask_t tmp_map; 874 int cpu; 875 876 cpumask_complement(&tmp_map, cpu_present_mask); 877 cpu = cpumask_first(&tmp_map); 878 if (cpu >= nr_cpu_ids) 879 return -EINVAL; 880 881 acpi_map_cpu2node(handle, cpu, physid); 882 883 set_cpu_present(cpu, true); 884 ia64_cpu_to_sapicid[cpu] = physid; 885 886 acpi_processor_set_pdc(handle); 887 888 *pcpu = cpu; 889 return (0); 890 } 891 892 /* wrapper to silence section mismatch warning */ 893 int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) 894 { 895 return _acpi_map_lsapic(handle, physid, pcpu); 896 } 897 EXPORT_SYMBOL(acpi_map_lsapic); 898 899 int acpi_unmap_lsapic(int cpu) 900 { 901 ia64_cpu_to_sapicid[cpu] = -1; 902 set_cpu_present(cpu, false); 903 904 #ifdef CONFIG_ACPI_NUMA 905 /* NUMA specific cleanup's */ 906 #endif 907 908 return (0); 909 } 910 911 EXPORT_SYMBOL(acpi_unmap_lsapic); 912 #endif /* CONFIG_ACPI_HOTPLUG_CPU */ 913 914 #ifdef CONFIG_ACPI_NUMA 915 static acpi_status acpi_map_iosapic(acpi_handle handle, u32 depth, 916 void *context, void **ret) 917 { 918 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 919 union acpi_object *obj; 920 struct acpi_madt_io_sapic *iosapic; 921 unsigned int gsi_base; 922 int node; 923 924 /* Only care about objects w/ a method that returns the MADT */ 925 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) 926 return AE_OK; 927 928 if (!buffer.length || !buffer.pointer) 929 return AE_OK; 930 931 obj = buffer.pointer; 932 if (obj->type != ACPI_TYPE_BUFFER || 933 obj->buffer.length < sizeof(*iosapic)) { 934 kfree(buffer.pointer); 935 return AE_OK; 936 } 937 938 iosapic = (struct acpi_madt_io_sapic *)obj->buffer.pointer; 939 940 if (iosapic->header.type != ACPI_MADT_TYPE_IO_SAPIC) { 941 kfree(buffer.pointer); 942 return AE_OK; 943 } 944 945 gsi_base = iosapic->global_irq_base; 946 947 kfree(buffer.pointer); 948 949 /* OK, it's an IOSAPIC MADT entry; associate it with a node */ 950 node = acpi_get_node(handle); 951 if (node == NUMA_NO_NODE || !node_online(node) || 952 cpumask_empty(cpumask_of_node(node))) 953 return AE_OK; 954 955 /* We know a gsi to node mapping! */ 956 map_iosapic_to_node(gsi_base, node); 957 return AE_OK; 958 } 959 960 static int __init 961 acpi_map_iosapics (void) 962 { 963 acpi_get_devices(NULL, acpi_map_iosapic, NULL, NULL); 964 return 0; 965 } 966 967 fs_initcall(acpi_map_iosapics); 968 #endif /* CONFIG_ACPI_NUMA */ 969 970 int __ref acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base) 971 { 972 int err; 973 974 if ((err = iosapic_init(phys_addr, gsi_base))) 975 return err; 976 977 #ifdef CONFIG_ACPI_NUMA 978 acpi_map_iosapic(handle, 0, NULL, NULL); 979 #endif /* CONFIG_ACPI_NUMA */ 980 981 return 0; 982 } 983 984 EXPORT_SYMBOL(acpi_register_ioapic); 985 986 int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base) 987 { 988 return iosapic_remove(gsi_base); 989 } 990 991 EXPORT_SYMBOL(acpi_unregister_ioapic); 992 993 /* 994 * acpi_suspend_lowlevel() - save kernel state and suspend. 995 * 996 * TBD when when IA64 starts to support suspend... 997 */ 998 int acpi_suspend_lowlevel(void) { return 0; } 999