/*
 * acpi_processor.c - ACPI Processor Driver ($Revision: 71 $)
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *  			- Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *  TBD:
 *	1. Make # power states dynamic.
 *	2. Support duty_cycle values that span bit 4.
 *	3. Optimize by having scheduler determine busyness instead of
 *	   having us try to calculate it here.
 *	4. Need C1 timing -- must modify kernel (IRQ handler) to get this.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>

#include <asm/io.h>
#include <asm/system.h>
#include <asm/cpu.h>
#include <asm/delay.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/acpi.h>

#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>

#define ACPI_PROCESSOR_COMPONENT	0x01000000
#define ACPI_PROCESSOR_CLASS		"processor"
#define ACPI_PROCESSOR_DEVICE_NAME	"Processor"
#define ACPI_PROCESSOR_FILE_INFO	"info"
#define ACPI_PROCESSOR_FILE_THROTTLING	"throttling"
#define ACPI_PROCESSOR_FILE_LIMIT	"limit"
#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
#define ACPI_PROCESSOR_NOTIFY_POWER	0x81
#define ACPI_PROCESSOR_NOTIFY_THROTTLING 0x82

#define ACPI_PROCESSOR_LIMIT_USER	0
#define ACPI_PROCESSOR_LIMIT_THERMAL	1

#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_core");

MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Processor Driver");
MODULE_LICENSE("GPL");

static int acpi_processor_add(struct acpi_device *device);
static int acpi_processor_start(struct acpi_device *device);
static int acpi_processor_remove(struct acpi_device *device, int type);
static int acpi_processor_info_open_fs(struct inode *inode, struct file *file);
static void acpi_processor_notify(acpi_handle handle, u32 event, void *data);
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
static int acpi_processor_handle_eject(struct acpi_processor *pr);
extern int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
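
/*
 * Rough map of this file (informational): errata detection for the PIIX4
 * chipset, the /proc/acpi/processor/* interface, ACPI-id to logical-CPU
 * mapping via the MADT/_MAT, the acpi_driver add/start/remove/notify
 * callbacks, and CPU hotplug support.  The actual C-state, throttling,
 * performance and thermal-limit logic lives in the companion processor_*
 * files; this file only calls into it (e.g. acpi_processor_power_init(),
 * acpi_processor_get_throttling_info()).
 */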

static const struct acpi_device_id processor_device_ids[] = {
	{ACPI_PROCESSOR_HID, 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);

static struct acpi_driver acpi_processor_driver = {
	.name = "processor",
	.class = ACPI_PROCESSOR_CLASS,
	.ids = processor_device_ids,
	.ops = {
		.add = acpi_processor_add,
		.remove = acpi_processor_remove,
		.start = acpi_processor_start,
		.suspend = acpi_processor_suspend,
		.resume = acpi_processor_resume,
		},
};

#define INSTALL_NOTIFY_HANDLER		1
#define UNINSTALL_NOTIFY_HANDLER	2

static const struct file_operations acpi_processor_info_fops = {
	.open = acpi_processor_info_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct acpi_processor *processors[NR_CPUS];
struct acpi_processor_errata errata __read_mostly;

/* --------------------------------------------------------------------------
                                Errata Handling
   -------------------------------------------------------------------------- */

static int acpi_processor_errata_piix4(struct pci_dev *dev)
{
	u8 value1 = 0;
	u8 value2 = 0;


	if (!dev)
		return -EINVAL;

	/*
	 * Note that 'dev' references the PIIX4 ACPI Controller.
	 */

	switch (dev->revision) {
	case 0:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n"));
		break;
	case 1:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n"));
		break;
	case 2:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n"));
		break;
	case 3:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n"));
		break;
	default:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n"));
		break;
	}

	switch (dev->revision) {

	case 0:		/* PIIX4 A-step */
	case 1:		/* PIIX4 B-step */
		/*
		 * See specification changes #13 ("Manual Throttle Duty Cycle")
		 * and #14 ("Enabling and Disabling Manual Throttle"), plus
		 * erratum #5 ("STPCLK# Deassertion Time") from the January
		 * 2002 PIIX4 specification update.  Applies to only older
		 * PIIX4 models.
		 */
		errata.piix4.throttle = 1;
		/* Fall through: the errata below apply to all PIIX4 models. */

	case 2:		/* PIIX4E */
	case 3:		/* PIIX4M */
		/*
		 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
		 * Livelock") from the January 2002 PIIX4 specification update.
		 * Applies to all PIIX4 models.
		 */

		/*
		 * BM-IDE
		 * ------
		 * Find the PIIX4 IDE Controller and get the Bus Master IDE
		 * Status register address.  We'll use this later to read
		 * each IDE controller's DMA status to make sure we catch all
		 * DMA activity.
		 */
		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_82371AB,
				     PCI_ANY_ID, PCI_ANY_ID, NULL);
		if (dev) {
			errata.piix4.bmisx = pci_resource_start(dev, 4);
			pci_dev_put(dev);
		}

		/*
		 * Type-F DMA
		 * ----------
		 * Find the PIIX4 ISA Controller and read the Motherboard
		 * DMA controller's status to see if Type-F (Fast) DMA mode
		 * is enabled (bit 7) on either channel.  Note that we'll
		 * disable C3 support if this is enabled, as some legacy
		 * devices won't operate well if fast DMA is disabled.
		 */
		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_82371AB_0,
				     PCI_ANY_ID, PCI_ANY_ID, NULL);
		if (dev) {
			pci_read_config_byte(dev, 0x76, &value1);
			pci_read_config_byte(dev, 0x77, &value2);
			if ((value1 & 0x80) || (value2 & 0x80))
				errata.piix4.fdma = 1;
			pci_dev_put(dev);
		}

		break;
	}

	if (errata.piix4.bmisx)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Bus master activity detection (BM-IDE) erratum enabled\n"));
	if (errata.piix4.fdma)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Type-F DMA livelock erratum (C3 disabled)\n"));

	return 0;
}
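
/*
 * How these errata flags are consumed (a summary, not authoritative): the
 * idle code polls the BM-IDE status ports recorded in errata.piix4.bmisx
 * before entering C3, roughly along the lines of the sketch below, to
 * detect bus-master activity and stay out of C3 while it is ongoing;
 * errata.piix4.fdma and errata.piix4.throttle similarly restrict C3 and
 * duty-cycle throttling on the affected steppings.
 *
 *	if (errata.piix4.bmisx &&
 *	    ((inb_p(errata.piix4.bmisx + 0x02) & 0x01) ||
 *	     (inb_p(errata.piix4.bmisx + 0x0a) & 0x01)))
 *		bm_status = 1;
 */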

static int acpi_processor_errata(struct acpi_processor *pr)
{
	int result = 0;
	struct pci_dev *dev = NULL;


	if (!pr)
		return -EINVAL;

	/*
	 * PIIX4
	 */
	dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
			     PCI_ANY_ID, NULL);
	if (dev) {
		result = acpi_processor_errata_piix4(dev);
		pci_dev_put(dev);
	}

	return result;
}

/* --------------------------------------------------------------------------
                       Common ACPI processor functions
   -------------------------------------------------------------------------- */

/*
 * _PDC is required for a BIOS-OS handshake for most of the newer
 * ACPI processor features.
 */
static int acpi_processor_set_pdc(struct acpi_processor *pr)
{
	struct acpi_object_list *pdc_in = pr->pdc;
	acpi_status status = AE_OK;


	if (!pdc_in)
		return status;

	status = acpi_evaluate_object(pr->handle, "_PDC", pdc_in, NULL);

	if (ACPI_FAILURE(status))
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "Could not evaluate _PDC, using legacy perf. control...\n"));

	return status;
}

/* --------------------------------------------------------------------------
                              FS Interface (/proc)
   -------------------------------------------------------------------------- */

static struct proc_dir_entry *acpi_processor_dir = NULL;

static int acpi_processor_info_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;


	if (!pr)
		goto end;

	seq_printf(seq, "processor id: %d\n"
		   "acpi id: %d\n"
		   "bus mastering control: %s\n"
		   "power management: %s\n"
		   "throttling control: %s\n"
		   "limit interface: %s\n",
		   pr->id,
		   pr->acpi_id,
		   pr->flags.bm_control ? "yes" : "no",
		   pr->flags.power ? "yes" : "no",
		   pr->flags.throttling ? "yes" : "no",
		   pr->flags.limit ? "yes" : "no");

      end:
	return 0;
}

static int acpi_processor_info_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_info_seq_show,
			   PDE(inode)->data);
}

static int acpi_processor_add_fs(struct acpi_device *device)
{
	struct proc_dir_entry *entry = NULL;


	if (!acpi_device_dir(device)) {
		acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
						     acpi_processor_dir);
		if (!acpi_device_dir(device))
			return -ENODEV;
	}
	acpi_device_dir(device)->owner = THIS_MODULE;

	/* 'info' [R] */
	entry = create_proc_entry(ACPI_PROCESSOR_FILE_INFO,
				  S_IRUGO, acpi_device_dir(device));
	if (!entry)
		return -EIO;
	else {
		entry->proc_fops = &acpi_processor_info_fops;
		entry->data = acpi_driver_data(device);
		entry->owner = THIS_MODULE;
	}

	/* 'throttling' [R/W] */
	entry = create_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
				  S_IFREG | S_IRUGO | S_IWUSR,
				  acpi_device_dir(device));
	if (!entry)
		return -EIO;
	else {
		entry->proc_fops = &acpi_processor_throttling_fops;
		entry->data = acpi_driver_data(device);
		entry->owner = THIS_MODULE;
	}

	/* 'limit' [R/W] */
	entry = create_proc_entry(ACPI_PROCESSOR_FILE_LIMIT,
				  S_IFREG | S_IRUGO | S_IWUSR,
				  acpi_device_dir(device));
	if (!entry)
		return -EIO;
	else {
		entry->proc_fops = &acpi_processor_limit_fops;
		entry->data = acpi_driver_data(device);
		entry->owner = THIS_MODULE;
	}

	return 0;
}

static int acpi_processor_remove_fs(struct acpi_device *device)
{

	if (acpi_device_dir(device)) {
		remove_proc_entry(ACPI_PROCESSOR_FILE_INFO,
				  acpi_device_dir(device));
		remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
				  acpi_device_dir(device));
		remove_proc_entry(ACPI_PROCESSOR_FILE_LIMIT,
				  acpi_device_dir(device));
		remove_proc_entry(acpi_device_bid(device), acpi_processor_dir);
		acpi_device_dir(device) = NULL;
	}

	return 0;
}

/* Use the ACPI id in the MADT to map CPUs in case of SMP */

#ifndef CONFIG_SMP
static int get_cpu_id(acpi_handle handle, u32 acpi_id) { return -1; }
#else

static struct acpi_table_madt *madt;

static int map_lapic_id(struct acpi_subtable_header *entry,
			u32 acpi_id, int *apic_id)
{
	struct acpi_madt_local_apic *lapic =
		(struct acpi_madt_local_apic *)entry;
	if ((lapic->lapic_flags & ACPI_MADT_ENABLED) &&
	    lapic->processor_id == acpi_id) {
		*apic_id = lapic->id;
		return 1;
	}
	return 0;
}

static int map_lsapic_id(struct acpi_subtable_header *entry,
			 u32 acpi_id, int *apic_id)
{
	struct acpi_madt_local_sapic *lsapic =
		(struct acpi_madt_local_sapic *)entry;
	/* Only check enabled APICs */
	if (lsapic->lapic_flags & ACPI_MADT_ENABLED) {
		/* First check against id */
		if (lsapic->processor_id == acpi_id) {
			*apic_id = (lsapic->id << 8) | lsapic->eid;
			return 1;
		/* Check against optional uid */
		} else if (entry->length >= 16 &&
			   lsapic->uid == acpi_id) {
			*apic_id = lsapic->uid;
			return 1;
		}
	}
	return 0;
}

#ifdef CONFIG_IA64
#define arch_cpu_to_apicid	ia64_cpu_to_sapicid
#else
#define arch_cpu_to_apicid	x86_cpu_to_apicid
#endif

static int map_madt_entry(u32 acpi_id)
{
	unsigned long madt_end, entry;
	int apic_id = -1;

	if (!madt)
		return apic_id;

	entry = (unsigned long)madt;
	madt_end = entry + madt->header.length;

	/* Parse all entries looking for a match. */

	entry += sizeof(struct acpi_table_madt);
	while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
		struct acpi_subtable_header *header =
			(struct acpi_subtable_header *)entry;
		if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
			if (map_lapic_id(header, acpi_id, &apic_id))
				break;
		} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
			if (map_lsapic_id(header, acpi_id, &apic_id))
				break;
		}
		entry += header->length;
	}
	return apic_id;
}

static int map_mat_entry(acpi_handle handle, u32 acpi_id)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	struct acpi_subtable_header *header;
	int apic_id = -1;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
		goto exit;

	if (!buffer.length || !buffer.pointer)
		goto exit;

	obj = buffer.pointer;
	if (obj->type != ACPI_TYPE_BUFFER ||
	    obj->buffer.length < sizeof(struct acpi_subtable_header)) {
		goto exit;
	}

	header = (struct acpi_subtable_header *)obj->buffer.pointer;
	if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
		map_lapic_id(header, acpi_id, &apic_id);
	} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
		map_lsapic_id(header, acpi_id, &apic_id);
	}

      exit:
	if (buffer.pointer)
		kfree(buffer.pointer);
	return apic_id;
}

static int get_cpu_id(acpi_handle handle, u32 acpi_id)
{
	int i;
	int apic_id = -1;

	apic_id = map_mat_entry(handle, acpi_id);
	if (apic_id == -1)
		apic_id = map_madt_entry(acpi_id);
	if (apic_id == -1)
		return apic_id;

	for (i = 0; i < NR_CPUS; ++i) {
		if (arch_cpu_to_apicid[i] == apic_id)
			return i;
	}
	return -1;
}
#endif
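
/*
 * Summary of the mapping above: the ACPI id from the Processor object is
 * first looked up through the per-device _MAT method (map_mat_entry())
 * and, failing that, by walking the static MADT (map_madt_entry()).
 * Either path yields a local APIC/SAPIC id, which is then matched against
 * the architecture's arch_cpu_to_apicid[] table to find the logical CPU
 * number.  A result of -1 means no present CPU matches -- on
 * hotplug-capable systems that is the cue for acpi_processor_hotadd_init()
 * further below.
 */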

/* --------------------------------------------------------------------------
                                 Driver Interface
   -------------------------------------------------------------------------- */

static int acpi_processor_get_info(struct acpi_processor *pr, unsigned has_uid)
{
	acpi_status status = 0;
	union acpi_object object = { 0 };
	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
	int cpu_index;
	static int cpu0_initialized;


	if (!pr)
		return -EINVAL;

	if (num_online_cpus() > 1)
		errata.smp = TRUE;

	acpi_processor_errata(pr);

	/*
	 * Check to see if we have bus mastering arbitration control.  This
	 * is required for proper C3 usage (to maintain cache coherency).
	 */
	if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
		pr->flags.bm_control = 1;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Bus mastering arbitration control present\n"));
	} else
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "No bus mastering arbitration control\n"));

	/* Check if it is a Device with HID and UID */
	if (has_uid) {
		unsigned long value;
		status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
					       NULL, &value);
		if (ACPI_FAILURE(status)) {
			printk(KERN_ERR PREFIX "Evaluating processor _UID\n");
			return -ENODEV;
		}
		pr->acpi_id = value;
	} else {
		/*
		 * Evaluate the processor object.  Note that it is common on
		 * SMP to have the first (boot) processor with a valid PBLK
		 * address while all others have a NULL address.
		 */
		status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
		if (ACPI_FAILURE(status)) {
			printk(KERN_ERR PREFIX "Evaluating processor object\n");
			return -ENODEV;
		}

		/*
		 * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
		 *      >>> 'acpi_get_processor_id(acpi_id, &id)' in arch/xxx/acpi.c
		 */
		pr->acpi_id = object.processor.proc_id;
	}
	cpu_index = get_cpu_id(pr->handle, pr->acpi_id);

	/* Handle UP system running SMP kernel, with no LAPIC in MADT */
	if (!cpu0_initialized && (cpu_index == -1) &&
	    (num_online_cpus() == 1)) {
		cpu_index = 0;
	}

	cpu0_initialized = 1;

	pr->id = cpu_index;

	/*
	 * Extra Processor objects may be enumerated on MP systems with
	 * less than the max # of CPUs.  They should be ignored _iff_
	 * they are physically not present.
	 */
	if (pr->id == -1) {
		if (ACPI_FAILURE
		    (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
			return -ENODEV;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
			  pr->acpi_id));

	if (!object.processor.pblk_address)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
	else if (object.processor.pblk_length != 6)
		printk(KERN_ERR PREFIX "Invalid PBLK length [%d]\n",
		       object.processor.pblk_length);
	else {
		pr->throttling.address = object.processor.pblk_address;
		pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
		pr->throttling.duty_width = acpi_gbl_FADT.duty_width;

		pr->pblk = object.processor.pblk_address;

		/*
		 * We don't care about error returns - we just try to mark
		 * these reserved so that nobody else is confused into thinking
		 * that this region might be unused..
		 *
		 * (In particular, allocating the IO range for Cardbus)
		 */
		request_region(pr->throttling.address, 6, "ACPI CPU throttle");
	}

#ifdef CONFIG_CPU_FREQ
	acpi_processor_ppc_has_changed(pr);
#endif
	acpi_processor_get_throttling_info(pr);
	acpi_processor_get_limit_info(pr);

	return 0;
}
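
/*
 * A short note on the P_BLK fields cached above (per the ACPI spec; the
 * numbers are only an example): the 6-byte PBLK starts with the 4-byte
 * P_CNT register, whose duty-cycle field is duty_width bits wide at bit
 * position duty_offset (both taken from the FADT), followed by the 1-byte
 * P_LVL2 and P_LVL3 registers.  With duty_width = 3 there are 2^3 = 8
 * throttling steps, so a duty value of 4 with the THT_EN bit (bit 4) set
 * requests roughly 4/8 = 50% of full speed.
 */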

static void *processor_device_array[NR_CPUS];

static int __cpuinit acpi_processor_start(struct acpi_device *device)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_processor *pr;


	pr = acpi_driver_data(device);

	result = acpi_processor_get_info(pr, device->flags.unique_id);
	if (result) {
		/* Processor is physically not present */
		return 0;
	}

	BUG_ON((pr->id >= NR_CPUS) || (pr->id < 0));

	/*
	 * Buggy BIOS check
	 * ACPI id of processors can be reported wrongly by the BIOS.
	 * Don't trust it blindly
	 */
	if (processor_device_array[pr->id] != NULL &&
	    processor_device_array[pr->id] != device) {
		printk(KERN_WARNING "BIOS reported wrong ACPI id "
		       "for the processor\n");
		return -ENODEV;
	}
	processor_device_array[pr->id] = device;

	processors[pr->id] = pr;

	result = acpi_processor_add_fs(device);
	if (result)
		goto end;

	status = acpi_install_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
					     acpi_processor_notify, pr);

	/* _PDC call should be done before doing anything else (if reqd.). */
	arch_acpi_processor_init_pdc(pr);
	acpi_processor_set_pdc(pr);

	acpi_processor_power_init(pr, device);

	if (pr->flags.throttling) {
		printk(KERN_INFO PREFIX "%s [%s] (supports",
		       acpi_device_name(device), acpi_device_bid(device));
		printk(" %d throttling states", pr->throttling.state_count);
		printk(")\n");
	}

      end:

	return result;
}

static void acpi_processor_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_processor *pr = data;
	struct acpi_device *device = NULL;


	if (!pr)
		return;

	if (acpi_bus_get_device(pr->handle, &device))
		return;

	switch (event) {
	case ACPI_PROCESSOR_NOTIFY_PERFORMANCE:
		acpi_processor_ppc_has_changed(pr);
		acpi_bus_generate_proc_event(device, event,
					     pr->performance_platform_limit);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
						device->dev.bus_id, event,
						pr->performance_platform_limit);
		break;
	case ACPI_PROCESSOR_NOTIFY_POWER:
		acpi_processor_cst_has_changed(pr);
		acpi_bus_generate_proc_event(device, event, 0);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
						device->dev.bus_id, event, 0);
		break;
	case ACPI_PROCESSOR_NOTIFY_THROTTLING:
		acpi_processor_tstate_has_changed(pr);
		acpi_bus_generate_proc_event(device, event, 0);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
						device->dev.bus_id, event, 0);
		break;
	default:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Unsupported event [0x%x]\n", event));
		break;
	}

	return;
}
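
/*
 * Delivery note (informational): for each notify value handled above
 * (0x80 performance/_PPC, 0x81 power/_CST, 0x82 throttling), the handler
 * both re-evaluates the affected state and forwards the event to user
 * space twice -- once on the legacy /proc/acpi/event channel via
 * acpi_bus_generate_proc_event() and once as an ACPI netlink event via
 * acpi_bus_generate_netlink_event() -- so either interface can be used
 * to watch for platform-initiated limit or C-state changes.
 */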

static int acpi_cpu_soft_notify(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct acpi_processor *pr = processors[cpu];

	if (action == CPU_ONLINE && pr) {
		acpi_processor_ppc_has_changed(pr);
		acpi_processor_cst_has_changed(pr);
		acpi_processor_tstate_has_changed(pr);
	}
	return NOTIFY_OK;
}

static struct notifier_block acpi_cpu_notifier =
{
	.notifier_call = acpi_cpu_soft_notify,
};

static int acpi_processor_add(struct acpi_device *device)
{
	struct acpi_processor *pr = NULL;


	if (!device)
		return -EINVAL;

	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
	if (!pr)
		return -ENOMEM;

	pr->handle = device->handle;
	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
	acpi_driver_data(device) = pr;

	return 0;
}

static int acpi_processor_remove(struct acpi_device *device, int type)
{
	acpi_status status = AE_OK;
	struct acpi_processor *pr = NULL;


	if (!device || !acpi_driver_data(device))
		return -EINVAL;

	pr = acpi_driver_data(device);

	if (pr->id >= NR_CPUS) {
		kfree(pr);
		return 0;
	}

	if (type == ACPI_BUS_REMOVAL_EJECT) {
		if (acpi_processor_handle_eject(pr))
			return -EINVAL;
	}

	acpi_processor_power_exit(pr, device);

	status = acpi_remove_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
					    acpi_processor_notify);

	acpi_processor_remove_fs(device);

	processors[pr->id] = NULL;

	kfree(pr);

	return 0;
}

#ifdef CONFIG_ACPI_HOTPLUG_CPU
/****************************************************************************
 *                       Acpi processor hotplug support                    *
 ****************************************************************************/

static int is_processor_present(acpi_handle handle);

static int is_processor_present(acpi_handle handle)
{
	acpi_status status;
	unsigned long sta = 0;


	status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
	if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"Processor Device is not present"));
		return 0;
	}
	return 1;
}

static
int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
{
	acpi_handle phandle;
	struct acpi_device *pdev;
	struct acpi_processor *pr;


	if (acpi_get_parent(handle, &phandle)) {
		return -ENODEV;
	}

	if (acpi_bus_get_device(phandle, &pdev)) {
		return -ENODEV;
	}

	if (acpi_bus_add(device, pdev, handle, ACPI_BUS_TYPE_PROCESSOR)) {
		return -ENODEV;
	}

	acpi_bus_start(*device);

	pr = acpi_driver_data(*device);
	if (!pr)
		return -ENODEV;

	if ((pr->id >= 0) && (pr->id < NR_CPUS)) {
		kobject_uevent(&(*device)->dev.kobj, KOBJ_ONLINE);
	}
	return 0;
}

static void
acpi_processor_hotplug_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_processor *pr;
	struct acpi_device *device = NULL;
	int result;


	switch (event) {
	case ACPI_NOTIFY_BUS_CHECK:
	case ACPI_NOTIFY_DEVICE_CHECK:
		printk("Processor driver received %s event\n",
		       (event == ACPI_NOTIFY_BUS_CHECK) ?
		       "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK");

		if (!is_processor_present(handle))
			break;

		if (acpi_bus_get_device(handle, &device)) {
			result = acpi_processor_device_add(handle, &device);
			if (result)
				printk(KERN_ERR PREFIX
				       "Unable to add the device\n");
			break;
		}

		pr = acpi_driver_data(device);
		if (!pr) {
			printk(KERN_ERR PREFIX "Driver data is NULL\n");
			break;
		}

		if (pr->id >= 0 && (pr->id < NR_CPUS)) {
			kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
			break;
		}

		result = acpi_processor_start(device);
		if ((!result) && ((pr->id >= 0) && (pr->id < NR_CPUS))) {
			kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
		} else {
			printk(KERN_ERR PREFIX "Device [%s] failed to start\n",
			       acpi_device_bid(device));
		}
		break;
	case ACPI_NOTIFY_EJECT_REQUEST:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "received ACPI_NOTIFY_EJECT_REQUEST\n"));

		if (acpi_bus_get_device(handle, &device)) {
			printk(KERN_ERR PREFIX
			       "Device doesn't exist, dropping EJECT\n");
			break;
		}
		pr = acpi_driver_data(device);
		if (!pr) {
			printk(KERN_ERR PREFIX
			       "Driver data is NULL, dropping EJECT\n");
			return;
		}

		if ((pr->id < NR_CPUS) && (cpu_present(pr->id)))
			kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
		break;
	default:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Unsupported event [0x%x]\n", event));
		break;
	}

	return;
}
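
/*
 * Hotplug flow in brief (a summary of the handler above, not a spec): on
 * ACPI_NOTIFY_BUS_CHECK/DEVICE_CHECK for a processor that _STA reports
 * present, the ACPI device is created and started and a KOBJ_ONLINE
 * uevent is emitted; user space is then expected to bring the CPU up
 * through the usual sysfs online attribute
 * (/sys/devices/system/cpu/cpuN/online).  ACPI_NOTIFY_EJECT_REQUEST only
 * emits KOBJ_OFFLINE here -- the actual teardown happens later through
 * acpi_processor_remove() and acpi_processor_handle_eject().
 */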

static acpi_status
processor_walk_namespace_cb(acpi_handle handle,
			    u32 lvl, void *context, void **rv)
{
	acpi_status status;
	int *action = context;
	acpi_object_type type = 0;

	status = acpi_get_type(handle, &type);
	if (ACPI_FAILURE(status))
		return (AE_OK);

	if (type != ACPI_TYPE_PROCESSOR)
		return (AE_OK);

	switch (*action) {
	case INSTALL_NOTIFY_HANDLER:
		acpi_install_notify_handler(handle,
					    ACPI_SYSTEM_NOTIFY,
					    acpi_processor_hotplug_notify,
					    NULL);
		break;
	case UNINSTALL_NOTIFY_HANDLER:
		acpi_remove_notify_handler(handle,
					   ACPI_SYSTEM_NOTIFY,
					   acpi_processor_hotplug_notify);
		break;
	default:
		break;
	}

	return (AE_OK);
}

static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
{

	if (!is_processor_present(handle)) {
		return AE_ERROR;
	}

	if (acpi_map_lsapic(handle, p_cpu))
		return AE_ERROR;

	if (arch_register_cpu(*p_cpu)) {
		acpi_unmap_lsapic(*p_cpu);
		return AE_ERROR;
	}

	return AE_OK;
}

static int acpi_processor_handle_eject(struct acpi_processor *pr)
{
	if (cpu_online(pr->id)) {
		return (-EINVAL);
	}
	arch_unregister_cpu(pr->id);
	acpi_unmap_lsapic(pr->id);
	return (0);
}
#else
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
{
	return AE_ERROR;
}
static int acpi_processor_handle_eject(struct acpi_processor *pr)
{
	return (-EINVAL);
}
#endif

static
void acpi_processor_install_hotplug_notify(void)
{
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	int action = INSTALL_NOTIFY_HANDLER;
	acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
			    ACPI_ROOT_OBJECT,
			    ACPI_UINT32_MAX,
			    processor_walk_namespace_cb, &action, NULL);
#endif
	register_hotcpu_notifier(&acpi_cpu_notifier);
}

static
void acpi_processor_uninstall_hotplug_notify(void)
{
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	int action = UNINSTALL_NOTIFY_HANDLER;
	acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
			    ACPI_ROOT_OBJECT,
			    ACPI_UINT32_MAX,
			    processor_walk_namespace_cb, &action, NULL);
#endif
	unregister_hotcpu_notifier(&acpi_cpu_notifier);
}

/*
 * We keep the driver loaded even when ACPI is not running.
 * This is needed for the powernow-k8 driver, which works even without
 * ACPI, but needs symbols from this driver.
 */

static int __init acpi_processor_init(void)
{
	int result = 0;


	memset(&processors, 0, sizeof(processors));
	memset(&errata, 0, sizeof(errata));

#ifdef CONFIG_SMP
	if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
				(struct acpi_table_header **)&madt)))
		madt = NULL;
#endif

	acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
	if (!acpi_processor_dir)
		return -ENOMEM;
	acpi_processor_dir->owner = THIS_MODULE;

	result = cpuidle_register_driver(&acpi_idle_driver);
	if (result < 0)
		goto out_proc;

	result = acpi_bus_register_driver(&acpi_processor_driver);
	if (result < 0)
		goto out_cpuidle;

	acpi_processor_install_hotplug_notify();

	acpi_thermal_cpufreq_init();

	acpi_processor_ppc_init();

	return 0;

out_cpuidle:
	cpuidle_unregister_driver(&acpi_idle_driver);

out_proc:
	remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);

	return result;
}

static void __exit acpi_processor_exit(void)
{
	acpi_processor_ppc_exit();

	acpi_thermal_cpufreq_exit();

	acpi_processor_uninstall_hotplug_notify();

	acpi_bus_unregister_driver(&acpi_processor_driver);

	cpuidle_unregister_driver(&acpi_idle_driver);

	remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);

	return;
}

module_init(acpi_processor_init);
module_exit(acpi_processor_exit);

EXPORT_SYMBOL(acpi_processor_set_thermal_limit);

MODULE_ALIAS("processor");