/*
 * acpi_processor.c - ACPI Processor Driver ($Revision: 71 $)
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * TBD:
 *	1. Make # power states dynamic.
 *	2. Support duty_cycle values that span bit 4.
 *	3. Optimize by having the scheduler determine busyness instead of
 *	   having us try to calculate it here.
 *	4. Need C1 timing -- must modify kernel (IRQ handler) to get this.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/memory_hotplug.h>

#include <asm/io.h>
#include <asm/cpu.h>
#include <asm/delay.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/acpi.h>

#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>

#define PREFIX				"ACPI: "

#define ACPI_PROCESSOR_CLASS		"processor"
#define ACPI_PROCESSOR_DEVICE_NAME	"Processor"
#define ACPI_PROCESSOR_FILE_INFO	"info"
#define ACPI_PROCESSOR_FILE_THROTTLING	"throttling"
#define ACPI_PROCESSOR_FILE_LIMIT	"limit"
#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
#define ACPI_PROCESSOR_NOTIFY_POWER	0x81
#define ACPI_PROCESSOR_NOTIFY_THROTTLING 0x82
#define ACPI_PROCESSOR_DEVICE_HID	"ACPI0007"

#define ACPI_PROCESSOR_LIMIT_USER	0
#define ACPI_PROCESSOR_LIMIT_THERMAL	1

#define _COMPONENT	ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_driver");

MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Processor Driver");
MODULE_LICENSE("GPL");

static int acpi_processor_add(struct acpi_device *device);
static int acpi_processor_remove(struct acpi_device *device);
static void acpi_processor_notify(struct acpi_device *device, u32 event);
static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr);
static int acpi_processor_handle_eject(struct acpi_processor *pr);
static int acpi_processor_start(struct acpi_processor *pr);

static const struct acpi_device_id processor_device_ids[] = {
	{ACPI_PROCESSOR_OBJECT_HID, 0},
	{ACPI_PROCESSOR_DEVICE_HID, 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);

static SIMPLE_DEV_PM_OPS(acpi_processor_pm,
			 acpi_processor_suspend, acpi_processor_resume);

static struct acpi_driver acpi_processor_driver = {
	.name = "processor",
	.class = ACPI_PROCESSOR_CLASS,
	.ids = processor_device_ids,
	.ops = {
		.add = acpi_processor_add,
		.remove = acpi_processor_remove,
		.notify = acpi_processor_notify,
		},
	.drv.pm = &acpi_processor_pm,
};

#define INSTALL_NOTIFY_HANDLER		1
#define UNINSTALL_NOTIFY_HANDLER	2

DEFINE_PER_CPU(struct acpi_processor *, processors);
EXPORT_PER_CPU_SYMBOL(processors);

struct acpi_processor_errata errata __read_mostly;

/* --------------------------------------------------------------------------
                                Errata Handling
   -------------------------------------------------------------------------- */

static int acpi_processor_errata_piix4(struct pci_dev *dev)
{
	u8 value1 = 0;
	u8 value2 = 0;

	if (!dev)
		return -EINVAL;

	/*
	 * Note that 'dev' references the PIIX4 ACPI Controller.
	 */

	switch (dev->revision) {
	case 0:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n"));
		break;
	case 1:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n"));
		break;
	case 2:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n"));
		break;
	case 3:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n"));
		break;
	default:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n"));
		break;
	}

	switch (dev->revision) {

	case 0:		/* PIIX4 A-step */
	case 1:		/* PIIX4 B-step */
		/*
		 * See specification changes #13 ("Manual Throttle Duty Cycle")
		 * and #14 ("Enabling and Disabling Manual Throttle"), plus
		 * erratum #5 ("STPCLK# Deassertion Time") from the January
		 * 2002 PIIX4 specification update.  Applies only to older
		 * PIIX4 models.
		 */
		errata.piix4.throttle = 1;
		/* fall through: the erratum below applies to all models */

	case 2:		/* PIIX4E */
	case 3:		/* PIIX4M */
		/*
		 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
		 * Livelock") from the January 2002 PIIX4 specification update.
		 * Applies to all PIIX4 models.
		 */

		/*
		 * BM-IDE
		 * ------
		 * Find the PIIX4 IDE Controller and get the Bus Master IDE
		 * Status register address.  We'll use this later to read
		 * each IDE controller's DMA status to make sure we catch all
		 * DMA activity.
		 */
		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_82371AB,
				     PCI_ANY_ID, PCI_ANY_ID, NULL);
		if (dev) {
			errata.piix4.bmisx = pci_resource_start(dev, 4);
			pci_dev_put(dev);
		}

		/*
		 * Type-F DMA
		 * ----------
		 * Find the PIIX4 ISA Controller and read the Motherboard
		 * DMA controller's status to see if Type-F (Fast) DMA mode
		 * is enabled (bit 7) on either channel.  Note that we'll
		 * disable C3 support if this is enabled, as some legacy
		 * devices won't operate well if fast DMA is disabled.
		 */
		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_82371AB_0,
				     PCI_ANY_ID, PCI_ANY_ID, NULL);
		if (dev) {
			pci_read_config_byte(dev, 0x76, &value1);
			pci_read_config_byte(dev, 0x77, &value2);
			if ((value1 & 0x80) || (value2 & 0x80))
				errata.piix4.fdma = 1;
			pci_dev_put(dev);
		}

		break;
	}

	if (errata.piix4.bmisx)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Bus master activity detection (BM-IDE) erratum enabled\n"));
	if (errata.piix4.fdma)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Type-F DMA livelock erratum (C3 disabled)\n"));

	return 0;
}

static int acpi_processor_errata(struct acpi_processor *pr)
{
	int result = 0;
	struct pci_dev *dev = NULL;

	if (!pr)
		return -EINVAL;

	/*
	 * PIIX4
	 */
	dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
			     PCI_ANY_ID, NULL);
	if (dev) {
		result = acpi_processor_errata_piix4(dev);
		pci_dev_put(dev);
	}

	return result;
}

/* --------------------------------------------------------------------------
                                Driver Interface
   -------------------------------------------------------------------------- */

static int acpi_processor_get_info(struct acpi_device *device)
{
	acpi_status status = 0;
	union acpi_object object = { 0 };
	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
	struct acpi_processor *pr;
	int cpu_index, device_declaration = 0;
	static int cpu0_initialized;

	pr = acpi_driver_data(device);
	if (!pr)
		return -EINVAL;

	if (num_online_cpus() > 1)
		errata.smp = TRUE;

	acpi_processor_errata(pr);

	/*
	 * Check to see if we have bus mastering arbitration control.  This
	 * is required for proper C3 usage (to maintain cache coherency).
	 */
	if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
		pr->flags.bm_control = 1;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Bus mastering arbitration control present\n"));
	} else
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "No bus mastering arbitration control\n"));

	if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) {
		/* Declared with "Processor" statement; match ProcessorID */
		status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
		if (ACPI_FAILURE(status)) {
			dev_err(&device->dev,
				"Failed to evaluate processor object (0x%x)\n",
				status);
			return -ENODEV;
		}

		/*
		 * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
		 *      >>> 'acpi_get_processor_id(acpi_id, &id)' in
		 *      arch/xxx/acpi.c
		 */
		pr->acpi_id = object.processor.proc_id;
	} else {
		/*
		 * Declared with "Device" statement; match _UID.
		 * Note that we don't handle string _UIDs yet.
		 */
		unsigned long long value;
		status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
					       NULL, &value);
		if (ACPI_FAILURE(status)) {
			dev_err(&device->dev,
				"Failed to evaluate processor _UID (0x%x)\n",
				status);
			return -ENODEV;
		}
		device_declaration = 1;
		pr->acpi_id = value;
	}
	cpu_index = acpi_get_cpuid(pr->handle, device_declaration, pr->acpi_id);

	/* Handle UP system running SMP kernel, with no LAPIC in MADT */
	if (!cpu0_initialized && (cpu_index == -1) &&
	    (num_online_cpus() == 1)) {
		cpu_index = 0;
	}

	cpu0_initialized = 1;

	pr->id = cpu_index;

	/*
	 * Extra Processor objects may be enumerated on MP systems with
	 * less than the max # of CPUs.  They should be ignored if and
	 * only if they are physically not present.
	 */
	if (pr->id == -1) {
		if (ACPI_FAILURE(acpi_processor_hotadd_init(pr)))
			return -ENODEV;
	}
	/*
	 * On some boxes several processors use the same processor bus id,
	 * but they are located in different scopes, for example:
	 *	\_SB.SCK0.CPU0
	 *	\_SB.SCK1.CPU0
	 * Rename the processor device bus id; the new bus id is generated
	 * in the form "CPU<ID>".
	 */
	sprintf(acpi_device_bid(device), "CPU%X", pr->id);
	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
			  pr->acpi_id));

	if (!object.processor.pblk_address)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
	else if (object.processor.pblk_length != 6)
		dev_err(&device->dev, "Invalid PBLK length [%d]\n",
			object.processor.pblk_length);
	else {
		pr->throttling.address = object.processor.pblk_address;
		pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
		pr->throttling.duty_width = acpi_gbl_FADT.duty_width;

		pr->pblk = object.processor.pblk_address;

		/*
		 * We don't care about error returns - we just try to mark
		 * these reserved so that nobody else is confused into thinking
		 * that this region might be unused.
		 *
		 * (In particular, allocating the IO range for Cardbus.)
		 */
		request_region(pr->throttling.address, 6, "ACPI CPU throttle");
	}

	/*
	 * If ACPI describes a slot number for this CPU, we can use it to
	 * ensure we get the right value in the "physical id" field
	 * of /proc/cpuinfo.
	 */
	status = acpi_evaluate_object(pr->handle, "_SUN", NULL, &buffer);
	if (ACPI_SUCCESS(status))
		arch_fix_phys_package_id(pr->id, object.integer.value);

	return 0;
}

static DEFINE_PER_CPU(void *, processor_device_array);

static void acpi_processor_notify(struct acpi_device *device, u32 event)
{
	struct acpi_processor *pr = acpi_driver_data(device);
	int saved;

	if (!pr)
		return;

	switch (event) {
	case ACPI_PROCESSOR_NOTIFY_PERFORMANCE:
		saved = pr->performance_platform_limit;
		acpi_processor_ppc_has_changed(pr, 1);
		if (saved == pr->performance_platform_limit)
			break;
		acpi_bus_generate_proc_event(device, event,
					     pr->performance_platform_limit);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
						dev_name(&device->dev), event,
						pr->performance_platform_limit);
		break;
	case ACPI_PROCESSOR_NOTIFY_POWER:
		acpi_processor_cst_has_changed(pr);
		acpi_bus_generate_proc_event(device, event, 0);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
						dev_name(&device->dev), event, 0);
		break;
	case ACPI_PROCESSOR_NOTIFY_THROTTLING:
		acpi_processor_tstate_has_changed(pr);
		acpi_bus_generate_proc_event(device, event, 0);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
						dev_name(&device->dev), event, 0);
		break;
	default:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Unsupported event [0x%x]\n", event));
		break;
	}

	return;
}

static int acpi_cpu_soft_notify(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct acpi_processor *pr = per_cpu(processors, cpu);

	if (action == CPU_ONLINE && pr) {
		/*
		 * CPU got physically hotplugged and onlined for the first
		 * time: initialize the missing bits.
		 */
		if (pr->flags.need_hotplug_init) {
			pr_info("Will online and init hotplugged CPU: %d\n",
				pr->id);
			WARN(acpi_processor_start(pr),
			     "Failed to start CPU: %d\n", pr->id);
			pr->flags.need_hotplug_init = 0;
		/* Normal CPU soft online event */
		} else {
			acpi_processor_ppc_has_changed(pr, 0);
			acpi_processor_hotplug(pr);
			acpi_processor_reevaluate_tstate(pr, action);
			acpi_processor_tstate_has_changed(pr);
		}
	}
	if (action == CPU_DEAD && pr) {
		/* Invalidate flag.throttling after one CPU goes offline */
		acpi_processor_reevaluate_tstate(pr, action);
	}
	return NOTIFY_OK;
}

static struct notifier_block acpi_cpu_notifier = {
	.notifier_call = acpi_cpu_soft_notify,
};

/*
 * acpi_processor_start() is called by the cpu_hotplug_notifier func:
 * acpi_cpu_soft_notify().  Getting it __cpuinit{data} is difficult; the
 * root cause seems to be that acpi_processor_uninstall_hotplug_notify()
 * is in the module_exit (__exit) func.  Allowing acpi_processor_start()
 * to not be in the __cpuinit section, while being called from __cpuinit
 * funcs via __ref, looks like the right thing to do here.
 */
static __ref int acpi_processor_start(struct acpi_processor *pr)
{
	struct acpi_device *device = per_cpu(processor_device_array, pr->id);
	int result = 0;

#ifdef CONFIG_CPU_FREQ
	acpi_processor_ppc_has_changed(pr, 0);
	acpi_processor_load_module(pr);
#endif
	acpi_processor_get_throttling_info(pr);
	acpi_processor_get_limit_info(pr);

	if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
		acpi_processor_power_init(pr);

	pr->cdev = thermal_cooling_device_register("Processor", device,
						   &processor_cooling_ops);
	if (IS_ERR(pr->cdev)) {
		result = PTR_ERR(pr->cdev);
		goto err_power_exit;
	}

	dev_dbg(&device->dev, "registered as cooling_device%d\n",
		pr->cdev->id);

	result = sysfs_create_link(&device->dev.kobj,
				   &pr->cdev->device.kobj,
				   "thermal_cooling");
	if (result) {
		dev_err(&device->dev,
			"Failed to create sysfs link 'thermal_cooling'\n");
		goto err_thermal_unregister;
	}
	result = sysfs_create_link(&pr->cdev->device.kobj,
				   &device->dev.kobj,
				   "device");
	if (result) {
		dev_err(&pr->cdev->device,
			"Failed to create sysfs link 'device'\n");
		goto err_remove_sysfs_thermal;
	}

	return 0;

err_remove_sysfs_thermal:
	sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
err_thermal_unregister:
	thermal_cooling_device_unregister(pr->cdev);
err_power_exit:
	acpi_processor_power_exit(pr);

	return result;
}

/*
 * Do not put anything in here which needs the core to be online.
 * For example MSR access or setting up things which check for cpuinfo_x86
 * (cpu_data(cpu)) values, like CPU feature flags, family, model, etc.
 * Such things have to be put in and set up above in acpi_processor_start()
 */
static int __cpuinit acpi_processor_add(struct acpi_device *device)
{
	struct acpi_processor *pr = NULL;
	int result = 0;
	struct device *dev;

	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
	if (!pr)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
		result = -ENOMEM;
		goto err_free_pr;
	}

	pr->handle = device->handle;
	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
	device->driver_data = pr;

	result = acpi_processor_get_info(device);
	if (result) {
		/* Processor is physically not present */
		return 0;
	}

#ifdef CONFIG_SMP
	if (pr->id >= setup_max_cpus && pr->id != 0)
		return 0;
#endif

	BUG_ON(pr->id >= nr_cpu_ids);

	/*
	 * Buggy BIOS check
	 * ACPI id of processors can be reported wrongly by the BIOS.
	 * Don't trust it blindly.
	 */
	if (per_cpu(processor_device_array, pr->id) != NULL &&
	    per_cpu(processor_device_array, pr->id) != device) {
		dev_warn(&device->dev,
			 "BIOS reported wrong ACPI id %d for the processor\n",
			 pr->id);
		result = -ENODEV;
		goto err_free_cpumask;
	}
	per_cpu(processor_device_array, pr->id) = device;

	per_cpu(processors, pr->id) = pr;

	dev = get_cpu_device(pr->id);
	if (sysfs_create_link(&device->dev.kobj, &dev->kobj, "sysdev")) {
		result = -EFAULT;
		goto err_clear_processor;
	}

	/*
	 * Do not start hotplugged CPUs now, but when they
	 * are onlined the first time.
	 */
	if (pr->flags.need_hotplug_init)
		return 0;

	result = acpi_processor_start(pr);
	if (result)
		goto err_remove_sysfs;

	return 0;

err_remove_sysfs:
	sysfs_remove_link(&device->dev.kobj, "sysdev");
err_clear_processor:
	/*
	 * processor_device_array is not cleared to allow checks for buggy BIOS
	 */
	per_cpu(processors, pr->id) = NULL;
err_free_cpumask:
	free_cpumask_var(pr->throttling.shared_cpu_map);
err_free_pr:
	kfree(pr);
	return result;
}

static int acpi_processor_remove(struct acpi_device *device)
{
	struct acpi_processor *pr = NULL;

	if (!device || !acpi_driver_data(device))
		return -EINVAL;

	pr = acpi_driver_data(device);

	if (pr->id >= nr_cpu_ids)
		goto free;

	if (device->removal_type == ACPI_BUS_REMOVAL_EJECT) {
		if (acpi_processor_handle_eject(pr))
			return -EINVAL;
	}

	acpi_processor_power_exit(pr);

	sysfs_remove_link(&device->dev.kobj, "sysdev");

	if (pr->cdev) {
		sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
		sysfs_remove_link(&pr->cdev->device.kobj, "device");
		thermal_cooling_device_unregister(pr->cdev);
		pr->cdev = NULL;
	}

	per_cpu(processors, pr->id) = NULL;
	per_cpu(processor_device_array, pr->id) = NULL;
	try_offline_node(cpu_to_node(pr->id));

 free:
	free_cpumask_var(pr->throttling.shared_cpu_map);
	kfree(pr);

	return 0;
}

#ifdef CONFIG_ACPI_HOTPLUG_CPU
/****************************************************************************
 *			ACPI processor hotplug support			    *
 ****************************************************************************/

static int is_processor_present(acpi_handle handle)
{
	acpi_status status;
	unsigned long long sta = 0;

	status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);

	if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_PRESENT))
		return 1;

	/*
	 * _STA is mandatory for a processor that supports hot plug.
	 */
	if (status == AE_NOT_FOUND)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Processor does not support hot plug\n"));
	else
		ACPI_EXCEPTION((AE_INFO, status,
				"Processor Device is not present"));
	return 0;
}

static void acpi_processor_hotplug_notify(acpi_handle handle,
					  u32 event, void *data)
{
	struct acpi_device *device = NULL;
	struct acpi_eject_event *ej_event = NULL;
	u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
	acpi_status status;
	int result;

	acpi_scan_lock_acquire();

	switch (event) {
	case ACPI_NOTIFY_BUS_CHECK:
	case ACPI_NOTIFY_DEVICE_CHECK:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Processor driver received %s event\n",
				  (event == ACPI_NOTIFY_BUS_CHECK) ?
699 "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK")); 700 701 if (!is_processor_present(handle)) 702 break; 703 704 if (!acpi_bus_get_device(handle, &device)) 705 break; 706 707 result = acpi_bus_scan(handle); 708 if (result) { 709 acpi_handle_err(handle, "Unable to add the device\n"); 710 break; 711 } 712 result = acpi_bus_get_device(handle, &device); 713 if (result) { 714 acpi_handle_err(handle, "Missing device object\n"); 715 break; 716 } 717 ost_code = ACPI_OST_SC_SUCCESS; 718 break; 719 720 case ACPI_NOTIFY_EJECT_REQUEST: 721 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 722 "received ACPI_NOTIFY_EJECT_REQUEST\n")); 723 724 if (acpi_bus_get_device(handle, &device)) { 725 acpi_handle_err(handle, 726 "Device don't exist, dropping EJECT\n"); 727 break; 728 } 729 if (!acpi_driver_data(device)) { 730 acpi_handle_err(handle, 731 "Driver data is NULL, dropping EJECT\n"); 732 break; 733 } 734 735 ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL); 736 if (!ej_event) { 737 acpi_handle_err(handle, "No memory, dropping EJECT\n"); 738 break; 739 } 740 741 get_device(&device->dev); 742 ej_event->device = device; 743 ej_event->event = ACPI_NOTIFY_EJECT_REQUEST; 744 /* The eject is carried out asynchronously. */ 745 status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, 746 ej_event); 747 if (ACPI_FAILURE(status)) { 748 put_device(&device->dev); 749 kfree(ej_event); 750 break; 751 } 752 goto out; 753 754 default: 755 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 756 "Unsupported event [0x%x]\n", event)); 757 758 /* non-hotplug event; possibly handled by other handler */ 759 goto out; 760 } 761 762 /* Inform firmware that the hotplug operation has completed */ 763 (void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL); 764 765 out: 766 acpi_scan_lock_release(); 767 } 768 769 static acpi_status is_processor_device(acpi_handle handle) 770 { 771 struct acpi_device_info *info; 772 char *hid; 773 acpi_status status; 774 775 status = acpi_get_object_info(handle, &info); 776 if (ACPI_FAILURE(status)) 777 return status; 778 779 if (info->type == ACPI_TYPE_PROCESSOR) { 780 kfree(info); 781 return AE_OK; /* found a processor object */ 782 } 783 784 if (!(info->valid & ACPI_VALID_HID)) { 785 kfree(info); 786 return AE_ERROR; 787 } 788 789 hid = info->hardware_id.string; 790 if ((hid == NULL) || strcmp(hid, ACPI_PROCESSOR_DEVICE_HID)) { 791 kfree(info); 792 return AE_ERROR; 793 } 794 795 kfree(info); 796 return AE_OK; /* found a processor device object */ 797 } 798 799 static acpi_status 800 processor_walk_namespace_cb(acpi_handle handle, 801 u32 lvl, void *context, void **rv) 802 { 803 acpi_status status; 804 int *action = context; 805 806 status = is_processor_device(handle); 807 if (ACPI_FAILURE(status)) 808 return AE_OK; /* not a processor; continue to walk */ 809 810 switch (*action) { 811 case INSTALL_NOTIFY_HANDLER: 812 acpi_install_notify_handler(handle, 813 ACPI_SYSTEM_NOTIFY, 814 acpi_processor_hotplug_notify, 815 NULL); 816 break; 817 case UNINSTALL_NOTIFY_HANDLER: 818 acpi_remove_notify_handler(handle, 819 ACPI_SYSTEM_NOTIFY, 820 acpi_processor_hotplug_notify); 821 break; 822 default: 823 break; 824 } 825 826 /* found a processor; skip walking underneath */ 827 return AE_CTRL_DEPTH; 828 } 829 830 static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr) 831 { 832 acpi_handle handle = pr->handle; 833 834 if (!is_processor_present(handle)) { 835 return AE_ERROR; 836 } 837 838 if (acpi_map_lsapic(handle, &pr->id)) 839 return AE_ERROR; 840 841 if (arch_register_cpu(pr->id)) { 842 
		acpi_unmap_lsapic(pr->id);
		return AE_ERROR;
	}

	/*
	 * CPU got hot-plugged, but cpu_data is not initialized yet.  Set a
	 * flag to delay the cpu_idle/throttling initialization done in
	 *	acpi_processor_add()
	 *	acpi_processor_get_info()
	 * until the CPU comes online for the first time.
	 * TBD: Clean up the above functions and try to do this more elegantly.
	 */
	pr_info("CPU %d got hotplugged\n", pr->id);
	pr->flags.need_hotplug_init = 1;

	return AE_OK;
}

static int acpi_processor_handle_eject(struct acpi_processor *pr)
{
	if (cpu_online(pr->id))
		cpu_down(pr->id);

	get_online_cpus();
	/*
	 * The cpu might become online again at this point.  So we check
	 * whether the cpu has been onlined or not.  If the cpu became online,
	 * it means that someone wants to use the cpu, so
	 * acpi_processor_handle_eject() returns -EAGAIN.
	 */
	if (unlikely(cpu_online(pr->id))) {
		put_online_cpus();
		pr_warn("Failed to remove CPU %d, because other task "
			"brought the CPU back online\n", pr->id);
		return -EAGAIN;
	}
	arch_unregister_cpu(pr->id);
	acpi_unmap_lsapic(pr->id);
	put_online_cpus();
	return 0;
}
#else
static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
{
	return AE_ERROR;
}
static int acpi_processor_handle_eject(struct acpi_processor *pr)
{
	return -EINVAL;
}
#endif

static void acpi_processor_install_hotplug_notify(void)
{
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	int action = INSTALL_NOTIFY_HANDLER;
	acpi_walk_namespace(ACPI_TYPE_ANY,
			    ACPI_ROOT_OBJECT,
			    ACPI_UINT32_MAX,
			    processor_walk_namespace_cb, NULL, &action, NULL);
#endif
	register_hotcpu_notifier(&acpi_cpu_notifier);
}

static void acpi_processor_uninstall_hotplug_notify(void)
{
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	int action = UNINSTALL_NOTIFY_HANDLER;
	acpi_walk_namespace(ACPI_TYPE_ANY,
			    ACPI_ROOT_OBJECT,
			    ACPI_UINT32_MAX,
			    processor_walk_namespace_cb, NULL, &action, NULL);
#endif
	unregister_hotcpu_notifier(&acpi_cpu_notifier);
}

/*
 * We keep the driver loaded even when ACPI is not running.
 * This is needed for the powernow-k8 driver, which works even without
 * ACPI but needs symbols from this driver.
 */

static int __init acpi_processor_init(void)
{
	int result = 0;

	if (acpi_disabled)
		return 0;

	result = acpi_bus_register_driver(&acpi_processor_driver);
	if (result < 0)
		return result;

	acpi_processor_install_hotplug_notify();

	acpi_thermal_cpufreq_init();

	acpi_processor_ppc_init();

	acpi_processor_throttling_init();

	return 0;
}

static void __exit acpi_processor_exit(void)
{
	if (acpi_disabled)
		return;

	acpi_processor_ppc_exit();

	acpi_thermal_cpufreq_exit();

	acpi_processor_uninstall_hotplug_notify();

	acpi_bus_unregister_driver(&acpi_processor_driver);

	return;
}

module_init(acpi_processor_init);
module_exit(acpi_processor_exit);

MODULE_ALIAS("processor");