// SPDX-License-Identifier: GPL-2.0-only
/*
 * acpi_processor.c - ACPI processor enumeration support
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Copyright (C) 2013, Intel Corporation
 *                     Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include <acpi/processor.h>

#include <asm/cpu.h>

#include "internal.h"

DEFINE_PER_CPU(struct acpi_processor *, processors);
EXPORT_PER_CPU_SYMBOL(processors);

/* Errata Handling */
struct acpi_processor_errata errata __read_mostly;
EXPORT_SYMBOL_GPL(errata);

static int acpi_processor_errata_piix4(struct pci_dev *dev)
{
	struct pci_dev *sub_dev;
	u8 value1 = 0;
	u8 value2 = 0;

	if (!dev)
		return -EINVAL;

	/*
	 * Note that 'dev' references the PIIX4 ACPI Controller.
	 */

	switch (dev->revision) {
	case 0:
		dev_dbg(&dev->dev, "Found PIIX4 A-step\n");
		break;
	case 1:
		dev_dbg(&dev->dev, "Found PIIX4 B-step\n");
		break;
	case 2:
		dev_dbg(&dev->dev, "Found PIIX4E\n");
		break;
	case 3:
		dev_dbg(&dev->dev, "Found PIIX4M\n");
		break;
	default:
		dev_dbg(&dev->dev, "Found unknown PIIX4\n");
		break;
	}

	switch (dev->revision) {
	case 0:		/* PIIX4 A-step */
	case 1:		/* PIIX4 B-step */
		/*
		 * See specification changes #13 ("Manual Throttle Duty Cycle")
		 * and #14 ("Enabling and Disabling Manual Throttle"), plus
		 * erratum #5 ("STPCLK# Deassertion Time") from the January
		 * 2002 PIIX4 specification update.  Applies only to older
		 * PIIX4 models.
		 */
		errata.piix4.throttle = 1;
		fallthrough;

	case 2:		/* PIIX4E */
	case 3:		/* PIIX4M */
		/*
		 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
		 * Livelock") from the January 2002 PIIX4 specification update.
		 * Applies to all PIIX4 models.
		 */

		/*
		 * BM-IDE
		 * ------
		 * Find the PIIX4 IDE Controller and get the Bus Master IDE
		 * Status register address.  We'll use this later to read
		 * each IDE controller's DMA status to make sure we catch all
		 * DMA activity.
		 *
		 * Use a separate local for the lookups below so that 'dev'
		 * keeps referencing the ACPI controller; it is still used
		 * (and must stay valid) for the messages at the end of this
		 * function.
		 */
		sub_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
					 PCI_DEVICE_ID_INTEL_82371AB,
					 PCI_ANY_ID, PCI_ANY_ID, NULL);
		if (sub_dev) {
			errata.piix4.bmisx = pci_resource_start(sub_dev, 4);
			pci_dev_put(sub_dev);
		}

		/*
		 * Type-F DMA
		 * ----------
		 * Find the PIIX4 ISA Controller and read the Motherboard
		 * DMA controller's status to see if Type-F (Fast) DMA mode
		 * is enabled (bit 7) on either channel.  Note that we'll
		 * disable C3 support if this is enabled, as some legacy
		 * devices won't operate well if fast DMA is disabled.
		 */
		sub_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
					 PCI_DEVICE_ID_INTEL_82371AB_0,
					 PCI_ANY_ID, PCI_ANY_ID, NULL);
		if (sub_dev) {
			pci_read_config_byte(sub_dev, 0x76, &value1);
			pci_read_config_byte(sub_dev, 0x77, &value2);
			if ((value1 & 0x80) || (value2 & 0x80))
				errata.piix4.fdma = 1;
			pci_dev_put(sub_dev);
		}

		break;
	}

	if (errata.piix4.bmisx)
		dev_dbg(&dev->dev, "Bus master activity detection (BM-IDE) erratum enabled\n");
	if (errata.piix4.fdma)
		dev_dbg(&dev->dev, "Type-F DMA livelock erratum (C3 disabled)\n");

	return 0;
}
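/*
 * Look up the PIIX4 ACPI controller (82371AB function 3) and, if it is
 * present, apply the revision-specific workarounds above.  The PIIX4 family
 * is the only chipset with processor errata handled here.
 */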
static int acpi_processor_errata(void)
{
	int result = 0;
	struct pci_dev *dev = NULL;

	/*
	 * PIIX4
	 */
	dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
			     PCI_ANY_ID, NULL);
	if (dev) {
		result = acpi_processor_errata_piix4(dev);
		pci_dev_put(dev);
	}

	return result;
}

/* Create a platform device to represent a CPU frequency control mechanism. */
static void cpufreq_add_device(const char *name)
{
	struct platform_device *pdev;

	pdev = platform_device_register_simple(name, PLATFORM_DEVID_NONE, NULL, 0);
	if (IS_ERR(pdev))
		pr_info("%s device creation failed: %ld\n", name, PTR_ERR(pdev));
}

#ifdef CONFIG_X86
/* Check presence of Processor Clocking Control by searching for \_SB.PCCH. */
static void __init acpi_pcc_cpufreq_init(void)
{
	acpi_status status;
	acpi_handle handle;

	status = acpi_get_handle(NULL, "\\_SB", &handle);
	if (ACPI_FAILURE(status))
		return;

	if (acpi_has_method(handle, "PCCH"))
		cpufreq_add_device("pcc-cpufreq");
}
#else
static void __init acpi_pcc_cpufreq_init(void) {}
#endif /* CONFIG_X86 */

/* Initialization */
#ifdef CONFIG_ACPI_HOTPLUG_CPU
int __weak acpi_map_cpu(acpi_handle handle,
			phys_cpuid_t physid, u32 acpi_id, int *pcpu)
{
	return -ENODEV;
}

int __weak acpi_unmap_cpu(int cpu)
{
	return -ENODEV;
}

int __weak arch_register_cpu(int cpu)
{
	return -ENODEV;
}

void __weak arch_unregister_cpu(int cpu) {}

static int acpi_processor_hotadd_init(struct acpi_processor *pr)
{
	unsigned long long sta;
	acpi_status status;
	int ret;

	if (invalid_phys_cpuid(pr->phys_id))
		return -ENODEV;

	status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
	if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT))
		return -ENODEV;

	cpu_maps_update_begin();
	cpus_write_lock();

	ret = acpi_map_cpu(pr->handle, pr->phys_id, pr->acpi_id, &pr->id);
	if (ret)
		goto out;

	ret = arch_register_cpu(pr->id);
	if (ret) {
		acpi_unmap_cpu(pr->id);
		goto out;
	}

	/*
	 * CPU got hot-added, but cpu_data is not initialized yet.  Set a flag
	 * to delay cpu_idle/throttling initialization and do it when the CPU
	 * gets online for the first time.
	 */
	pr_info("CPU%d has been hot-added\n", pr->id);
	pr->flags.need_hotplug_init = 1;

out:
	cpus_write_unlock();
	cpu_maps_update_done();
	return ret;
}
#else
static inline int acpi_processor_hotadd_init(struct acpi_processor *pr)
{
	return -ENODEV;
}
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
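/*
 * Gather the static properties of a processor object: the ACPI ID (taken
 * from the Processor statement, or from _UID for Device objects), the
 * physical and logical CPU IDs, the P_BLK throttling registers and, if
 * available, the _SUN slot number.
 */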
static int acpi_processor_get_info(struct acpi_device *device)
{
	union acpi_object object = { 0 };
	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
	struct acpi_processor *pr = acpi_driver_data(device);
	int device_declaration = 0;
	acpi_status status = AE_OK;
	static int cpu0_initialized;
	unsigned long long value;

	acpi_processor_errata();

	/*
	 * Check to see if we have bus mastering arbitration control.  This
	 * is required for proper C3 usage (to maintain cache coherency).
	 */
	if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
		pr->flags.bm_control = 1;
		dev_dbg(&device->dev, "Bus mastering arbitration control present\n");
	} else
		dev_dbg(&device->dev, "No bus mastering arbitration control\n");

	if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) {
		/* Declared with "Processor" statement; match ProcessorID */
		status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
		if (ACPI_FAILURE(status)) {
			dev_err(&device->dev,
				"Failed to evaluate processor object (0x%x)\n",
				status);
			return -ENODEV;
		}

		pr->acpi_id = object.processor.proc_id;
	} else {
		/*
		 * Declared with "Device" statement; match _UID.
		 */
		status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
					       NULL, &value);
		if (ACPI_FAILURE(status)) {
			dev_err(&device->dev,
				"Failed to evaluate processor _UID (0x%x)\n",
				status);
			return -ENODEV;
		}
		device_declaration = 1;
		pr->acpi_id = value;
	}

	if (acpi_duplicate_processor_id(pr->acpi_id)) {
		if (pr->acpi_id == 0xff)
			dev_info_once(&device->dev,
				"Entry not well-defined, consider updating BIOS\n");
		else
			dev_err(&device->dev,
				"Failed to get unique processor _UID (0x%x)\n",
				pr->acpi_id);
		return -ENODEV;
	}

	pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration,
				       pr->acpi_id);
	if (invalid_phys_cpuid(pr->phys_id))
		dev_dbg(&device->dev, "Failed to get CPU physical ID.\n");

	pr->id = acpi_map_cpuid(pr->phys_id, pr->acpi_id);
	if (!cpu0_initialized) {
		cpu0_initialized = 1;
		/*
		 * Handle a UP system running an SMP kernel, with no CPU
		 * entry in the MADT.
		 */
		if (!acpi_has_cpu_in_madt() && invalid_logical_cpuid(pr->id) &&
		    (num_online_cpus() == 1))
			pr->id = 0;
		/*
		 * Check availability of Processor Performance Control by
		 * looking at the presence of the _PCT object under the first
		 * processor definition.
		 */
		if (acpi_has_method(pr->handle, "_PCT"))
			cpufreq_add_device("acpi-cpufreq");
	}

	/*
	 * Extra Processor objects may be enumerated on MP systems with
	 * less than the max # of CPUs.  They should be ignored _iff
	 * they are physically not present.
	 *
	 * NOTE: Even if the processor has a cpuid, it may not be present,
	 * because the cpuid <-> apicid mapping is persistent now.
	 */
	if (invalid_logical_cpuid(pr->id) || !cpu_present(pr->id)) {
		int ret = acpi_processor_hotadd_init(pr);

		if (ret)
			return ret;
	}

	/*
	 * On some boxes several processors use the same processor bus id,
	 * but they are located in different scopes.  For example:
	 * \_SB.SCK0.CPU0
	 * \_SB.SCK1.CPU0
	 * Rename the processor device bus id, generating the new bus id
	 * in the format "CPU+CPU ID".
	 */
	sprintf(acpi_device_bid(device), "CPU%X", pr->id);
	dev_dbg(&device->dev, "Processor [%d:%d]\n", pr->id, pr->acpi_id);

	if (!object.processor.pblk_address)
		dev_dbg(&device->dev, "No PBLK (NULL address)\n");
	else if (object.processor.pblk_length != 6)
		dev_err(&device->dev, "Invalid PBLK length [%d]\n",
			object.processor.pblk_length);
	else {
		pr->throttling.address = object.processor.pblk_address;
		pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
		pr->throttling.duty_width = acpi_gbl_FADT.duty_width;

		pr->pblk = object.processor.pblk_address;
	}

	/*
	 * If ACPI describes a slot number for this CPU, we can use it to
	 * ensure we get the right value in the "physical id" field
	 * of /proc/cpuinfo.
	 */
	status = acpi_evaluate_integer(pr->handle, "_SUN", NULL, &value);
	if (ACPI_SUCCESS(status))
		arch_fix_phys_package_id(pr->id, value);

	return 0;
}

/*
 * Do not put anything in here which needs the core to be online.
 * For example MSR access or setting up things which check for cpuinfo_x86
 * (cpu_data(cpu)) values, like CPU feature flags, family, model, etc.
 * Such things have to be put in and set up by the processor driver's .probe().
 */
static DEFINE_PER_CPU(void *, processor_device_array);
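/*
 * Allocate the struct acpi_processor for a newly enumerated processor
 * object, look up the corresponding CPU device, bind the two together and
 * trigger the processor driver's .probe() via device_attach().
 */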
static int acpi_processor_add(struct acpi_device *device,
			      const struct acpi_device_id *id)
{
	struct acpi_processor *pr;
	struct device *dev;
	int result = 0;

	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
	if (!pr)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
		result = -ENOMEM;
		goto err_free_pr;
	}

	pr->handle = device->handle;
	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
	device->driver_data = pr;

	result = acpi_processor_get_info(device);
	if (result) /* Processor is not physically present or unavailable */
		return 0;

	BUG_ON(pr->id >= nr_cpu_ids);

	/*
	 * Buggy BIOS check.
	 * ACPI id of processors can be reported wrongly by the BIOS.
	 * Don't trust it blindly.
	 */
	if (per_cpu(processor_device_array, pr->id) != NULL &&
	    per_cpu(processor_device_array, pr->id) != device) {
		dev_warn(&device->dev,
			 "BIOS reported wrong ACPI id %d for the processor\n",
			 pr->id);
		/* Give up, but do not abort the namespace scan. */
		goto err;
	}
	/*
	 * processor_device_array is not cleared on errors to allow buggy BIOS
	 * checks.
	 */
	per_cpu(processor_device_array, pr->id) = device;
	per_cpu(processors, pr->id) = pr;

	dev = get_cpu_device(pr->id);
	if (!dev) {
		result = -ENODEV;
		goto err;
	}

	result = acpi_bind_one(dev, device);
	if (result)
		goto err;

	pr->dev = dev;

	/* Trigger the processor driver's .probe() if present. */
	if (device_attach(dev) >= 0)
		return 1;

	dev_err(dev, "Processor driver could not be attached\n");
	acpi_unbind_one(dev);

 err:
	free_cpumask_var(pr->throttling.shared_cpu_map);
	device->driver_data = NULL;
	per_cpu(processors, pr->id) = NULL;
 err_free_pr:
	kfree(pr);
	return result;
}
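/*
 * Counterpart of acpi_processor_add(): unbind the processor driver and undo
 * the logical CPU mapping.  Only reachable on CPU hot-removal, hence the
 * CONFIG_ACPI_HOTPLUG_CPU guard.
 */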
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Removal */
static void acpi_processor_remove(struct acpi_device *device)
{
	struct acpi_processor *pr;

	if (!device || !acpi_driver_data(device))
		return;

	pr = acpi_driver_data(device);
	if (pr->id >= nr_cpu_ids)
		goto out;

	/*
	 * The only reason why we ever get here is CPU hot-removal.  The CPU is
	 * already offline and the ACPI device removal locking prevents it from
	 * being put back online at this point.
	 *
	 * Unbind the driver from the processor device and detach it from the
	 * ACPI companion object.
	 */
	device_release_driver(pr->dev);
	acpi_unbind_one(pr->dev);

	/* Clean up. */
	per_cpu(processor_device_array, pr->id) = NULL;
	per_cpu(processors, pr->id) = NULL;

	cpu_maps_update_begin();
	cpus_write_lock();

	/* Remove the CPU. */
	arch_unregister_cpu(pr->id);
	acpi_unmap_cpu(pr->id);

	cpus_write_unlock();
	cpu_maps_update_done();

	try_offline_node(cpu_to_node(pr->id));

 out:
	free_cpumask_var(pr->throttling.shared_cpu_map);
	kfree(pr);
}
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
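/*
 * Advertise OS capabilities to the platform via _OSC before the processor
 * devices are enumerated.  On HWP-capable CPUs, capability bit 12 tells the
 * firmware that the OS itself handles the native thermal LVT interrupt.
 * Both Processor objects and processor Device objects are walked, and the
 * walk terminates after the first successful handshake.
 */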
#ifdef CONFIG_X86
static bool acpi_hwp_native_thermal_lvt_set;
static acpi_status __init acpi_hwp_native_thermal_lvt_osc(acpi_handle handle,
							   u32 lvl,
							   void *context,
							   void **rv)
{
	u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953";
	u32 capbuf[2];
	struct acpi_osc_context osc_context = {
		.uuid_str = sb_uuid_str,
		.rev = 1,
		.cap.length = 8,
		.cap.pointer = capbuf,
	};

	if (acpi_hwp_native_thermal_lvt_set)
		return AE_CTRL_TERMINATE;

	capbuf[0] = 0x0000;
	capbuf[1] = 0x1000; /* set bit 12 */

	if (ACPI_SUCCESS(acpi_run_osc(handle, &osc_context))) {
		if (osc_context.ret.pointer && osc_context.ret.length > 1) {
			u32 *capbuf_ret = osc_context.ret.pointer;

			if (capbuf_ret[1] & 0x1000) {
				acpi_handle_info(handle,
					"_OSC native thermal LVT Acked\n");
				acpi_hwp_native_thermal_lvt_set = true;
			}
		}
		kfree(osc_context.ret.pointer);
	}

	return AE_OK;
}

void __init acpi_early_processor_osc(void)
{
	if (boot_cpu_has(X86_FEATURE_HWP)) {
		acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
				    ACPI_UINT32_MAX,
				    acpi_hwp_native_thermal_lvt_osc,
				    NULL, NULL, NULL);
		acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID,
				 acpi_hwp_native_thermal_lvt_osc,
				 NULL, NULL);
	}
}
#endif

/*
 * The following ACPI IDs are known to be suitable for representing as
 * processor devices.
 */
static const struct acpi_device_id processor_device_ids[] = {
	{ ACPI_PROCESSOR_OBJECT_HID, },
	{ ACPI_PROCESSOR_DEVICE_HID, },
	{ }
};

static struct acpi_scan_handler processor_handler = {
	.ids = processor_device_ids,
	.attach = acpi_processor_add,
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	.detach = acpi_processor_remove,
#endif
	.hotplug = {
		.enabled = true,
	},
};

static int acpi_processor_container_attach(struct acpi_device *dev,
					   const struct acpi_device_id *id)
{
	return 1;
}

static const struct acpi_device_id processor_container_ids[] = {
	{ ACPI_PROCESSOR_CONTAINER_HID, },
	{ }
};

static struct acpi_scan_handler processor_container_handler = {
	.ids = processor_container_ids,
	.attach = acpi_processor_container_attach,
};

/* The number of unique processor IDs */
static int nr_unique_ids __initdata;

/* The number of duplicate processor IDs */
static int nr_duplicate_ids;

/* Used to store the unique processor IDs */
static int unique_processor_ids[] __initdata = {
	[0 ... NR_CPUS - 1] = -1,
};

/* Used to store the duplicate processor IDs */
static int duplicate_processor_ids[] = {
	[0 ... NR_CPUS - 1] = -1,
};

static void __init processor_validated_ids_update(int proc_id)
{
	int i;

	if (nr_unique_ids == NR_CPUS || nr_duplicate_ids == NR_CPUS)
		return;

	/*
	 * First, compare proc_id with the known duplicate IDs; if it is
	 * already among them, do nothing.
	 */
	for (i = 0; i < nr_duplicate_ids; i++) {
		if (duplicate_processor_ids[i] == proc_id)
			return;
	}

	/*
	 * Second, compare proc_id with the unique IDs; if it is among them,
	 * record it among the duplicate IDs too.
	 */
	for (i = 0; i < nr_unique_ids; i++) {
		if (unique_processor_ids[i] == proc_id) {
			duplicate_processor_ids[nr_duplicate_ids] = proc_id;
			nr_duplicate_ids++;
			return;
		}
	}

	/*
	 * Otherwise, proc_id is a new unique ID; record it.
	 */
	unique_processor_ids[nr_unique_ids] = proc_id;
	nr_unique_ids++;
}
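/*
 * Namespace walk callback: extract the processor ID (proc_id for Processor
 * objects, _UID for processor Device objects) and feed it to the duplicate
 * detection above.  Malformed objects are logged and skipped without
 * aborting the walk.
 */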
static acpi_status __init acpi_processor_ids_walk(acpi_handle handle,
						   u32 lvl,
						   void *context,
						   void **rv)
{
	acpi_status status;
	acpi_object_type acpi_type;
	unsigned long long uid;
	union acpi_object object = { 0 };
	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };

	status = acpi_get_type(handle, &acpi_type);
	if (ACPI_FAILURE(status))
		return status;

	switch (acpi_type) {
	case ACPI_TYPE_PROCESSOR:
		status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
		if (ACPI_FAILURE(status))
			goto err;
		uid = object.processor.proc_id;
		break;

	case ACPI_TYPE_DEVICE:
		status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
		if (ACPI_FAILURE(status))
			goto err;
		break;
	default:
		goto err;
	}

	processor_validated_ids_update(uid);
	return AE_OK;

err:
	/* Exit on error, but don't abort the namespace walk */
	acpi_handle_info(handle, "Invalid processor object\n");
	return AE_OK;
}

static void __init acpi_processor_check_duplicates(void)
{
	/* Check all Processor objects and processor devices in the namespace */
	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
			    ACPI_UINT32_MAX,
			    acpi_processor_ids_walk,
			    NULL, NULL, NULL);
	acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, acpi_processor_ids_walk,
			 NULL, NULL);
}

bool acpi_duplicate_processor_id(int proc_id)
{
	int i;

	/*
	 * Compare proc_id with the duplicate IDs: return true if it is
	 * among them, false otherwise.
	 */
	for (i = 0; i < nr_duplicate_ids; i++) {
		if (duplicate_processor_ids[i] == proc_id)
			return true;
	}
	return false;
}

void __init acpi_processor_init(void)
{
	acpi_processor_check_duplicates();
	acpi_scan_add_handler_with_hotplug(&processor_handler, "processor");
	acpi_scan_add_handler(&processor_container_handler);
	acpi_pcc_cpufreq_init();
}
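/*
 * C-state (_CST) support.  acpi_processor_claim_cst_control() performs the
 * FADT-described SMI handshake that transfers C-state control from the
 * platform firmware to the OS; the handshake only needs to happen once,
 * hence the static flag.
 */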
#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
/**
 * acpi_processor_claim_cst_control - Request _CST control from the platform.
 */
bool acpi_processor_claim_cst_control(void)
{
	static bool cst_control_claimed;
	acpi_status status;

	if (!acpi_gbl_FADT.cst_control || cst_control_claimed)
		return true;

	status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
				    acpi_gbl_FADT.cst_control, 8);
	if (ACPI_FAILURE(status)) {
		pr_warn("ACPI: Failed to claim processor _CST control\n");
		return false;
	}

	cst_control_claimed = true;
	return true;
}
EXPORT_SYMBOL_GPL(acpi_processor_claim_cst_control);

/**
 * acpi_processor_evaluate_cst - Evaluate the processor _CST control method.
 * @handle: ACPI handle of the processor object containing the _CST.
 * @cpu: The numeric ID of the target CPU.
 * @info: Object to write the C-states information into.
 *
 * Extract the C-state information for the given CPU from the output of the
 * _CST control method under the corresponding ACPI processor object (or
 * processor device object) and populate @info with it.
 *
 * If any ACPI_ADR_SPACE_FIXED_HARDWARE C-states are found, invoke
 * acpi_processor_ffh_cstate_probe() to verify them and update the
 * cpu_cstate_entry data for @cpu.
 */
int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
				struct acpi_processor_power *info)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;
	acpi_status status;
	u64 count;
	int last_index = 0;
	int i, ret = 0;

	status = acpi_evaluate_object(handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		acpi_handle_debug(handle, "No _CST\n");
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements. */
	if (!cst || cst->type != ACPI_TYPE_PACKAGE || cst->package.count < 2) {
		acpi_handle_warn(handle, "Invalid _CST output\n");
		ret = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate the number of C-states. */
	if (count < 1 || count != cst->package.count - 1) {
		acpi_handle_warn(handle, "Inconsistent _CST data\n");
		ret = -EFAULT;
		goto end;
	}

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		/*
		 * If there is not enough space for all C-states, skip the
		 * excess ones and log a warning.
		 */
		if (last_index >= ACPI_PROCESSOR_MAX_POWER - 1) {
			acpi_handle_warn(handle,
					 "No room for more idle states (limit: %d)\n",
					 ACPI_PROCESSOR_MAX_POWER - 1);
			break;
		}

		memset(&cx, 0, sizeof(cx));

		element = &cst->package.elements[i];
		if (element->type != ACPI_TYPE_PACKAGE) {
			acpi_handle_info(handle, "_CST C%d type(%x) is not package, skip...\n",
					 i, element->type);
			continue;
		}

		if (element->package.count != 4) {
			acpi_handle_info(handle, "_CST C%d package count(%d) is not 4, skip...\n",
					 i, element->package.count);
			continue;
		}

		obj = &element->package.elements[0];

		if (obj->type != ACPI_TYPE_BUFFER) {
			acpi_handle_info(handle, "_CST C%d package element[0] type(%x) is not buffer, skip...\n",
					 i, obj->type);
			continue;
		}

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		obj = &element->package.elements[1];
		if (obj->type != ACPI_TYPE_INTEGER) {
			acpi_handle_info(handle, "_CST C%d package element[1] type(%x) is not integer, skip...\n",
					 i, obj->type);
			continue;
		}

		cx.type = obj->integer.value;
		/*
		 * There are known cases in which the _CST output does not
		 * contain C1, so if the type of the first state found is not
		 * C1, leave an empty slot for C1 to be filled in later.
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			last_index = 1;

		cx.address = reg->address;
		cx.index = last_index + 1;

		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (!acpi_processor_ffh_cstate_probe(cpu, &cx, reg)) {
				/*
				 * In the majority of cases _CST describes C1 as
				 * a FIXED_HARDWARE C-state, but if the command
				 * line forbids using MWAIT, use CSTATE_HALT for
				 * C1 regardless.
				 */
				if (cx.type == ACPI_STATE_C1 &&
				    boot_option_idle_override == IDLE_NOMWAIT) {
					cx.entry_method = ACPI_CSTATE_HALT;
					snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
				} else {
					cx.entry_method = ACPI_CSTATE_FFH;
				}
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * In the special case of C1, FIXED_HARDWARE can
				 * be handled by executing the HLT instruction.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			} else {
				acpi_handle_info(handle, "_CST C%d declares FIXED_HARDWARE C-state but not supported in hardware, skip...\n",
						 i);
				continue;
			}
		} else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
			cx.entry_method = ACPI_CSTATE_SYSTEMIO;
			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
				 cx.address);
		} else {
			acpi_handle_info(handle, "_CST C%d space_id(%x) neither FIXED_HARDWARE nor SYSTEM_IO, skip...\n",
					 i, reg->space_id);
			continue;
		}
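		/*
		 * C1 is always usable (the code above guarantees a HLT
		 * fallback for it), so mark it valid right away.  The
		 * validity of deeper states is determined elsewhere.
		 */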
		if (cx.type == ACPI_STATE_C1)
			cx.valid = 1;

		obj = &element->package.elements[2];
		if (obj->type != ACPI_TYPE_INTEGER) {
			acpi_handle_info(handle, "_CST C%d package element[2] type(%x) not integer, skip...\n",
					 i, obj->type);
			continue;
		}

		cx.latency = obj->integer.value;

		obj = &element->package.elements[3];
		if (obj->type != ACPI_TYPE_INTEGER) {
			acpi_handle_info(handle, "_CST C%d package element[3] type(%x) not integer, skip...\n",
					 i, obj->type);
			continue;
		}

		memcpy(&info->states[++last_index], &cx, sizeof(cx));
	}

	acpi_handle_info(handle, "Found %d idle states\n", last_index);

	info->count = last_index;

end:
	kfree(buffer.pointer);

	return ret;
}
EXPORT_SYMBOL_GPL(acpi_processor_evaluate_cst);
#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */