1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * acpi_processor.c - ACPI processor enumeration support 4 * 5 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 6 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 7 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de> 8 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 9 * Copyright (C) 2013, Intel Corporation 10 * Rafael J. Wysocki <rafael.j.wysocki@intel.com> 11 */ 12 13 #include <linux/acpi.h> 14 #include <linux/device.h> 15 #include <linux/dmi.h> 16 #include <linux/kernel.h> 17 #include <linux/module.h> 18 #include <linux/pci.h> 19 #include <linux/platform_device.h> 20 21 #include <acpi/processor.h> 22 23 #include <asm/cpu.h> 24 25 #include <xen/xen.h> 26 27 #include "internal.h" 28 29 DEFINE_PER_CPU(struct acpi_processor *, processors); 30 EXPORT_PER_CPU_SYMBOL(processors); 31 32 /* Errata Handling */ 33 struct acpi_processor_errata errata __read_mostly; 34 EXPORT_SYMBOL_GPL(errata); 35 36 static int acpi_processor_errata_piix4(struct pci_dev *dev) 37 { 38 u8 value1 = 0; 39 u8 value2 = 0; 40 41 42 if (!dev) 43 return -EINVAL; 44 45 /* 46 * Note that 'dev' references the PIIX4 ACPI Controller. 47 */ 48 49 switch (dev->revision) { 50 case 0: 51 dev_dbg(&dev->dev, "Found PIIX4 A-step\n"); 52 break; 53 case 1: 54 dev_dbg(&dev->dev, "Found PIIX4 B-step\n"); 55 break; 56 case 2: 57 dev_dbg(&dev->dev, "Found PIIX4E\n"); 58 break; 59 case 3: 60 dev_dbg(&dev->dev, "Found PIIX4M\n"); 61 break; 62 default: 63 dev_dbg(&dev->dev, "Found unknown PIIX4\n"); 64 break; 65 } 66 67 switch (dev->revision) { 68 69 case 0: /* PIIX4 A-step */ 70 case 1: /* PIIX4 B-step */ 71 /* 72 * See specification changes #13 ("Manual Throttle Duty Cycle") 73 * and #14 ("Enabling and Disabling Manual Throttle"), plus 74 * erratum #5 ("STPCLK# Deassertion Time") from the January 75 * 2002 PIIX4 specification update. Applies to only older 76 * PIIX4 models. 77 */ 78 errata.piix4.throttle = 1; 79 fallthrough; 80 81 case 2: /* PIIX4E */ 82 case 3: /* PIIX4M */ 83 /* 84 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA 85 * Livelock") from the January 2002 PIIX4 specification update. 86 * Applies to all PIIX4 models. 87 */ 88 89 /* 90 * BM-IDE 91 * ------ 92 * Find the PIIX4 IDE Controller and get the Bus Master IDE 93 * Status register address. We'll use this later to read 94 * each IDE controller's DMA status to make sure we catch all 95 * DMA activity. 96 */ 97 dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, 98 PCI_DEVICE_ID_INTEL_82371AB, 99 PCI_ANY_ID, PCI_ANY_ID, NULL); 100 if (dev) { 101 errata.piix4.bmisx = pci_resource_start(dev, 4); 102 pci_dev_put(dev); 103 } 104 105 /* 106 * Type-F DMA 107 * ---------- 108 * Find the PIIX4 ISA Controller and read the Motherboard 109 * DMA controller's status to see if Type-F (Fast) DMA mode 110 * is enabled (bit 7) on either channel. Note that we'll 111 * disable C3 support if this is enabled, as some legacy 112 * devices won't operate well if fast DMA is disabled. 113 */ 114 dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, 115 PCI_DEVICE_ID_INTEL_82371AB_0, 116 PCI_ANY_ID, PCI_ANY_ID, NULL); 117 if (dev) { 118 pci_read_config_byte(dev, 0x76, &value1); 119 pci_read_config_byte(dev, 0x77, &value2); 120 if ((value1 & 0x80) || (value2 & 0x80)) 121 errata.piix4.fdma = 1; 122 pci_dev_put(dev); 123 } 124 125 break; 126 } 127 128 if (errata.piix4.bmisx) 129 dev_dbg(&dev->dev, "Bus master activity detection (BM-IDE) erratum enabled\n"); 130 if (errata.piix4.fdma) 131 dev_dbg(&dev->dev, "Type-F DMA livelock erratum (C3 disabled)\n"); 132 133 return 0; 134 } 135 136 static int acpi_processor_errata(void) 137 { 138 int result = 0; 139 struct pci_dev *dev = NULL; 140 141 /* 142 * PIIX4 143 */ 144 dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, 145 PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID, 146 PCI_ANY_ID, NULL); 147 if (dev) { 148 result = acpi_processor_errata_piix4(dev); 149 pci_dev_put(dev); 150 } 151 152 return result; 153 } 154 155 /* Create a platform device to represent a CPU frequency control mechanism. */ 156 static void cpufreq_add_device(const char *name) 157 { 158 struct platform_device *pdev; 159 160 pdev = platform_device_register_simple(name, PLATFORM_DEVID_NONE, NULL, 0); 161 if (IS_ERR(pdev)) 162 pr_info("%s device creation failed: %ld\n", name, PTR_ERR(pdev)); 163 } 164 165 #ifdef CONFIG_X86 166 /* Check presence of Processor Clocking Control by searching for \_SB.PCCH. */ 167 static void __init acpi_pcc_cpufreq_init(void) 168 { 169 acpi_status status; 170 acpi_handle handle; 171 172 status = acpi_get_handle(NULL, "\\_SB", &handle); 173 if (ACPI_FAILURE(status)) 174 return; 175 176 if (acpi_has_method(handle, "PCCH")) 177 cpufreq_add_device("pcc-cpufreq"); 178 } 179 #else 180 static void __init acpi_pcc_cpufreq_init(void) {} 181 #endif /* CONFIG_X86 */ 182 183 /* Initialization */ 184 #ifdef CONFIG_ACPI_HOTPLUG_CPU 185 int __weak acpi_map_cpu(acpi_handle handle, 186 phys_cpuid_t physid, u32 acpi_id, int *pcpu) 187 { 188 return -ENODEV; 189 } 190 191 int __weak acpi_unmap_cpu(int cpu) 192 { 193 return -ENODEV; 194 } 195 196 int __weak arch_register_cpu(int cpu) 197 { 198 return -ENODEV; 199 } 200 201 void __weak arch_unregister_cpu(int cpu) {} 202 203 static int acpi_processor_hotadd_init(struct acpi_processor *pr) 204 { 205 unsigned long long sta; 206 acpi_status status; 207 int ret; 208 209 if (invalid_phys_cpuid(pr->phys_id)) 210 return -ENODEV; 211 212 status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta); 213 if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT)) 214 return -ENODEV; 215 216 cpu_maps_update_begin(); 217 cpus_write_lock(); 218 219 ret = acpi_map_cpu(pr->handle, pr->phys_id, pr->acpi_id, &pr->id); 220 if (ret) 221 goto out; 222 223 ret = arch_register_cpu(pr->id); 224 if (ret) { 225 acpi_unmap_cpu(pr->id); 226 goto out; 227 } 228 229 /* 230 * CPU got hot-added, but cpu_data is not initialized yet. Set a flag 231 * to delay cpu_idle/throttling initialization and do it when the CPU 232 * gets online for the first time. 233 */ 234 pr_info("CPU%d has been hot-added\n", pr->id); 235 pr->flags.need_hotplug_init = 1; 236 237 out: 238 cpus_write_unlock(); 239 cpu_maps_update_done(); 240 return ret; 241 } 242 #else 243 static inline int acpi_processor_hotadd_init(struct acpi_processor *pr) 244 { 245 return -ENODEV; 246 } 247 #endif /* CONFIG_ACPI_HOTPLUG_CPU */ 248 249 static int acpi_processor_get_info(struct acpi_device *device) 250 { 251 union acpi_object object = { 0 }; 252 struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; 253 struct acpi_processor *pr = acpi_driver_data(device); 254 int device_declaration = 0; 255 acpi_status status = AE_OK; 256 static int cpu0_initialized; 257 unsigned long long value; 258 259 acpi_processor_errata(); 260 261 /* 262 * Check to see if we have bus mastering arbitration control. This 263 * is required for proper C3 usage (to maintain cache coherency). 264 */ 265 if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) { 266 pr->flags.bm_control = 1; 267 dev_dbg(&device->dev, "Bus mastering arbitration control present\n"); 268 } else 269 dev_dbg(&device->dev, "No bus mastering arbitration control\n"); 270 271 if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) { 272 /* Declared with "Processor" statement; match ProcessorID */ 273 status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer); 274 if (ACPI_FAILURE(status)) { 275 dev_err(&device->dev, 276 "Failed to evaluate processor object (0x%x)\n", 277 status); 278 return -ENODEV; 279 } 280 281 pr->acpi_id = object.processor.proc_id; 282 } else { 283 /* 284 * Declared with "Device" statement; match _UID. 285 */ 286 status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID, 287 NULL, &value); 288 if (ACPI_FAILURE(status)) { 289 dev_err(&device->dev, 290 "Failed to evaluate processor _UID (0x%x)\n", 291 status); 292 return -ENODEV; 293 } 294 device_declaration = 1; 295 pr->acpi_id = value; 296 } 297 298 if (acpi_duplicate_processor_id(pr->acpi_id)) { 299 if (pr->acpi_id == 0xff) 300 dev_info_once(&device->dev, 301 "Entry not well-defined, consider updating BIOS\n"); 302 else 303 dev_err(&device->dev, 304 "Failed to get unique processor _UID (0x%x)\n", 305 pr->acpi_id); 306 return -ENODEV; 307 } 308 309 pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration, 310 pr->acpi_id); 311 if (invalid_phys_cpuid(pr->phys_id)) 312 dev_dbg(&device->dev, "Failed to get CPU physical ID.\n"); 313 314 pr->id = acpi_map_cpuid(pr->phys_id, pr->acpi_id); 315 if (!cpu0_initialized) { 316 cpu0_initialized = 1; 317 /* 318 * Handle UP system running SMP kernel, with no CPU 319 * entry in MADT 320 */ 321 if (!acpi_has_cpu_in_madt() && invalid_logical_cpuid(pr->id) && 322 (num_online_cpus() == 1)) 323 pr->id = 0; 324 /* 325 * Check availability of Processor Performance Control by 326 * looking at the presence of the _PCT object under the first 327 * processor definition. 328 */ 329 if (acpi_has_method(pr->handle, "_PCT")) 330 cpufreq_add_device("acpi-cpufreq"); 331 } 332 333 /* 334 * Extra Processor objects may be enumerated on MP systems with 335 * less than the max # of CPUs. They should be ignored _iff 336 * they are physically not present. 337 * 338 * NOTE: Even if the processor has a cpuid, it may not be present 339 * because cpuid <-> apicid mapping is persistent now. 340 */ 341 if (invalid_logical_cpuid(pr->id) || !cpu_present(pr->id)) { 342 int ret = acpi_processor_hotadd_init(pr); 343 344 if (ret) 345 return ret; 346 } 347 348 /* 349 * On some boxes several processors use the same processor bus id. 350 * But they are located in different scope. For example: 351 * \_SB.SCK0.CPU0 352 * \_SB.SCK1.CPU0 353 * Rename the processor device bus id. And the new bus id will be 354 * generated as the following format: 355 * CPU+CPU ID. 356 */ 357 sprintf(acpi_device_bid(device), "CPU%X", pr->id); 358 dev_dbg(&device->dev, "Processor [%d:%d]\n", pr->id, pr->acpi_id); 359 360 if (!object.processor.pblk_address) 361 dev_dbg(&device->dev, "No PBLK (NULL address)\n"); 362 else if (object.processor.pblk_length != 6) 363 dev_err(&device->dev, "Invalid PBLK length [%d]\n", 364 object.processor.pblk_length); 365 else { 366 pr->throttling.address = object.processor.pblk_address; 367 pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset; 368 pr->throttling.duty_width = acpi_gbl_FADT.duty_width; 369 370 pr->pblk = object.processor.pblk_address; 371 } 372 373 /* 374 * If ACPI describes a slot number for this CPU, we can use it to 375 * ensure we get the right value in the "physical id" field 376 * of /proc/cpuinfo 377 */ 378 status = acpi_evaluate_integer(pr->handle, "_SUN", NULL, &value); 379 if (ACPI_SUCCESS(status)) 380 arch_fix_phys_package_id(pr->id, value); 381 382 return 0; 383 } 384 385 /* 386 * Do not put anything in here which needs the core to be online. 387 * For example MSR access or setting up things which check for cpuinfo_x86 388 * (cpu_data(cpu)) values, like CPU feature flags, family, model, etc. 389 * Such things have to be put in and set up by the processor driver's .probe(). 390 */ 391 static DEFINE_PER_CPU(void *, processor_device_array); 392 393 static int acpi_processor_add(struct acpi_device *device, 394 const struct acpi_device_id *id) 395 { 396 struct acpi_processor *pr; 397 struct device *dev; 398 int result = 0; 399 400 pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL); 401 if (!pr) 402 return -ENOMEM; 403 404 if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) { 405 result = -ENOMEM; 406 goto err_free_pr; 407 } 408 409 pr->handle = device->handle; 410 strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME); 411 strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS); 412 device->driver_data = pr; 413 414 result = acpi_processor_get_info(device); 415 if (result) /* Processor is not physically present or unavailable */ 416 return 0; 417 418 BUG_ON(pr->id >= nr_cpu_ids); 419 420 /* 421 * Buggy BIOS check. 422 * ACPI id of processors can be reported wrongly by the BIOS. 423 * Don't trust it blindly 424 */ 425 if (per_cpu(processor_device_array, pr->id) != NULL && 426 per_cpu(processor_device_array, pr->id) != device) { 427 dev_warn(&device->dev, 428 "BIOS reported wrong ACPI id %d for the processor\n", 429 pr->id); 430 /* Give up, but do not abort the namespace scan. */ 431 goto err; 432 } 433 /* 434 * processor_device_array is not cleared on errors to allow buggy BIOS 435 * checks. 436 */ 437 per_cpu(processor_device_array, pr->id) = device; 438 per_cpu(processors, pr->id) = pr; 439 440 dev = get_cpu_device(pr->id); 441 if (!dev) { 442 result = -ENODEV; 443 goto err; 444 } 445 446 result = acpi_bind_one(dev, device); 447 if (result) 448 goto err; 449 450 pr->dev = dev; 451 452 /* Trigger the processor driver's .probe() if present. */ 453 if (device_attach(dev) >= 0) 454 return 1; 455 456 dev_err(dev, "Processor driver could not be attached\n"); 457 acpi_unbind_one(dev); 458 459 err: 460 free_cpumask_var(pr->throttling.shared_cpu_map); 461 device->driver_data = NULL; 462 per_cpu(processors, pr->id) = NULL; 463 err_free_pr: 464 kfree(pr); 465 return result; 466 } 467 468 #ifdef CONFIG_ACPI_HOTPLUG_CPU 469 /* Removal */ 470 static void acpi_processor_remove(struct acpi_device *device) 471 { 472 struct acpi_processor *pr; 473 474 if (!device || !acpi_driver_data(device)) 475 return; 476 477 pr = acpi_driver_data(device); 478 if (pr->id >= nr_cpu_ids) 479 goto out; 480 481 /* 482 * The only reason why we ever get here is CPU hot-removal. The CPU is 483 * already offline and the ACPI device removal locking prevents it from 484 * being put back online at this point. 485 * 486 * Unbind the driver from the processor device and detach it from the 487 * ACPI companion object. 488 */ 489 device_release_driver(pr->dev); 490 acpi_unbind_one(pr->dev); 491 492 /* Clean up. */ 493 per_cpu(processor_device_array, pr->id) = NULL; 494 per_cpu(processors, pr->id) = NULL; 495 496 cpu_maps_update_begin(); 497 cpus_write_lock(); 498 499 /* Remove the CPU. */ 500 arch_unregister_cpu(pr->id); 501 acpi_unmap_cpu(pr->id); 502 503 cpus_write_unlock(); 504 cpu_maps_update_done(); 505 506 try_offline_node(cpu_to_node(pr->id)); 507 508 out: 509 free_cpumask_var(pr->throttling.shared_cpu_map); 510 kfree(pr); 511 } 512 #endif /* CONFIG_ACPI_HOTPLUG_CPU */ 513 514 #ifdef CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC 515 bool __init processor_physically_present(acpi_handle handle) 516 { 517 int cpuid, type; 518 u32 acpi_id; 519 acpi_status status; 520 acpi_object_type acpi_type; 521 unsigned long long tmp; 522 union acpi_object object = {}; 523 struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; 524 525 status = acpi_get_type(handle, &acpi_type); 526 if (ACPI_FAILURE(status)) 527 return false; 528 529 switch (acpi_type) { 530 case ACPI_TYPE_PROCESSOR: 531 status = acpi_evaluate_object(handle, NULL, NULL, &buffer); 532 if (ACPI_FAILURE(status)) 533 return false; 534 acpi_id = object.processor.proc_id; 535 break; 536 case ACPI_TYPE_DEVICE: 537 status = acpi_evaluate_integer(handle, METHOD_NAME__UID, 538 NULL, &tmp); 539 if (ACPI_FAILURE(status)) 540 return false; 541 acpi_id = tmp; 542 break; 543 default: 544 return false; 545 } 546 547 if (xen_initial_domain()) 548 /* 549 * When running as a Xen dom0 the number of processors Linux 550 * sees can be different from the real number of processors on 551 * the system, and we still need to execute _PDC or _OSC for 552 * all of them. 553 */ 554 return xen_processor_present(acpi_id); 555 556 type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0; 557 cpuid = acpi_get_cpuid(handle, type, acpi_id); 558 559 return !invalid_logical_cpuid(cpuid); 560 } 561 562 static bool acpi_hwp_native_thermal_lvt_set; 563 static acpi_status __init acpi_hwp_native_thermal_lvt_osc(acpi_handle handle, 564 u32 lvl, 565 void *context, 566 void **rv) 567 { 568 u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953"; 569 u32 capbuf[2]; 570 struct acpi_osc_context osc_context = { 571 .uuid_str = sb_uuid_str, 572 .rev = 1, 573 .cap.length = 8, 574 .cap.pointer = capbuf, 575 }; 576 577 if (acpi_hwp_native_thermal_lvt_set) 578 return AE_CTRL_TERMINATE; 579 580 capbuf[0] = 0x0000; 581 capbuf[1] = 0x1000; /* set bit 12 */ 582 583 if (ACPI_SUCCESS(acpi_run_osc(handle, &osc_context))) { 584 if (osc_context.ret.pointer && osc_context.ret.length > 1) { 585 u32 *capbuf_ret = osc_context.ret.pointer; 586 587 if (capbuf_ret[1] & 0x1000) { 588 acpi_handle_info(handle, 589 "_OSC native thermal LVT Acked\n"); 590 acpi_hwp_native_thermal_lvt_set = true; 591 } 592 } 593 kfree(osc_context.ret.pointer); 594 } 595 596 return AE_OK; 597 } 598 599 void __init acpi_early_processor_osc(void) 600 { 601 if (boot_cpu_has(X86_FEATURE_HWP)) { 602 acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, 603 ACPI_UINT32_MAX, 604 acpi_hwp_native_thermal_lvt_osc, 605 NULL, NULL, NULL); 606 acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, 607 acpi_hwp_native_thermal_lvt_osc, 608 NULL, NULL); 609 } 610 } 611 #endif 612 613 /* 614 * The following ACPI IDs are known to be suitable for representing as 615 * processor devices. 616 */ 617 static const struct acpi_device_id processor_device_ids[] = { 618 619 { ACPI_PROCESSOR_OBJECT_HID, }, 620 { ACPI_PROCESSOR_DEVICE_HID, }, 621 622 { } 623 }; 624 625 static struct acpi_scan_handler processor_handler = { 626 .ids = processor_device_ids, 627 .attach = acpi_processor_add, 628 #ifdef CONFIG_ACPI_HOTPLUG_CPU 629 .detach = acpi_processor_remove, 630 #endif 631 .hotplug = { 632 .enabled = true, 633 }, 634 }; 635 636 static int acpi_processor_container_attach(struct acpi_device *dev, 637 const struct acpi_device_id *id) 638 { 639 return 1; 640 } 641 642 static const struct acpi_device_id processor_container_ids[] = { 643 { ACPI_PROCESSOR_CONTAINER_HID, }, 644 { } 645 }; 646 647 static struct acpi_scan_handler processor_container_handler = { 648 .ids = processor_container_ids, 649 .attach = acpi_processor_container_attach, 650 }; 651 652 /* The number of the unique processor IDs */ 653 static int nr_unique_ids __initdata; 654 655 /* The number of the duplicate processor IDs */ 656 static int nr_duplicate_ids; 657 658 /* Used to store the unique processor IDs */ 659 static int unique_processor_ids[] __initdata = { 660 [0 ... NR_CPUS - 1] = -1, 661 }; 662 663 /* Used to store the duplicate processor IDs */ 664 static int duplicate_processor_ids[] = { 665 [0 ... NR_CPUS - 1] = -1, 666 }; 667 668 static void __init processor_validated_ids_update(int proc_id) 669 { 670 int i; 671 672 if (nr_unique_ids == NR_CPUS||nr_duplicate_ids == NR_CPUS) 673 return; 674 675 /* 676 * Firstly, compare the proc_id with duplicate IDs, if the proc_id is 677 * already in the IDs, do nothing. 678 */ 679 for (i = 0; i < nr_duplicate_ids; i++) { 680 if (duplicate_processor_ids[i] == proc_id) 681 return; 682 } 683 684 /* 685 * Secondly, compare the proc_id with unique IDs, if the proc_id is in 686 * the IDs, put it in the duplicate IDs. 687 */ 688 for (i = 0; i < nr_unique_ids; i++) { 689 if (unique_processor_ids[i] == proc_id) { 690 duplicate_processor_ids[nr_duplicate_ids] = proc_id; 691 nr_duplicate_ids++; 692 return; 693 } 694 } 695 696 /* 697 * Lastly, the proc_id is a unique ID, put it in the unique IDs. 698 */ 699 unique_processor_ids[nr_unique_ids] = proc_id; 700 nr_unique_ids++; 701 } 702 703 static acpi_status __init acpi_processor_ids_walk(acpi_handle handle, 704 u32 lvl, 705 void *context, 706 void **rv) 707 { 708 acpi_status status; 709 acpi_object_type acpi_type; 710 unsigned long long uid; 711 union acpi_object object = { 0 }; 712 struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; 713 714 status = acpi_get_type(handle, &acpi_type); 715 if (ACPI_FAILURE(status)) 716 return status; 717 718 switch (acpi_type) { 719 case ACPI_TYPE_PROCESSOR: 720 status = acpi_evaluate_object(handle, NULL, NULL, &buffer); 721 if (ACPI_FAILURE(status)) 722 goto err; 723 uid = object.processor.proc_id; 724 break; 725 726 case ACPI_TYPE_DEVICE: 727 status = acpi_evaluate_integer(handle, "_UID", NULL, &uid); 728 if (ACPI_FAILURE(status)) 729 goto err; 730 break; 731 default: 732 goto err; 733 } 734 735 processor_validated_ids_update(uid); 736 return AE_OK; 737 738 err: 739 /* Exit on error, but don't abort the namespace walk */ 740 acpi_handle_info(handle, "Invalid processor object\n"); 741 return AE_OK; 742 743 } 744 745 static void __init acpi_processor_check_duplicates(void) 746 { 747 /* check the correctness for all processors in ACPI namespace */ 748 acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, 749 ACPI_UINT32_MAX, 750 acpi_processor_ids_walk, 751 NULL, NULL, NULL); 752 acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, acpi_processor_ids_walk, 753 NULL, NULL); 754 } 755 756 bool acpi_duplicate_processor_id(int proc_id) 757 { 758 int i; 759 760 /* 761 * compare the proc_id with duplicate IDs, if the proc_id is already 762 * in the duplicate IDs, return true, otherwise, return false. 763 */ 764 for (i = 0; i < nr_duplicate_ids; i++) { 765 if (duplicate_processor_ids[i] == proc_id) 766 return true; 767 } 768 return false; 769 } 770 771 void __init acpi_processor_init(void) 772 { 773 acpi_processor_check_duplicates(); 774 acpi_scan_add_handler_with_hotplug(&processor_handler, "processor"); 775 acpi_scan_add_handler(&processor_container_handler); 776 acpi_pcc_cpufreq_init(); 777 } 778 779 #ifdef CONFIG_ACPI_PROCESSOR_CSTATE 780 /** 781 * acpi_processor_claim_cst_control - Request _CST control from the platform. 782 */ 783 bool acpi_processor_claim_cst_control(void) 784 { 785 static bool cst_control_claimed; 786 acpi_status status; 787 788 if (!acpi_gbl_FADT.cst_control || cst_control_claimed) 789 return true; 790 791 status = acpi_os_write_port(acpi_gbl_FADT.smi_command, 792 acpi_gbl_FADT.cst_control, 8); 793 if (ACPI_FAILURE(status)) { 794 pr_warn("ACPI: Failed to claim processor _CST control\n"); 795 return false; 796 } 797 798 cst_control_claimed = true; 799 return true; 800 } 801 EXPORT_SYMBOL_GPL(acpi_processor_claim_cst_control); 802 803 /** 804 * acpi_processor_evaluate_cst - Evaluate the processor _CST control method. 805 * @handle: ACPI handle of the processor object containing the _CST. 806 * @cpu: The numeric ID of the target CPU. 807 * @info: Object write the C-states information into. 808 * 809 * Extract the C-state information for the given CPU from the output of the _CST 810 * control method under the corresponding ACPI processor object (or processor 811 * device object) and populate @info with it. 812 * 813 * If any ACPI_ADR_SPACE_FIXED_HARDWARE C-states are found, invoke 814 * acpi_processor_ffh_cstate_probe() to verify them and update the 815 * cpu_cstate_entry data for @cpu. 816 */ 817 int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu, 818 struct acpi_processor_power *info) 819 { 820 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 821 union acpi_object *cst; 822 acpi_status status; 823 u64 count; 824 int last_index = 0; 825 int i, ret = 0; 826 827 status = acpi_evaluate_object(handle, "_CST", NULL, &buffer); 828 if (ACPI_FAILURE(status)) { 829 acpi_handle_debug(handle, "No _CST\n"); 830 return -ENODEV; 831 } 832 833 cst = buffer.pointer; 834 835 /* There must be at least 2 elements. */ 836 if (!cst || cst->type != ACPI_TYPE_PACKAGE || cst->package.count < 2) { 837 acpi_handle_warn(handle, "Invalid _CST output\n"); 838 ret = -EFAULT; 839 goto end; 840 } 841 842 count = cst->package.elements[0].integer.value; 843 844 /* Validate the number of C-states. */ 845 if (count < 1 || count != cst->package.count - 1) { 846 acpi_handle_warn(handle, "Inconsistent _CST data\n"); 847 ret = -EFAULT; 848 goto end; 849 } 850 851 for (i = 1; i <= count; i++) { 852 union acpi_object *element; 853 union acpi_object *obj; 854 struct acpi_power_register *reg; 855 struct acpi_processor_cx cx; 856 857 /* 858 * If there is not enough space for all C-states, skip the 859 * excess ones and log a warning. 860 */ 861 if (last_index >= ACPI_PROCESSOR_MAX_POWER - 1) { 862 acpi_handle_warn(handle, 863 "No room for more idle states (limit: %d)\n", 864 ACPI_PROCESSOR_MAX_POWER - 1); 865 break; 866 } 867 868 memset(&cx, 0, sizeof(cx)); 869 870 element = &cst->package.elements[i]; 871 if (element->type != ACPI_TYPE_PACKAGE) { 872 acpi_handle_info(handle, "_CST C%d type(%x) is not package, skip...\n", 873 i, element->type); 874 continue; 875 } 876 877 if (element->package.count != 4) { 878 acpi_handle_info(handle, "_CST C%d package count(%d) is not 4, skip...\n", 879 i, element->package.count); 880 continue; 881 } 882 883 obj = &element->package.elements[0]; 884 885 if (obj->type != ACPI_TYPE_BUFFER) { 886 acpi_handle_info(handle, "_CST C%d package element[0] type(%x) is not buffer, skip...\n", 887 i, obj->type); 888 continue; 889 } 890 891 reg = (struct acpi_power_register *)obj->buffer.pointer; 892 893 obj = &element->package.elements[1]; 894 if (obj->type != ACPI_TYPE_INTEGER) { 895 acpi_handle_info(handle, "_CST C[%d] package element[1] type(%x) is not integer, skip...\n", 896 i, obj->type); 897 continue; 898 } 899 900 cx.type = obj->integer.value; 901 /* 902 * There are known cases in which the _CST output does not 903 * contain C1, so if the type of the first state found is not 904 * C1, leave an empty slot for C1 to be filled in later. 905 */ 906 if (i == 1 && cx.type != ACPI_STATE_C1) 907 last_index = 1; 908 909 cx.address = reg->address; 910 cx.index = last_index + 1; 911 912 if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) { 913 if (!acpi_processor_ffh_cstate_probe(cpu, &cx, reg)) { 914 /* 915 * In the majority of cases _CST describes C1 as 916 * a FIXED_HARDWARE C-state, but if the command 917 * line forbids using MWAIT, use CSTATE_HALT for 918 * C1 regardless. 919 */ 920 if (cx.type == ACPI_STATE_C1 && 921 boot_option_idle_override == IDLE_NOMWAIT) { 922 cx.entry_method = ACPI_CSTATE_HALT; 923 snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); 924 } else { 925 cx.entry_method = ACPI_CSTATE_FFH; 926 } 927 } else if (cx.type == ACPI_STATE_C1) { 928 /* 929 * In the special case of C1, FIXED_HARDWARE can 930 * be handled by executing the HLT instruction. 931 */ 932 cx.entry_method = ACPI_CSTATE_HALT; 933 snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); 934 } else { 935 acpi_handle_info(handle, "_CST C%d declares FIXED_HARDWARE C-state but not supported in hardware, skip...\n", 936 i); 937 continue; 938 } 939 } else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { 940 cx.entry_method = ACPI_CSTATE_SYSTEMIO; 941 snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x", 942 cx.address); 943 } else { 944 acpi_handle_info(handle, "_CST C%d space_id(%x) neither FIXED_HARDWARE nor SYSTEM_IO, skip...\n", 945 i, reg->space_id); 946 continue; 947 } 948 949 if (cx.type == ACPI_STATE_C1) 950 cx.valid = 1; 951 952 obj = &element->package.elements[2]; 953 if (obj->type != ACPI_TYPE_INTEGER) { 954 acpi_handle_info(handle, "_CST C%d package element[2] type(%x) not integer, skip...\n", 955 i, obj->type); 956 continue; 957 } 958 959 cx.latency = obj->integer.value; 960 961 obj = &element->package.elements[3]; 962 if (obj->type != ACPI_TYPE_INTEGER) { 963 acpi_handle_info(handle, "_CST C%d package element[3] type(%x) not integer, skip...\n", 964 i, obj->type); 965 continue; 966 } 967 968 memcpy(&info->states[++last_index], &cx, sizeof(cx)); 969 } 970 971 acpi_handle_info(handle, "Found %d idle states\n", last_index); 972 973 info->count = last_index; 974 975 end: 976 kfree(buffer.pointer); 977 978 return ret; 979 } 980 EXPORT_SYMBOL_GPL(acpi_processor_evaluate_cst); 981 #endif /* CONFIG_ACPI_PROCESSOR_CSTATE */ 982