// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_perflib.c - ACPI Processor P-States Library ($Revision: 71 $)
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS		"processor"
#define ACPI_PROCESSOR_FILE_PERFORMANCE	"performance"
#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_perflib");

static DEFINE_MUTEX(performance_mutex);

/*
 * _PPC support is implemented as a CPUfreq policy notifier:
 * This means each time a CPUfreq driver registered also with
 * the ACPI core is asked to change the speed policy, the maximum
 * value is adjusted so that it is within the platform limit.
 *
 * Also, when a new platform limit value is detected, the CPUfreq
 * policy is adjusted accordingly.
 */

/*
 * ignore_ppc:
 * -1 -> cpufreq low level drivers not initialized -> _PSS, etc. not called yet
 *       ignore _PPC
 *  0 -> cpufreq low level drivers initialized -> consider _PPC values
 *  1 -> ignore _PPC totally -> forced by user through boot param
 */
static int ignore_ppc = -1;
module_param(ignore_ppc, int, 0644);
MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly"
		 " limited by BIOS, this should help");

static bool acpi_processor_ppc_in_use;

static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
	acpi_status status = 0;
	unsigned long long ppc = 0;
	int ret;

	if (!pr)
		return -EINVAL;

	/*
	 * _PPC indicates the maximum state currently supported by the platform
	 * (e.g. 0 = states 0..n; 1 = states 1..n; etc.).
	 */
	status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);

	if (status != AE_NOT_FOUND)
		acpi_processor_ppc_in_use = true;

	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PPC"));
		return -ENODEV;
	}

	pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
		 (int)ppc, ppc ? "" : "not");

	pr->performance_platform_limit = (int)ppc;

	if (ppc >= pr->performance->state_count ||
	    unlikely(!dev_pm_qos_request_active(&pr->perflib_req)))
		return 0;

	ret = dev_pm_qos_update_request(&pr->perflib_req,
			pr->performance->states[ppc].core_frequency * 1000);
	if (ret < 0) {
		pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n",
			pr->id, ret);
	}

	return 0;
}
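/*
 * Worked example for acpi_processor_get_platform_limit() above (the
 * numbers are illustrative, not from any specific platform): with a
 * three-entry _PSS of 2000/1500/1000 MHz and _PPC = 1, states 1..2
 * remain usable, so the DEV_PM_QOS_MAX_FREQUENCY request is updated to
 * states[1].core_frequency * 1000 = 1500000 kHz.
 */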
#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE	0x80
/*
 * acpi_processor_ppc_ost: Notify firmware of the _PPC evaluation status
 * @handle: ACPI processor handle
 * @status: the status code of _PPC evaluation
 *	0: success. OSPM is now using the performance state specified.
 *	1: failure. OSPM has not changed the number of P-states in use.
 */
static void acpi_processor_ppc_ost(acpi_handle handle, int status)
{
	if (acpi_has_method(handle, "_OST"))
		acpi_evaluate_ost(handle, ACPI_PROCESSOR_NOTIFY_PERFORMANCE,
				  status, NULL);
}

void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
{
	int ret;

	if (ignore_ppc || !pr->performance) {
		/*
		 * The _OST object is evaluated only for notification
		 * events; otherwise it is skipped.
		 */
		if (event_flag)
			acpi_processor_ppc_ost(pr->handle, 1);
		return;
	}

	ret = acpi_processor_get_platform_limit(pr);
	/*
	 * The _OST object is evaluated only for notification events;
	 * otherwise it is skipped.
	 */
	if (event_flag) {
		if (ret < 0)
			acpi_processor_ppc_ost(pr->handle, 1);
		else
			acpi_processor_ppc_ost(pr->handle, 0);
	}
	if (ret >= 0)
		cpufreq_update_limits(pr->id);
}

int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
{
	struct acpi_processor *pr;

	pr = per_cpu(processors, cpu);
	if (!pr || !pr->performance || !pr->performance->state_count)
		return -ENODEV;

	*limit = pr->performance->states[pr->performance_platform_limit].
		core_frequency * 1000;
	return 0;
}
EXPORT_SYMBOL(acpi_processor_get_bios_limit);

void acpi_processor_ignore_ppc_init(void)
{
	if (ignore_ppc < 0)
		ignore_ppc = 0;
}

void acpi_processor_ppc_init(int cpu)
{
	struct acpi_processor *pr = per_cpu(processors, cpu);
	int ret;

	if (!pr)
		return;

	ret = dev_pm_qos_add_request(get_cpu_device(cpu),
				     &pr->perflib_req, DEV_PM_QOS_MAX_FREQUENCY,
				     INT_MAX);
	if (ret < 0)
		pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
		       ret);
}

void acpi_processor_ppc_exit(int cpu)
{
	struct acpi_processor *pr = per_cpu(processors, cpu);

	if (pr)
		dev_pm_qos_remove_request(&pr->perflib_req);
}
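/*
 * A minimal usage sketch for the helpers above (assuming a cpufreq driver
 * built on this library; error handling omitted):
 *
 *	acpi_processor_ppc_init(cpu);		// add the QoS request
 *	...
 *	acpi_processor_ppc_has_changed(pr, 1);	// _PPC notification path
 *	...
 *	acpi_processor_ppc_exit(cpu);		// drop the QoS request
 */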
static int acpi_processor_get_performance_control(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *pct = NULL;
	union acpi_object obj = { 0 };

	status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PCT"));
		return -ENODEV;
	}

	pct = (union acpi_object *)buffer.pointer;
	if (!pct || (pct->type != ACPI_TYPE_PACKAGE)
	    || (pct->package.count != 2)) {
		printk(KERN_ERR PREFIX "Invalid _PCT data\n");
		result = -EFAULT;
		goto end;
	}

	/*
	 * control_register
	 */
	obj = pct->package.elements[0];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_pct_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX "Invalid _PCT data (control_register)\n");
		result = -EFAULT;
		goto end;
	}
	memcpy(&pr->performance->control_register, obj.buffer.pointer,
	       sizeof(struct acpi_pct_register));

	/*
	 * status_register
	 */
	obj = pct->package.elements[1];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_pct_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX "Invalid _PCT data (status_register)\n");
		result = -EFAULT;
		goto end;
	}

	memcpy(&pr->performance->status_register, obj.buffer.pointer,
	       sizeof(struct acpi_pct_register));

end:
	kfree(buffer.pointer);

	return result;
}

#ifdef CONFIG_X86
/*
 * Some AMDs have 50MHz frequency multiples, but only provide 100MHz rounding
 * in their ACPI data. Calculate the real values and fix up the _PSS data.
 */
static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
{
	u32 hi, lo, fid, did;
	int index = px->control & 0x00000007;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return;

	if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
	    || boot_cpu_data.x86 == 0x11) {
		rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi);
		/*
		 * MSR C001_0064+:
		 * Bit 63: PstateEn. Read-write. If set, the P-state is valid.
		 */
		if (!(hi & BIT(31)))
			return;

		fid = lo & 0x3f;
		did = (lo >> 6) & 7;
		if (boot_cpu_data.x86 == 0x10)
			px->core_frequency = (100 * (fid + 0x10)) >> did;
		else
			px->core_frequency = (100 * (fid + 8)) >> did;
	}
}
#else
static void amd_fixup_frequency(struct acpi_processor_px *px, int i) {}
#endif
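/*
 * Worked example for the family 0x10 formula in amd_fixup_frequency()
 * above: fid = 0xb and did = 1 give (100 * (0xb + 0x10)) >> 1 = 1350 MHz,
 * recovering a 50 MHz-granular value that the 100 MHz-rounded _PSS entry
 * cannot express.
 */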
static int acpi_processor_get_performance_states(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNNN"), "NNNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *pss = NULL;
	int i;
	int last_invalid = -1;

	status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PSS"));
		return -ENODEV;
	}

	pss = buffer.pointer;
	if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) {
		printk(KERN_ERR PREFIX "Invalid _PSS data\n");
		result = -EFAULT;
		goto end;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d performance states\n",
			  pss->package.count));

	pr->performance->state_count = pss->package.count;
	pr->performance->states =
	    kmalloc_array(pss->package.count,
			  sizeof(struct acpi_processor_px),
			  GFP_KERNEL);
	if (!pr->performance->states) {
		result = -ENOMEM;
		goto end;
	}

	for (i = 0; i < pr->performance->state_count; i++) {

		struct acpi_processor_px *px = &(pr->performance->states[i]);

		state.length = sizeof(struct acpi_processor_px);
		state.pointer = px;

		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));

		status = acpi_extract_package(&(pss->package.elements[i]),
					      &format, &state);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status, "Invalid _PSS data"));
			result = -EFAULT;
			kfree(pr->performance->states);
			goto end;
		}

		amd_fixup_frequency(px, i);

		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
				  i,
				  (u32) px->core_frequency,
				  (u32) px->power,
				  (u32) px->transition_latency,
				  (u32) px->bus_master_latency,
				  (u32) px->control, (u32) px->status));

		/*
		 * Check that ACPI's u64 MHz will be valid as u32 kHz in cpufreq.
		 * Remember the first invalid entry so that later valid entries
		 * can be compacted over it.
		 */
		if (!px->core_frequency ||
		    ((u32)(px->core_frequency * 1000) !=
		     (px->core_frequency * 1000))) {
			printk(KERN_ERR FW_BUG PREFIX
			       "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n",
			       pr->id, px->core_frequency);
			if (last_invalid == -1)
				last_invalid = i;
		} else {
			if (last_invalid != -1) {
				/*
				 * Copy this valid entry over last_invalid entry
				 */
				memcpy(&(pr->performance->states[last_invalid]),
				       px, sizeof(struct acpi_processor_px));
				++last_invalid;
			}
		}
	}

	if (last_invalid == 0) {
		printk(KERN_ERR FW_BUG PREFIX
		       "No valid BIOS _PSS frequency found for processor %d\n", pr->id);
		result = -EFAULT;
		kfree(pr->performance->states);
		pr->performance->states = NULL;
	}

	if (last_invalid > 0)
		pr->performance->state_count = last_invalid;

end:
	kfree(buffer.pointer);

	return result;
}

int acpi_processor_get_performance_info(struct acpi_processor *pr)
{
	int result = 0;

	if (!pr || !pr->performance || !pr->handle)
		return -EINVAL;

	if (!acpi_has_method(pr->handle, "_PCT")) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "ACPI-based processor performance control unavailable\n"));
		return -ENODEV;
	}

	result = acpi_processor_get_performance_control(pr);
	if (result)
		goto update_bios;

	result = acpi_processor_get_performance_states(pr);
	if (result)
		goto update_bios;

	/* We need to call _PPC once when cpufreq starts */
	if (ignore_ppc != 1)
		result = acpi_processor_get_platform_limit(pr);

	return result;

	/*
	 * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint
	 * that the BIOS is older than the CPU and does not know its frequencies.
	 */
update_bios:
#ifdef CONFIG_X86
	if (acpi_has_method(pr->handle, "_PPC")) {
		if (boot_cpu_has(X86_FEATURE_EST))
			printk(KERN_WARNING FW_BUG "BIOS needs update for CPU "
			       "frequency support\n");
	}
#endif
	return result;
}
EXPORT_SYMBOL_GPL(acpi_processor_get_performance_info);
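/*
 * Worked example for the FADT handshake below (the values are
 * illustrative, not from any specific platform): with smi_command = 0xb2
 * and pstate_control = 0x80, acpi_processor_pstate_control() performs an
 * 8-bit write of 0x80 to I/O port 0xb2, telling the firmware that the OS
 * is taking over P-state control.
 */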
int acpi_processor_pstate_control(void)
{
	acpi_status status;

	if (!acpi_gbl_FADT.smi_command || !acpi_gbl_FADT.pstate_control)
		return 0;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Writing pstate_control [0x%x] to smi_command [0x%x]\n",
			  acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));

	status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
				    (u32)acpi_gbl_FADT.pstate_control, 8);
	if (ACPI_SUCCESS(status))
		return 1;

	ACPI_EXCEPTION((AE_INFO, status,
			"Failed to write pstate_control [0x%x] to smi_command [0x%x]",
			acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));
	return -EIO;
}

int acpi_processor_notify_smm(struct module *calling_module)
{
	static int is_done = 0;
	int result;

	if (!acpi_processor_cpufreq_init)
		return -EBUSY;

	if (!try_module_get(calling_module))
		return -EINVAL;

	/*
	 * is_done is set to negative if an error occurred, and to positive
	 * if _no_ error occurred, but SMM was already notified. This avoids
	 * double notification which might lead to unexpected results.
	 */
	if (is_done > 0) {
		module_put(calling_module);
		return 0;
	} else if (is_done < 0) {
		module_put(calling_module);
		return is_done;
	}

	is_done = -EIO;

	result = acpi_processor_pstate_control();
	if (!result) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_control\n"));
		module_put(calling_module);
		return 0;
	}
	if (result < 0) {
		module_put(calling_module);
		return result;
	}

	/*
	 * Success. If there's no _PPC, we need to fear nothing, so
	 * we can allow the cpufreq driver to be rmmod'ed.
	 */
	is_done = 1;

	if (!acpi_processor_ppc_in_use)
		module_put(calling_module);

	return 0;
}
EXPORT_SYMBOL(acpi_processor_notify_smm);

int acpi_processor_get_psd(acpi_handle handle, struct acpi_psd_package *pdomain)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;

	status = acpi_evaluate_object(handle, "_PSD", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) {
		printk(KERN_ERR PREFIX "Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (psd->package.count != 1) {
		printk(KERN_ERR PREFIX "Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		printk(KERN_ERR PREFIX "Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		printk(KERN_ERR PREFIX "Unknown _PSD:num_entries\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		printk(KERN_ERR PREFIX "Unknown _PSD:revision\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		printk(KERN_ERR PREFIX "Invalid _PSD:coord_type\n");
		result = -EFAULT;
		goto end;
	}
end:
	kfree(buffer.pointer);
	return result;
}
EXPORT_SYMBOL(acpi_processor_get_psd);
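/*
 * For reference, a _PSD entry as consumed by acpi_processor_get_psd()
 * above is a package of five integers ("NNNNN" in the extraction format):
 * { num_entries, revision, domain, coord_type, num_processors }, with
 * coord_type 0xfc/0xfd/0xfe for SW_ALL/SW_ANY/HW_ALL respectively.
 */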
int acpi_processor_preregister_performance(
		struct acpi_processor_performance __percpu *performance)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct acpi_processor *pr;
	struct acpi_psd_package *pdomain;
	struct acpi_processor *match_pr;
	struct acpi_psd_package *match_pdomain;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&performance_mutex);

	/*
	 * Check if another driver has already registered, and abort before
	 * changing pr->performance if it has. Check input data as well.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr) {
			/* Look only at processors in ACPI namespace */
			continue;
		}

		if (pr->performance) {
			retval = -EBUSY;
			goto err_out;
		}

		if (!performance || !per_cpu_ptr(performance, i)) {
			retval = -EINVAL;
			goto err_out;
		}
	}

	/* Call _PSD for all CPUs */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		pr->performance = per_cpu_ptr(performance, i);
		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
		pdomain = &(pr->performance->domain_info);
		if (acpi_processor_get_psd(pr->handle, pdomain)) {
			retval = -EINVAL;
			continue;
		}
	}
	if (retval)
		goto err_ret;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up the
	 * P-state domain info.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		pdomain = &(pr->performance->domain_info);
		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */

			if (match_pdomain->num_processors != count_target) {
				retval = -EINVAL;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EINVAL;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->performance->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->performance->shared_type =
					pr->performance->shared_type;
			cpumask_copy(match_pr->performance->shared_cpu_map,
				     pr->performance->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr || !pr->performance)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->performance->shared_cpu_map);
			cpumask_set_cpu(i, pr->performance->shared_cpu_map);
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
		pr->performance = NULL; /* Will be set for real in register */
	}

err_out:
	mutex_unlock(&performance_mutex);
	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL(acpi_processor_preregister_performance);

int
acpi_processor_register_performance(struct acpi_processor_performance
				    *performance, unsigned int cpu)
{
	struct acpi_processor *pr;

	if (!acpi_processor_cpufreq_init)
		return -EINVAL;

	mutex_lock(&performance_mutex);

	pr = per_cpu(processors, cpu);
	if (!pr) {
		mutex_unlock(&performance_mutex);
		return -ENODEV;
	}

	if (pr->performance) {
		mutex_unlock(&performance_mutex);
		return -EBUSY;
	}

	WARN_ON(!performance);

	pr->performance = performance;

	if (acpi_processor_get_performance_info(pr)) {
		pr->performance = NULL;
		mutex_unlock(&performance_mutex);
		return -EIO;
	}

	mutex_unlock(&performance_mutex);
	return 0;
}
EXPORT_SYMBOL(acpi_processor_register_performance);
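/*
 * A minimal sketch of the expected calling sequence for a cpufreq driver
 * using this library (perf and perf_ptr are placeholder variables; error
 * handling omitted):
 *
 *	acpi_processor_preregister_performance(perf);	    // resolve _PSD domains
 *	acpi_processor_register_performance(perf_ptr, cpu); // _PCT/_PSS/_PPC
 *	acpi_processor_notify_smm(THIS_MODULE);		    // optional SMI handshake
 *	...
 *	acpi_processor_unregister_performance(cpu);
 */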
void acpi_processor_unregister_performance(unsigned int cpu)
{
	struct acpi_processor *pr;

	mutex_lock(&performance_mutex);

	pr = per_cpu(processors, cpu);
	if (!pr) {
		mutex_unlock(&performance_mutex);
		return;
	}

	if (pr->performance)
		kfree(pr->performance->states);

	pr->performance = NULL;

	mutex_unlock(&performance_mutex);
}
EXPORT_SYMBOL(acpi_processor_unregister_performance);