// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_perflib.c - ACPI Processor P-States Library ($Revision: 71 $)
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_FILE_PERFORMANCE	"performance"
#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_perflib");

static DEFINE_MUTEX(performance_mutex);

/*
 * _PPC support is implemented as a CPUfreq policy notifier:
 * each time a CPUfreq driver that is also registered with the ACPI core
 * is asked to change the speed policy, the maximum value is adjusted so
 * that it stays within the platform limit.
 *
 * Also, when a new platform limit value is detected, the CPUfreq
 * policy is adjusted accordingly.
 */

/* ignore_ppc:
 * -1 -> cpufreq low level drivers not initialized -> _PSS, etc. not called yet
 *	 ignore _PPC
 *  0 -> cpufreq low level drivers initialized -> consider _PPC values
 *  1 -> ignore _PPC totally -> forced by user through boot param
 */
static int ignore_ppc = -1;
module_param(ignore_ppc, int, 0644);
MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly"
		 " limited by BIOS, this should help");

static bool acpi_processor_ppc_in_use;

static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
	acpi_status status = 0;
	unsigned long long ppc = 0;
	int ret;

	if (!pr)
		return -EINVAL;

	/*
	 * _PPC indicates the maximum state currently supported by the platform
	 * (e.g. 0 = states 0..n; 1 = states 1..n; etc.)
	 */
	status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);

	if (status != AE_NOT_FOUND)
		acpi_processor_ppc_in_use = true;

	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PPC"));
		return -ENODEV;
	}

	pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
		 (int)ppc, ppc ? "" : "not");

	pr->performance_platform_limit = (int)ppc;

	if (ppc >= pr->performance->state_count ||
	    unlikely(!freq_qos_request_active(&pr->perflib_req)))
		return 0;

	ret = freq_qos_update_request(&pr->perflib_req,
			pr->performance->states[ppc].core_frequency * 1000);
	if (ret < 0) {
		pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n",
			pr->id, ret);
	}

	return 0;
}
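/*
 * Worked example (illustrative values, not from any particular platform):
 * with a three-entry _PSS of 2000/1500/1000 MHz and _PPC = 1, the code
 * above stores performance_platform_limit = 1 and updates the perflib
 * freq_qos request to states[1].core_frequency * 1000 = 1500000 kHz, so
 * cpufreq will not run the CPU above 1.5 GHz until _PPC changes again.
 */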
#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
/*
 * acpi_processor_ppc_ost: Notify firmware the _PPC evaluation status
 * @handle: ACPI processor handle
 * @status: the status code of _PPC evaluation
 *	0: success. OSPM is now using the performance state specified.
 *	1: failure. OSPM has not changed the number of P-states in use
 */
static void acpi_processor_ppc_ost(acpi_handle handle, int status)
{
	if (acpi_has_method(handle, "_OST"))
		acpi_evaluate_ost(handle, ACPI_PROCESSOR_NOTIFY_PERFORMANCE,
				  status, NULL);
}

void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
{
	int ret;

	if (ignore_ppc || !pr->performance) {
		/*
		 * The _OST object is evaluated only on a notification
		 * event; otherwise it is skipped.
		 */
		if (event_flag)
			acpi_processor_ppc_ost(pr->handle, 1);
		return;
	}

	ret = acpi_processor_get_platform_limit(pr);
	/*
	 * The _OST object is evaluated only on a notification event;
	 * otherwise it is skipped.
	 */
	if (event_flag) {
		if (ret < 0)
			acpi_processor_ppc_ost(pr->handle, 1);
		else
			acpi_processor_ppc_ost(pr->handle, 0);
	}
	if (ret >= 0)
		cpufreq_update_limits(pr->id);
}

int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
{
	struct acpi_processor *pr;

	pr = per_cpu(processors, cpu);
	if (!pr || !pr->performance || !pr->performance->state_count)
		return -ENODEV;

	*limit = pr->performance->states[pr->performance_platform_limit].
		core_frequency * 1000;

	return 0;
}
EXPORT_SYMBOL(acpi_processor_get_bios_limit);

void acpi_processor_ignore_ppc_init(void)
{
	if (ignore_ppc < 0)
		ignore_ppc = 0;
}

void acpi_processor_ppc_init(struct cpufreq_policy *policy)
{
	unsigned int cpu;

	for_each_cpu(cpu, policy->related_cpus) {
		struct acpi_processor *pr = per_cpu(processors, cpu);
		int ret;

		if (!pr)
			continue;

		ret = freq_qos_add_request(&policy->constraints,
					   &pr->perflib_req,
					   FREQ_QOS_MAX, INT_MAX);
		if (ret < 0)
			pr_err("Failed to add freq constraint for CPU%d (%d)\n",
			       cpu, ret);
	}
}

void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
{
	unsigned int cpu;

	for_each_cpu(cpu, policy->related_cpus) {
		struct acpi_processor *pr = per_cpu(processors, cpu);

		if (pr)
			freq_qos_remove_request(&pr->perflib_req);
	}
}
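/*
 * For reference, _PCT is expected to be a package of exactly two buffers,
 * each wrapping a generic address structure: the performance control
 * register followed by the performance status register. A hypothetical
 * ASL encoding on a functional-fixed-hardware system might look like:
 *
 *	Name (_PCT, Package () {
 *		ResourceTemplate () { Register (FFixedHW, 0, 0, 0) },	// control
 *		ResourceTemplate () { Register (FFixedHW, 0, 0, 0) }	// status
 *	})
 *
 * acpi_processor_get_performance_control() below copies those two buffers
 * into pr->performance->control_register and ->status_register.
 */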
static int acpi_processor_get_performance_control(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *pct = NULL;
	union acpi_object obj = { 0 };

	status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PCT"));
		return -ENODEV;
	}

	pct = (union acpi_object *)buffer.pointer;
	if (!pct || (pct->type != ACPI_TYPE_PACKAGE)
	    || (pct->package.count != 2)) {
		printk(KERN_ERR PREFIX "Invalid _PCT data\n");
		result = -EFAULT;
		goto end;
	}

	/*
	 * control_register
	 */

	obj = pct->package.elements[0];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_pct_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX "Invalid _PCT data (control_register)\n");
		result = -EFAULT;
		goto end;
	}
	memcpy(&pr->performance->control_register, obj.buffer.pointer,
	       sizeof(struct acpi_pct_register));

	/*
	 * status_register
	 */

	obj = pct->package.elements[1];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_pct_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX "Invalid _PCT data (status_register)\n");
		result = -EFAULT;
		goto end;
	}

	memcpy(&pr->performance->status_register, obj.buffer.pointer,
	       sizeof(struct acpi_pct_register));

end:
	kfree(buffer.pointer);

	return result;
}

#ifdef CONFIG_X86
/*
 * Some AMD processors run at 50MHz frequency multiples, but only provide
 * 100MHz rounding in their ACPI tables. Calculate the real values and
 * fix up the _PSS data.
 */
static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
{
	u32 hi, lo, fid, did;
	int index = px->control & 0x00000007;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return;

	if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
	    || boot_cpu_data.x86 == 0x11) {
		rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi);
		/*
		 * MSR C001_0064+:
		 * Bit 63: PstateEn. Read-write. If set, the P-state is valid.
		 * (Bit 63 of the MSR is bit 31 of the high dword in 'hi'.)
		 */
		if (!(hi & BIT(31)))
			return;

		fid = lo & 0x3f;
		did = (lo >> 6) & 7;
		if (boot_cpu_data.x86 == 0x10)
			px->core_frequency = (100 * (fid + 0x10)) >> did;
		else
			px->core_frequency = (100 * (fid + 8)) >> did;
	}
}
#else
static void amd_fixup_frequency(struct acpi_processor_px *px, int i) {}
#endif
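/*
 * Worked example for the fixup above (hypothetical register values): on a
 * family 0x10 part with CpuFid = 0x0b and CpuDid = 1, the core frequency
 * is (100 * (0x0b + 0x10)) >> 1 = 1350 MHz - a 50MHz multiple that a
 * 100MHz-rounded _PSS entry could not have expressed exactly.
 */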
static int acpi_processor_get_performance_states(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNNN"), "NNNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *pss = NULL;
	int i;
	int last_invalid = -1;

	status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PSS"));
		return -ENODEV;
	}

	pss = buffer.pointer;
	if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) {
		printk(KERN_ERR PREFIX "Invalid _PSS data\n");
		result = -EFAULT;
		goto end;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d performance states\n",
			  pss->package.count));

	pr->performance->state_count = pss->package.count;
	pr->performance->states =
	    kmalloc_array(pss->package.count,
			  sizeof(struct acpi_processor_px),
			  GFP_KERNEL);
	if (!pr->performance->states) {
		result = -ENOMEM;
		goto end;
	}

	for (i = 0; i < pr->performance->state_count; i++) {

		struct acpi_processor_px *px = &(pr->performance->states[i]);

		state.length = sizeof(struct acpi_processor_px);
		state.pointer = px;

		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));

		status = acpi_extract_package(&(pss->package.elements[i]),
					      &format, &state);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status, "Invalid _PSS data"));
			result = -EFAULT;
			kfree(pr->performance->states);
			goto end;
		}

		amd_fixup_frequency(px, i);

		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
				  i,
				  (u32) px->core_frequency,
				  (u32) px->power,
				  (u32) px->transition_latency,
				  (u32) px->bus_master_latency,
				  (u32) px->control, (u32) px->status));

		/*
		 * Check that ACPI's u64 MHz will be valid as u32 KHz in cpufreq
		 */
		if (!px->core_frequency ||
		    ((u32)(px->core_frequency * 1000) !=
		     (px->core_frequency * 1000))) {
			printk(KERN_ERR FW_BUG PREFIX
			       "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n",
			       pr->id, px->core_frequency);
			if (last_invalid == -1)
				last_invalid = i;
		} else {
			if (last_invalid != -1) {
				/*
				 * Copy this valid entry over last_invalid entry
				 */
				memcpy(&(pr->performance->states[last_invalid]),
				       px, sizeof(struct acpi_processor_px));
				++last_invalid;
			}
		}
	}

	if (last_invalid == 0) {
		printk(KERN_ERR FW_BUG PREFIX
		       "No valid BIOS _PSS frequency found for processor %d\n", pr->id);
		result = -EFAULT;
		kfree(pr->performance->states);
		pr->performance->states = NULL;
	}

	if (last_invalid > 0)
		pr->performance->state_count = last_invalid;

end:
	kfree(buffer.pointer);

	return result;
}

int acpi_processor_get_performance_info(struct acpi_processor *pr)
{
	int result = 0;

	if (!pr || !pr->performance || !pr->handle)
		return -EINVAL;

	if (!acpi_has_method(pr->handle, "_PCT")) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "ACPI-based processor performance control unavailable\n"));
		return -ENODEV;
	}

	result = acpi_processor_get_performance_control(pr);
	if (result)
		goto update_bios;

	result = acpi_processor_get_performance_states(pr);
	if (result)
		goto update_bios;

	/* We need to call _PPC once when cpufreq starts */
	if (ignore_ppc != 1)
		result = acpi_processor_get_platform_limit(pr);

	return result;

	/*
	 * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint
	 * that the BIOS is older than the CPU and does not know its frequencies.
	 */
update_bios:
#ifdef CONFIG_X86
	if (acpi_has_method(pr->handle, "_PPC")) {
		if (boot_cpu_has(X86_FEATURE_EST))
			printk(KERN_WARNING FW_BUG
			       "BIOS needs update for CPU frequency support\n");
	}
#endif
	return result;
}
EXPORT_SYMBOL_GPL(acpi_processor_get_performance_info);
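/*
 * Illustrative example (hypothetical FADT values): if FADT.smi_command is
 * 0xb2 and FADT.pstate_control is 0x80, acpi_processor_pstate_control()
 * below writes 0x80 to I/O port 0xb2, asking the SMM firmware to hand
 * P-state control over to the OS (OSPM).
 */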
int acpi_processor_pstate_control(void)
{
	acpi_status status;

	if (!acpi_gbl_FADT.smi_command || !acpi_gbl_FADT.pstate_control)
		return 0;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Writing pstate_control [0x%x] to smi_command [0x%x]\n",
			  acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));

	status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
				    (u32)acpi_gbl_FADT.pstate_control, 8);
	if (ACPI_SUCCESS(status))
		return 1;

	ACPI_EXCEPTION((AE_INFO, status,
			"Failed to write pstate_control [0x%x] to smi_command [0x%x]",
			acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));
	return -EIO;
}

int acpi_processor_notify_smm(struct module *calling_module)
{
	static int is_done = 0;
	int result;

	if (!acpi_processor_cpufreq_init)
		return -EBUSY;

	if (!try_module_get(calling_module))
		return -EINVAL;

	/*
	 * is_done is set to negative if an error occurred, and to positive
	 * if _no_ error occurred, but SMM was already notified. This avoids
	 * double notification, which might lead to unexpected results.
	 */
	if (is_done > 0) {
		module_put(calling_module);
		return 0;
	} else if (is_done < 0) {
		module_put(calling_module);
		return is_done;
	}

	is_done = -EIO;

	result = acpi_processor_pstate_control();
	if (!result) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_control\n"));
		module_put(calling_module);
		return 0;
	}
	if (result < 0) {
		module_put(calling_module);
		return result;
	}

	/*
	 * Success. If there's no _PPC, we have nothing to fear, so the
	 * cpufreq driver can be allowed to be rmmod'ed.
	 */
	is_done = 1;

	if (!acpi_processor_ppc_in_use)
		module_put(calling_module);

	return 0;
}
EXPORT_SYMBOL(acpi_processor_notify_smm);

int acpi_processor_get_psd(acpi_handle handle, struct acpi_psd_package *pdomain)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;

	status = acpi_evaluate_object(handle, "_PSD", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) {
		printk(KERN_ERR PREFIX "Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (psd->package.count != 1) {
		printk(KERN_ERR PREFIX "Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		printk(KERN_ERR PREFIX "Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		printk(KERN_ERR PREFIX "Unknown _PSD:num_entries\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		printk(KERN_ERR PREFIX "Unknown _PSD:revision\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		printk(KERN_ERR PREFIX "Invalid _PSD:coord_type\n");
		result = -EFAULT;
		goto end;
	}
end:
	kfree(buffer.pointer);
	return result;
}
EXPORT_SYMBOL(acpi_processor_get_psd);
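/*
 * For reference, _PSD as validated above is a package holding a single
 * five-integer sub-package. A hypothetical ASL example describing a
 * two-CPU software-coordinated (SW_ANY, 0xFD) domain:
 *
 *	Name (_PSD, Package () {
 *		Package () { 5, 0, 0, 0xFD, 2 }
 *		// NumEntries, Revision, Domain, CoordType, NumProcessors
 *	})
 */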
int acpi_processor_preregister_performance(
		struct acpi_processor_performance __percpu *performance)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct acpi_processor *pr;
	struct acpi_psd_package *pdomain;
	struct acpi_processor *match_pr;
	struct acpi_psd_package *match_pdomain;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&performance_mutex);

	/*
	 * Check if another driver has already registered, and abort before
	 * changing pr->performance if it has. Check input data as well.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr) {
			/* Look only at processors in ACPI namespace */
			continue;
		}

		if (pr->performance) {
			retval = -EBUSY;
			goto err_out;
		}

		if (!performance || !per_cpu_ptr(performance, i)) {
			retval = -EINVAL;
			goto err_out;
		}
	}

	/* Call _PSD for all CPUs */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		pr->performance = per_cpu_ptr(performance, i);
		pdomain = &(pr->performance->domain_info);
		if (acpi_processor_get_psd(pr->handle, pdomain)) {
			retval = -EINVAL;
			continue;
		}
	}
	if (retval)
		goto err_ret;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up the
	 * P-state domain info.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		pdomain = &(pr->performance->domain_info);
		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */

			if (match_pdomain->num_processors != count_target) {
				retval = -EINVAL;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EINVAL;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->performance->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->performance->shared_type =
					pr->performance->shared_type;
			cpumask_copy(match_pr->performance->shared_cpu_map,
				     pr->performance->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr || !pr->performance)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->performance->shared_cpu_map);
			cpumask_set_cpu(i, pr->performance->shared_cpu_map);
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_NONE;
		}
		pr->performance = NULL; /* Will be set for real in register */
	}

err_out:
	mutex_unlock(&performance_mutex);
	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL(acpi_processor_preregister_performance);
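/*
 * Typical usage by a cpufreq driver, sketched after acpi-cpufreq (the
 * identifiers below are illustrative, not part of this file's API):
 *
 *	static struct acpi_processor_performance __percpu *perf_data;
 *
 *	// module init:  perf_data = alloc_percpu(struct acpi_processor_performance);
 *	//               acpi_processor_preregister_performance(perf_data);
 *	// policy init:  acpi_processor_register_performance(perf, policy->cpu);
 *	// policy exit:  acpi_processor_unregister_performance(policy->cpu);
 */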
int acpi_processor_register_performance(struct acpi_processor_performance
					*performance, unsigned int cpu)
{
	struct acpi_processor *pr;

	if (!acpi_processor_cpufreq_init)
		return -EINVAL;

	mutex_lock(&performance_mutex);

	pr = per_cpu(processors, cpu);
	if (!pr) {
		mutex_unlock(&performance_mutex);
		return -ENODEV;
	}

	if (pr->performance) {
		mutex_unlock(&performance_mutex);
		return -EBUSY;
	}

	WARN_ON(!performance);

	pr->performance = performance;

	if (acpi_processor_get_performance_info(pr)) {
		pr->performance = NULL;
		mutex_unlock(&performance_mutex);
		return -EIO;
	}

	mutex_unlock(&performance_mutex);
	return 0;
}
EXPORT_SYMBOL(acpi_processor_register_performance);

void acpi_processor_unregister_performance(unsigned int cpu)
{
	struct acpi_processor *pr;

	mutex_lock(&performance_mutex);

	pr = per_cpu(processors, cpu);
	if (!pr) {
		mutex_unlock(&performance_mutex);
		return;
	}

	if (pr->performance)
		kfree(pr->performance->states);

	pr->performance = NULL;

	mutex_unlock(&performance_mutex);
}
EXPORT_SYMBOL(acpi_processor_unregister_performance);