/*
 * processor_perflib.c - ACPI Processor P-States Library ($Revision: 71 $)
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *  			- Added processor hotplug support
 *
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>

#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>

#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS		"processor"
#define ACPI_PROCESSOR_FILE_PERFORMANCE	"performance"
#define _COMPONENT			ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_perflib");

static DEFINE_MUTEX(performance_mutex);

/* Use cpufreq debug layer for _PPC changes. */
#define cpufreq_printk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
						"cpufreq-core", msg)

/*
 * _PPC support is implemented as a CPUfreq policy notifier:
 * each time a CPUfreq driver that is also registered with the ACPI core
 * is asked to change the speed policy, the maximum value is adjusted so
 * that it stays within the platform limit.
 *
 * Also, when a new platform limit value is detected, the CPUfreq
 * policy is adjusted accordingly.
 */
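
/*
 * Worked example (illustrative values only): with three _PSS states at
 * 2400/1800/1200 MHz and _PPC = 1, the notifier below clamps policy->max
 * to states[1].core_frequency * 1000 = 1800000 kHz, so cpufreq is left
 * with states 1..2 and state 0 stays off limits until _PPC changes.
 */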
/*
 * ignore_ppc:
 * -1 -> cpufreq low level drivers not initialized -> _PSS, etc. not called yet
 *       -> ignore _PPC
 *  0 -> cpufreq low level drivers initialized -> consider _PPC values
 *  1 -> ignore _PPC totally -> forced by user through boot param
 */
static int ignore_ppc = -1;
module_param(ignore_ppc, int, 0644);
MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly" \
		 " limited by BIOS, this should help");

#define PPC_REGISTERED   1
#define PPC_IN_USE       2

static int acpi_processor_ppc_status;

static int acpi_processor_ppc_notifier(struct notifier_block *nb,
				       unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	struct acpi_processor *pr;
	unsigned int ppc = 0;

	if (event == CPUFREQ_START && ignore_ppc <= 0) {
		ignore_ppc = 0;
		return 0;
	}

	if (ignore_ppc)
		return 0;

	if (event != CPUFREQ_INCOMPATIBLE)
		return 0;

	mutex_lock(&performance_mutex);

	pr = per_cpu(processors, policy->cpu);
	if (!pr || !pr->performance)
		goto out;

	ppc = (unsigned int)pr->performance_platform_limit;

	if (ppc >= pr->performance->state_count)
		goto out;

	cpufreq_verify_within_limits(policy, 0,
				     pr->performance->states[ppc].
				     core_frequency * 1000);

out:
	mutex_unlock(&performance_mutex);

	return 0;
}

static struct notifier_block acpi_ppc_notifier_block = {
	.notifier_call = acpi_processor_ppc_notifier,
};

static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
	acpi_status status = 0;
	unsigned long long ppc = 0;


	if (!pr)
		return -EINVAL;

	/*
	 * _PPC indicates the maximum state currently supported by the
	 * platform (e.g. 0 = states 0..n; 1 = states 1..n; etc.).
	 */
	status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);

	if (status != AE_NOT_FOUND)
		acpi_processor_ppc_status |= PPC_IN_USE;

	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PPC"));
		return -ENODEV;
	}

	cpufreq_printk("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
		       (int)ppc, ppc ? "" : "not");

	pr->performance_platform_limit = (int)ppc;

	return 0;
}

#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE	0x80
/*
 * acpi_processor_ppc_ost: notify firmware of the _PPC evaluation status
 * @handle: ACPI processor handle
 * @status: the status code of the _PPC evaluation
 *	0: success. OSPM is now using the performance state specified.
 *	1: failure. OSPM has not changed the number of P-states in use.
 */
static void acpi_processor_ppc_ost(acpi_handle handle, int status)
{
	union acpi_object params[2] = {
		{.type = ACPI_TYPE_INTEGER,},
		{.type = ACPI_TYPE_INTEGER,},
	};
	struct acpi_object_list arg_list = {2, params};
	acpi_handle temp;

	params[0].integer.value = ACPI_PROCESSOR_NOTIFY_PERFORMANCE;
	params[1].integer.value = status;

	/* When there is no _OST, skip it. */
	if (ACPI_FAILURE(acpi_get_handle(handle, "_OST", &temp)))
		return;

	acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
	return;
}
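
/*
 * Sketch of the runtime flow tying the helpers above together (the ACPI
 * object path is hypothetical; 0x80 is ACPI_PROCESSOR_NOTIFY_PERFORMANCE):
 *
 *   firmware:  Notify (\_PR.CPU0, 0x80)    // platform limit changed
 *   kernel:    acpi_processor_ppc_has_changed(pr, 1)
 *                -> acpi_processor_get_platform_limit()  evaluates _PPC
 *                -> acpi_processor_ppc_ost()             reports 0 or 1
 *                -> cpufreq_update_policy()              re-applies the limit
 */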
int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
{
	int ret;

	if (ignore_ppc) {
		/*
		 * The _OST object is evaluated only for notification
		 * events; otherwise it is skipped.
		 */
		if (event_flag)
			acpi_processor_ppc_ost(pr->handle, 1);
		return 0;
	}

	ret = acpi_processor_get_platform_limit(pr);
	/*
	 * The _OST object is evaluated only for notification events;
	 * otherwise it is skipped.
	 */
	if (event_flag) {
		if (ret < 0)
			acpi_processor_ppc_ost(pr->handle, 1);
		else
			acpi_processor_ppc_ost(pr->handle, 0);
	}
	if (ret < 0)
		return ret;
	else
		return cpufreq_update_policy(pr->id);
}

int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
{
	struct acpi_processor *pr;

	pr = per_cpu(processors, cpu);
	if (!pr || !pr->performance || !pr->performance->state_count)
		return -ENODEV;
	*limit = pr->performance->states[pr->performance_platform_limit].
		core_frequency * 1000;
	return 0;
}
EXPORT_SYMBOL(acpi_processor_get_bios_limit);

void acpi_processor_ppc_init(void)
{
	if (!cpufreq_register_notifier
	    (&acpi_ppc_notifier_block, CPUFREQ_POLICY_NOTIFIER))
		acpi_processor_ppc_status |= PPC_REGISTERED;
	else
		printk(KERN_DEBUG
		       "Warning: Processor Platform Limit not supported.\n");
}

void acpi_processor_ppc_exit(void)
{
	if (acpi_processor_ppc_status & PPC_REGISTERED)
		cpufreq_unregister_notifier(&acpi_ppc_notifier_block,
					    CPUFREQ_POLICY_NOTIFIER);

	acpi_processor_ppc_status &= ~PPC_REGISTERED;
}

static int acpi_processor_get_performance_control(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *pct = NULL;
	union acpi_object obj = { 0 };


	status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PCT"));
		return -ENODEV;
	}

	pct = (union acpi_object *)buffer.pointer;
	if (!pct || (pct->type != ACPI_TYPE_PACKAGE)
	    || (pct->package.count != 2)) {
		printk(KERN_ERR PREFIX "Invalid _PCT data\n");
		result = -EFAULT;
		goto end;
	}

	/*
	 * control_register
	 */

	obj = pct->package.elements[0];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_pct_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX "Invalid _PCT data (control_register)\n");
		result = -EFAULT;
		goto end;
	}
	memcpy(&pr->performance->control_register, obj.buffer.pointer,
	       sizeof(struct acpi_pct_register));

	/*
	 * status_register
	 */

	obj = pct->package.elements[1];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_pct_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX "Invalid _PCT data (status_register)\n");
		result = -EFAULT;
		goto end;
	}

	memcpy(&pr->performance->status_register, obj.buffer.pointer,
	       sizeof(struct acpi_pct_register));

end:
	kfree(buffer.pointer);

	return result;
}
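
/*
 * For reference, a _PCT object is a package of exactly two Buffers, each
 * wrapping a Generic Register descriptor; the raw descriptor bytes are
 * memcpy'd into control_register/status_register above. A minimal ASL
 * sketch (the register parameters are firmware-specific, shown as zeros):
 *
 *   Name (_PCT, Package () {
 *       ResourceTemplate () { Register (FFixedHW, 0, 0, 0) },  // control
 *       ResourceTemplate () { Register (FFixedHW, 0, 0, 0) }   // status
 *   })
 */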
static int acpi_processor_get_performance_states(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNNN"), "NNNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *pss = NULL;
	int i;


	status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PSS"));
		return -ENODEV;
	}

	pss = buffer.pointer;
	if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) {
		printk(KERN_ERR PREFIX "Invalid _PSS data\n");
		result = -EFAULT;
		goto end;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d performance states\n",
			  pss->package.count));

	pr->performance->state_count = pss->package.count;
	pr->performance->states =
	    kmalloc(sizeof(struct acpi_processor_px) * pss->package.count,
		    GFP_KERNEL);
	if (!pr->performance->states) {
		result = -ENOMEM;
		goto end;
	}

	for (i = 0; i < pr->performance->state_count; i++) {

		struct acpi_processor_px *px = &(pr->performance->states[i]);

		state.length = sizeof(struct acpi_processor_px);
		state.pointer = px;

		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));

		status = acpi_extract_package(&(pss->package.elements[i]),
					      &format, &state);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status, "Invalid _PSS data"));
			result = -EFAULT;
			kfree(pr->performance->states);
			pr->performance->states = NULL;	/* avoid a dangling pointer */
			goto end;
		}

		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
				  i,
				  (u32) px->core_frequency,
				  (u32) px->power,
				  (u32) px->transition_latency,
				  (u32) px->bus_master_latency,
				  (u32) px->control, (u32) px->status));

		/*
		 * Check that ACPI's u64 MHz will be valid as u32 kHz in cpufreq
		 */
		if (!px->core_frequency ||
		    ((u32)(px->core_frequency * 1000) !=
		     (px->core_frequency * 1000))) {
			printk(KERN_ERR FW_BUG PREFIX
			       "Invalid BIOS _PSS frequency: 0x%llx MHz\n",
			       px->core_frequency);
			result = -EFAULT;
			kfree(pr->performance->states);
			pr->performance->states = NULL;	/* avoid a dangling pointer */
			goto end;
		}
	}

end:
	kfree(buffer.pointer);

	return result;
}
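
/*
 * Worked example (values are illustrative, not from any real BIOS): a
 * _PSS entry of
 *
 *   Package () { 2400, 35000, 10, 10, 0x0E25, 0x0E25 }
 *
 * extracts above to core_frequency = 2400 (MHz), power = 35000 (mW),
 * transition_latency = 10 (us), bus_master_latency = 10 (us), and
 * control = status = 0x0E25.  The sanity check rejects entries whose
 * frequency is zero or whose kHz value (MHz * 1000) no longer fits in
 * the u32 that cpufreq uses.
 */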
static int acpi_processor_get_performance_info(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	acpi_handle handle = NULL;

	if (!pr || !pr->performance || !pr->handle)
		return -EINVAL;

	status = acpi_get_handle(pr->handle, "_PCT", &handle);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "ACPI-based processor performance control unavailable\n"));
		return -ENODEV;
	}

	result = acpi_processor_get_performance_control(pr);
	if (result)
		goto update_bios;

	result = acpi_processor_get_performance_states(pr);
	if (result)
		goto update_bios;

	/* We need to call _PPC once when cpufreq starts */
	if (ignore_ppc != 1)
		result = acpi_processor_get_platform_limit(pr);

	return result;

	/*
	 * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint
	 * that the BIOS is older than the CPU and does not know its frequencies.
	 */
update_bios:
#ifdef CONFIG_X86
	if (ACPI_SUCCESS(acpi_get_handle(pr->handle, "_PPC", &handle))) {
		if (boot_cpu_has(X86_FEATURE_EST))
			printk(KERN_WARNING FW_BUG "BIOS needs update for CPU "
			       "frequency support\n");
	}
#endif
	return result;
}

int acpi_processor_notify_smm(struct module *calling_module)
{
	acpi_status status;
	static int is_done = 0;


	if (!(acpi_processor_ppc_status & PPC_REGISTERED))
		return -EBUSY;

	if (!try_module_get(calling_module))
		return -EINVAL;

	/*
	 * is_done is set to negative if an error occurred, and to
	 * positive if _no_ error occurred, but SMM was already
	 * notified. This avoids double notification, which might lead
	 * to unexpected results.
	 */
	if (is_done > 0) {
		module_put(calling_module);
		return 0;
	} else if (is_done < 0) {
		module_put(calling_module);
		return is_done;
	}

	is_done = -EIO;

	/* Can't write pstate_control to smi_command if either value is zero */
	if ((!acpi_gbl_FADT.smi_command) || (!acpi_gbl_FADT.pstate_control)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_control\n"));
		module_put(calling_module);
		return 0;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Writing pstate_control [0x%x] to smi_command [0x%x]\n",
			  acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));

	status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
				    (u32) acpi_gbl_FADT.pstate_control, 8);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"Failed to write pstate_control [0x%x] to "
				"smi_command [0x%x]", acpi_gbl_FADT.pstate_control,
				acpi_gbl_FADT.smi_command));
		module_put(calling_module);
		return status;
	}

	/*
	 * Success. If there's no _PPC, we have nothing to fear, so we
	 * can allow the cpufreq driver to be rmmod'ed.
	 */
	is_done = 1;

	if (!(acpi_processor_ppc_status & PPC_IN_USE))
		module_put(calling_module);

	return 0;
}

EXPORT_SYMBOL(acpi_processor_notify_smm);

static int acpi_processor_get_psd(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object(pr->handle, "_PSD", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		return -ENODEV;
	}

	psd = buffer.pointer;
	if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) {
		printk(KERN_ERR PREFIX "Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (psd->package.count != 1) {
		printk(KERN_ERR PREFIX "Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	pdomain = &(pr->performance->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		printk(KERN_ERR PREFIX "Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		printk(KERN_ERR PREFIX "Unknown _PSD:num_entries\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		printk(KERN_ERR PREFIX "Unknown _PSD:revision\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		printk(KERN_ERR PREFIX "Invalid _PSD:coord_type\n");
		result = -EFAULT;
		goto end;
	}
end:
	kfree(buffer.pointer);
	return result;
}
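
/*
 * The _PSD coordination types validated above map onto cpufreq shared
 * types in acpi_processor_preregister_performance() below:
 *
 *   DOMAIN_COORD_TYPE_SW_ALL (0xFC) -> CPUFREQ_SHARED_TYPE_ALL
 *        (software coordinates; all CPUs in the domain must transition)
 *   DOMAIN_COORD_TYPE_SW_ANY (0xFD) -> CPUFREQ_SHARED_TYPE_ANY
 *        (software coordinates; any CPU may initiate a transition)
 *   DOMAIN_COORD_TYPE_HW_ALL (0xFE) -> CPUFREQ_SHARED_TYPE_HW
 *        (hardware coordinates the domain on its own)
 */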
int acpi_processor_preregister_performance(
		struct acpi_processor_performance __percpu *performance)
{
	int count, count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct acpi_processor *pr;
	struct acpi_psd_package *pdomain;
	struct acpi_processor *match_pr;
	struct acpi_psd_package *match_pdomain;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&performance_mutex);

	/*
	 * Check if another driver has already registered, and abort before
	 * changing pr->performance if it has. Check input data as well.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr) {
			/* Look only at processors in ACPI namespace */
			continue;
		}

		if (pr->performance) {
			retval = -EBUSY;
			goto err_out;
		}

		if (!performance || !per_cpu_ptr(performance, i)) {
			retval = -EINVAL;
			goto err_out;
		}
	}

	/* Call _PSD for all CPUs */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		pr->performance = per_cpu_ptr(performance, i);
		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
		if (acpi_processor_get_psd(pr)) {
			retval = -EINVAL;
			continue;
		}
	}
	if (retval)
		goto err_ret;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up the
	 * P-state domain info.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		pdomain = &(pr->performance->domain_info);
		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		count = 1;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */

			if (match_pdomain->num_processors != count_target) {
				retval = -EINVAL;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EINVAL;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->performance->shared_cpu_map);
			count++;
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->performance->shared_type =
					pr->performance->shared_type;
			cpumask_copy(match_pr->performance->shared_cpu_map,
				     pr->performance->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr || !pr->performance)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->performance->shared_cpu_map);
			cpumask_set_cpu(i, pr->performance->shared_cpu_map);
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
		pr->performance = NULL;	/* Will be set for real in register */
	}

err_out:
	mutex_unlock(&performance_mutex);
	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL(acpi_processor_preregister_performance);
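
/*
 * Typical call sequence from a cpufreq driver (a sketch; acpi-cpufreq
 * follows roughly this pattern, details vary by driver):
 *
 *   acpi_processor_preregister_performance(perf);   // _PSD domain setup
 *   acpi_processor_register_performance(perf, cpu); // _PCT/_PSS/_PPC
 *   acpi_processor_notify_smm(THIS_MODULE);         // hand P-states to OSPM
 *   ...
 *   acpi_processor_unregister_performance(perf, cpu);
 */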
int
acpi_processor_register_performance(struct acpi_processor_performance
				    *performance, unsigned int cpu)
{
	struct acpi_processor *pr;

	if (!(acpi_processor_ppc_status & PPC_REGISTERED))
		return -EINVAL;

	mutex_lock(&performance_mutex);

	pr = per_cpu(processors, cpu);
	if (!pr) {
		mutex_unlock(&performance_mutex);
		return -ENODEV;
	}

	if (pr->performance) {
		mutex_unlock(&performance_mutex);
		return -EBUSY;
	}

	WARN_ON(!performance);

	pr->performance = performance;

	if (acpi_processor_get_performance_info(pr)) {
		pr->performance = NULL;
		mutex_unlock(&performance_mutex);
		return -EIO;
	}

	mutex_unlock(&performance_mutex);
	return 0;
}

EXPORT_SYMBOL(acpi_processor_register_performance);

void
acpi_processor_unregister_performance(struct acpi_processor_performance
				      *performance, unsigned int cpu)
{
	struct acpi_processor *pr;

	mutex_lock(&performance_mutex);

	pr = per_cpu(processors, cpu);
	if (!pr) {
		mutex_unlock(&performance_mutex);
		return;
	}

	if (pr->performance)
		kfree(pr->performance->states);
	pr->performance = NULL;

	mutex_unlock(&performance_mutex);

	return;
}

EXPORT_SYMBOL(acpi_processor_unregister_performance);