/*
 * processor_perflib.c - ACPI Processor P-States Library ($Revision: 71 $)
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 *
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS		"processor"
#define ACPI_PROCESSOR_FILE_PERFORMANCE	"performance"
#define _COMPONENT			ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_perflib");

static DEFINE_MUTEX(performance_mutex);

/*
 * _PPC support is implemented as a CPUfreq policy notifier:
 * each time a CPUfreq driver that is also registered with the ACPI core
 * is asked to change the speed policy, the maximum value is clamped so
 * that it stays within the platform limit.
 *
 * Also, when a new platform limit value is detected, the CPUfreq
 * policy is adjusted accordingly.
 */

/* ignore_ppc:
 * -1 -> cpufreq low level drivers not initialized -> _PSS, etc. not called yet
 *       ignore _PPC
 *  0 -> cpufreq low level drivers initialized -> consider _PPC values
 *  1 -> ignore _PPC totally -> forced by user through boot param
 */
static int ignore_ppc = -1;
module_param(ignore_ppc, int, 0644);
MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly" \
		 " limited by BIOS, this should help");

#define PPC_REGISTERED   1
#define PPC_IN_USE       2

static int acpi_processor_ppc_status;

static int acpi_processor_ppc_notifier(struct notifier_block *nb,
				       unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	struct acpi_processor *pr;
	unsigned int ppc = 0;

	if (event == CPUFREQ_START && ignore_ppc <= 0) {
		ignore_ppc = 0;
		return 0;
	}

	if (ignore_ppc)
		return 0;

	if (event != CPUFREQ_ADJUST)
		return 0;

	mutex_lock(&performance_mutex);

	pr = per_cpu(processors, policy->cpu);
	if (!pr || !pr->performance)
		goto out;

	ppc = (unsigned int)pr->performance_platform_limit;

	if (ppc >= pr->performance->state_count)
		goto out;

	cpufreq_verify_within_limits(policy, 0,
				     pr->performance->states[ppc].
				     core_frequency * 1000);

out:
	mutex_unlock(&performance_mutex);

	return 0;
}
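/*
 * Worked example for the clamp above (frequencies are illustrative):
 * with a four-entry _PSS of {2000, 1800, 1600, 1400} MHz and _PPC = 2,
 * policy->max is limited to states[2].core_frequency * 1000 =
 * 1600000 kHz, so only the 1600 MHz and 1400 MHz states remain
 * selectable by the governor.
 */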
static struct notifier_block acpi_ppc_notifier_block = {
	.notifier_call = acpi_processor_ppc_notifier,
};

static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
	acpi_status status = 0;
	unsigned long long ppc = 0;


	if (!pr)
		return -EINVAL;

	/*
	 * _PPC indicates the maximum state currently supported by the platform
	 * (e.g. 0 = states 0..n; 1 = states 1..n; etc.)
	 */
	status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);

	if (status != AE_NOT_FOUND)
		acpi_processor_ppc_status |= PPC_IN_USE;

	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PPC"));
		return -ENODEV;
	}

	pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
		 (int)ppc, ppc ? "" : "not");

	pr->performance_platform_limit = (int)ppc;

	return 0;
}

#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE	0x80
/*
 * acpi_processor_ppc_ost: Notify firmware of the _PPC evaluation status
 * @handle: ACPI processor handle
 * @status: the status code of the _PPC evaluation
 *	0: success. OSPM is now using the performance state specified.
 *	1: failure. OSPM has not changed the number of P-states in use
 */
static void acpi_processor_ppc_ost(acpi_handle handle, int status)
{
	if (acpi_has_method(handle, "_OST"))
		acpi_evaluate_ost(handle, ACPI_PROCESSOR_NOTIFY_PERFORMANCE,
				  status, NULL);
}

void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
{
	int ret;

	if (ignore_ppc) {
		/*
		 * The _OST object is evaluated only for notification
		 * events; otherwise it is skipped.
		 */
		if (event_flag)
			acpi_processor_ppc_ost(pr->handle, 1);
		return;
	}

	ret = acpi_processor_get_platform_limit(pr);
	/*
	 * The _OST object is evaluated only for notification events;
	 * otherwise it is skipped.
	 */
	if (event_flag) {
		if (ret < 0)
			acpi_processor_ppc_ost(pr->handle, 1);
		else
			acpi_processor_ppc_ost(pr->handle, 0);
	}
	if (ret >= 0)
		cpufreq_update_policy(pr->id);
}

int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
{
	struct acpi_processor *pr;

	pr = per_cpu(processors, cpu);
	if (!pr || !pr->performance || !pr->performance->state_count)
		return -ENODEV;
	*limit = pr->performance->states[pr->performance_platform_limit].
		core_frequency * 1000;
	return 0;
}
EXPORT_SYMBOL(acpi_processor_get_bios_limit);
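/*
 * Usage sketch for the helper above (hedged: the surrounding driver
 * code is hypothetical, but this mirrors how a cpufreq driver can apply
 * the BIOS limit; note the returned value is already in kHz):
 *
 *	unsigned int limit;
 *
 *	if (!acpi_processor_get_bios_limit(policy->cpu, &limit))
 *		cpufreq_verify_within_limits(policy, 0, limit);
 */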
void acpi_processor_ppc_init(void)
{
	if (!cpufreq_register_notifier
	    (&acpi_ppc_notifier_block, CPUFREQ_POLICY_NOTIFIER))
		acpi_processor_ppc_status |= PPC_REGISTERED;
	else
		printk(KERN_DEBUG
		       "Warning: Processor Platform Limit not supported.\n");
}

void acpi_processor_ppc_exit(void)
{
	if (acpi_processor_ppc_status & PPC_REGISTERED)
		cpufreq_unregister_notifier(&acpi_ppc_notifier_block,
					    CPUFREQ_POLICY_NOTIFIER);

	acpi_processor_ppc_status &= ~PPC_REGISTERED;
}

static int acpi_processor_get_performance_control(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *pct = NULL;
	union acpi_object obj = { 0 };


	status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PCT"));
		return -ENODEV;
	}

	pct = (union acpi_object *)buffer.pointer;
	if (!pct || (pct->type != ACPI_TYPE_PACKAGE)
	    || (pct->package.count != 2)) {
		printk(KERN_ERR PREFIX "Invalid _PCT data\n");
		result = -EFAULT;
		goto end;
	}

	/*
	 * control_register
	 */

	obj = pct->package.elements[0];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_pct_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX "Invalid _PCT data (control_register)\n");
		result = -EFAULT;
		goto end;
	}
	memcpy(&pr->performance->control_register, obj.buffer.pointer,
	       sizeof(struct acpi_pct_register));

	/*
	 * status_register
	 */

	obj = pct->package.elements[1];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_pct_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX "Invalid _PCT data (status_register)\n");
		result = -EFAULT;
		goto end;
	}

	memcpy(&pr->performance->status_register, obj.buffer.pointer,
	       sizeof(struct acpi_pct_register));

end:
	kfree(buffer.pointer);

	return result;
}
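/*
 * For reference: _PCT is expected to return a two-element package,
 * element 0 describing the performance control register and element 1
 * the performance status register, each wrapped in a buffer holding a
 * struct acpi_pct_register - which is why exactly two buffer elements
 * of at least that size are demanded above.
 */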
#ifdef CONFIG_X86
/*
 * Some AMD CPUs support 50MHz frequency multiples, but only provide
 * 100MHz rounding in their ACPI data. Calculate the real values and
 * fix up the _PSS data.
 */
static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
{
	u32 hi, lo, fid, did;
	int index = px->control & 0x00000007;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return;

	if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
	    || boot_cpu_data.x86 == 0x11) {
		rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi);
		/*
		 * MSR C001_0064+:
		 * Bit 63: PstateEn. Read-write. If set, the P-state is valid.
		 * (Bit 63 of the MSR is bit 31 of the high dword read above.)
		 */
		if (!(hi & BIT(31)))
			return;

		fid = lo & 0x3f;	/* CpuFid: bits 5:0 */
		did = (lo >> 6) & 7;	/* CpuDid: bits 8:6 */
		if (boot_cpu_data.x86 == 0x10)
			px->core_frequency = (100 * (fid + 0x10)) >> did;
		else
			px->core_frequency = (100 * (fid + 8)) >> did;
	}
}
#else
static void amd_fixup_frequency(struct acpi_processor_px *px, int i) {}
#endif

static int acpi_processor_get_performance_states(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNNN"), "NNNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *pss = NULL;
	int i;
	int last_invalid = -1;


	status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PSS"));
		return -ENODEV;
	}

	pss = buffer.pointer;
	if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) {
		printk(KERN_ERR PREFIX "Invalid _PSS data\n");
		result = -EFAULT;
		goto end;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d performance states\n",
			  pss->package.count));

	pr->performance->state_count = pss->package.count;
	pr->performance->states =
	    kmalloc(sizeof(struct acpi_processor_px) * pss->package.count,
		    GFP_KERNEL);
	if (!pr->performance->states) {
		result = -ENOMEM;
		goto end;
	}

	for (i = 0; i < pr->performance->state_count; i++) {

		struct acpi_processor_px *px = &(pr->performance->states[i]);

		state.length = sizeof(struct acpi_processor_px);
		state.pointer = px;

		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));

		status = acpi_extract_package(&(pss->package.elements[i]),
					      &format, &state);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status, "Invalid _PSS data"));
			result = -EFAULT;
			kfree(pr->performance->states);
			goto end;
		}

		amd_fixup_frequency(px, i);

		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
				  i,
				  (u32) px->core_frequency,
				  (u32) px->power,
				  (u32) px->transition_latency,
				  (u32) px->bus_master_latency,
				  (u32) px->control, (u32) px->status));

		/*
		 * Check that ACPI's u64 MHz will be valid as u32 kHz in cpufreq
		 */
		if (!px->core_frequency ||
		    ((u32)(px->core_frequency * 1000) !=
		     (px->core_frequency * 1000))) {
			printk(KERN_ERR FW_BUG PREFIX
			       "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n",
			       pr->id, px->core_frequency);
			if (last_invalid == -1)
				last_invalid = i;
		} else {
			if (last_invalid != -1) {
				/*
				 * Copy this valid entry over the last_invalid entry
				 */
				memcpy(&(pr->performance->states[last_invalid]),
				       px, sizeof(struct acpi_processor_px));
				++last_invalid;
			}
		}
	}

	if (last_invalid == 0) {
		printk(KERN_ERR FW_BUG PREFIX
		       "No valid BIOS _PSS frequency found for processor %d\n", pr->id);
		result = -EFAULT;
		kfree(pr->performance->states);
		pr->performance->states = NULL;
	}

	if (last_invalid > 0)
		pr->performance->state_count = last_invalid;

end:
	kfree(buffer.pointer);

	return result;
}
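/*
 * Compaction example for the last_invalid logic above (frequencies are
 * illustrative): given _PSS core frequencies of {2000, 0, 1800, 1600} MHz,
 * entry 1 is rejected, entries 2 and 3 are copied down into slots 1 and 2,
 * and state_count is trimmed from 4 to 3, leaving {2000, 1800, 1600}.
 */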
int acpi_processor_get_performance_info(struct acpi_processor *pr)
{
	int result = 0;

	if (!pr || !pr->performance || !pr->handle)
		return -EINVAL;

	if (!acpi_has_method(pr->handle, "_PCT")) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "ACPI-based processor performance control unavailable\n"));
		return -ENODEV;
	}

	result = acpi_processor_get_performance_control(pr);
	if (result)
		goto update_bios;

	result = acpi_processor_get_performance_states(pr);
	if (result)
		goto update_bios;

	/* We need to call _PPC once when cpufreq starts */
	if (ignore_ppc != 1)
		result = acpi_processor_get_platform_limit(pr);

	return result;

	/*
	 * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that
	 * the BIOS is older than the CPU and does not know its frequencies
	 */
 update_bios:
#ifdef CONFIG_X86
	if (acpi_has_method(pr->handle, "_PPC")) {
		if (boot_cpu_has(X86_FEATURE_EST))
			printk(KERN_WARNING FW_BUG "BIOS needs update for CPU "
			       "frequency support\n");
	}
#endif
	return result;
}
EXPORT_SYMBOL_GPL(acpi_processor_get_performance_info);

int acpi_processor_pstate_control(void)
{
	acpi_status status;

	if (!acpi_gbl_FADT.smi_command || !acpi_gbl_FADT.pstate_control)
		return 0;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Writing pstate_control [0x%x] to smi_command [0x%x]\n",
			  acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));

	status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
				    (u32)acpi_gbl_FADT.pstate_control, 8);
	if (ACPI_SUCCESS(status))
		return 1;

	ACPI_EXCEPTION((AE_INFO, status,
			"Failed to write pstate_control [0x%x] to smi_command [0x%x]",
			acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));
	return -EIO;
}
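/*
 * Worked example for the handshake above (values are illustrative):
 * with acpi_gbl_FADT.smi_command = 0xb2 and
 * acpi_gbl_FADT.pstate_control = 0x80, the routine performs an 8-bit
 * write of 0x80 to I/O port 0xb2, signalling the platform's SMM code
 * that the OS is assuming control of P-state transitions.
 */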
int acpi_processor_notify_smm(struct module *calling_module)
{
	static int is_done = 0;
	int result;

	if (!(acpi_processor_ppc_status & PPC_REGISTERED))
		return -EBUSY;

	if (!try_module_get(calling_module))
		return -EINVAL;

	/* is_done is set to negative if an error occurred,
	 * and to positive if _no_ error occurred, but SMM
	 * was already notified. This avoids double notification
	 * which might lead to unexpected results...
	 */
	if (is_done > 0) {
		module_put(calling_module);
		return 0;
	} else if (is_done < 0) {
		module_put(calling_module);
		return is_done;
	}

	is_done = -EIO;

	result = acpi_processor_pstate_control();
	if (!result) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_control\n"));
		module_put(calling_module);
		return 0;
	}
	if (result < 0) {
		module_put(calling_module);
		return result;
	}

	/* Success. If there's no _PPC, we need to fear nothing, so
	 * we can allow the cpufreq driver to be rmmod'ed.
	 */
	is_done = 1;

	if (!(acpi_processor_ppc_status & PPC_IN_USE))
		module_put(calling_module);

	return 0;
}

EXPORT_SYMBOL(acpi_processor_notify_smm);

static int acpi_processor_get_psd(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object(pr->handle, "_PSD", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		return -ENODEV;
	}

	psd = buffer.pointer;
	if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) {
		printk(KERN_ERR PREFIX "Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (psd->package.count != 1) {
		printk(KERN_ERR PREFIX "Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	pdomain = &(pr->performance->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		printk(KERN_ERR PREFIX "Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		printk(KERN_ERR PREFIX "Unknown _PSD:num_entries\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		printk(KERN_ERR PREFIX "Unknown _PSD:revision\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		printk(KERN_ERR PREFIX "Invalid _PSD:coord_type\n");
		result = -EFAULT;
		goto end;
	}
end:
	kfree(buffer.pointer);
	return result;
}
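/*
 * Example of a _PSD package accepted by the checks above, matching the
 * "NNNNN" format (values are illustrative): {5, 0, 0, 0xfd, 2}, i.e.
 * num_entries = 5 (ACPI_PSD_REV0_ENTRIES), revision = 0
 * (ACPI_PSD_REV0_REVISION), domain = 0, coord_type = 0xfd
 * (DOMAIN_COORD_TYPE_SW_ANY) and num_processors = 2: two CPUs share the
 * P-state domain, coordinated by software, with any one CPU's request
 * taking effect for both.
 */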
int acpi_processor_preregister_performance(
		struct acpi_processor_performance __percpu *performance)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct acpi_processor *pr;
	struct acpi_psd_package *pdomain;
	struct acpi_processor *match_pr;
	struct acpi_psd_package *match_pdomain;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&performance_mutex);

	/*
	 * Check if another driver has already registered, and abort before
	 * changing pr->performance if it has. Check input data as well.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr) {
			/* Look only at processors in ACPI namespace */
			continue;
		}

		if (pr->performance) {
			retval = -EBUSY;
			goto err_out;
		}

		if (!performance || !per_cpu_ptr(performance, i)) {
			retval = -EINVAL;
			goto err_out;
		}
	}

	/* Call _PSD for all CPUs */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		pr->performance = per_cpu_ptr(performance, i);
		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
		if (acpi_processor_get_psd(pr)) {
			retval = -EINVAL;
			continue;
		}
	}
	if (retval)
		goto err_ret;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up the
	 * P-state domain info.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		pdomain = &(pr->performance->domain_info);
		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */

			if (match_pdomain->num_processors != count_target) {
				retval = -EINVAL;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EINVAL;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->performance->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->performance->shared_type =
					pr->performance->shared_type;
			cpumask_copy(match_pr->performance->shared_cpu_map,
				     pr->performance->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr || !pr->performance)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->performance->shared_cpu_map);
			cpumask_set_cpu(i, pr->performance->shared_cpu_map);
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
		pr->performance = NULL; /* Will be set for real in register */
	}

err_out:
	mutex_unlock(&performance_mutex);
	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL(acpi_processor_preregister_performance);

int
acpi_processor_register_performance(struct acpi_processor_performance
				    *performance, unsigned int cpu)
{
	struct acpi_processor *pr;

	if (!(acpi_processor_ppc_status & PPC_REGISTERED))
		return -EINVAL;

	mutex_lock(&performance_mutex);

	pr = per_cpu(processors, cpu);
	if (!pr) {
		mutex_unlock(&performance_mutex);
		return -ENODEV;
	}

	if (pr->performance) {
		mutex_unlock(&performance_mutex);
		return -EBUSY;
	}

	WARN_ON(!performance);

	pr->performance = performance;

	if (acpi_processor_get_performance_info(pr)) {
		pr->performance = NULL;
		mutex_unlock(&performance_mutex);
		return -EIO;
	}

	mutex_unlock(&performance_mutex);
	return 0;
}

EXPORT_SYMBOL(acpi_processor_register_performance);
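/*
 * Registration sketch (hedged: this mirrors the call sequence of a
 * cpufreq driver such as acpi-cpufreq, but the surrounding driver code
 * and the "perf" variable are hypothetical):
 *
 *	struct acpi_processor_performance __percpu *perf;
 *
 *	perf = alloc_percpu(struct acpi_processor_performance);
 *	// once, at driver init: fills in shared_cpu_map/shared_type
 *	acpi_processor_preregister_performance(perf);
 *	// per CPU, when a policy is created: evaluates _PCT/_PSS/_PPC
 *	acpi_processor_register_performance(per_cpu_ptr(perf, cpu), cpu);
 *	...
 *	acpi_processor_unregister_performance(cpu);
 */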
void acpi_processor_unregister_performance(unsigned int cpu)
{
	struct acpi_processor *pr;

	mutex_lock(&performance_mutex);

	pr = per_cpu(processors, cpu);
	if (!pr) {
		mutex_unlock(&performance_mutex);
		return;
	}

	if (pr->performance)
		kfree(pr->performance->states);
	pr->performance = NULL;

	mutex_unlock(&performance_mutex);
}

EXPORT_SYMBOL(acpi_processor_unregister_performance);