/*
 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers which may be memory mapped or hardware registers and also may
 * include some static integer values.
 *
 * CPU performance is on an abstract continuous scale as opposed to a
 * discretized P-state scale which is tied to CPU frequency only. In brief,
 * the basic operation involves:
 *
 * - OS makes a CPU performance request (it can provide min and max bounds).
 *
 * - Platform (such as a BMC) is free to optimize the request within the
 *   requested bounds depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to the OS.
 *
 * The communication between OS and platform occurs through another medium
 * called the Platform Communication Channel (PCC). This is a generic
 * mailbox-like mechanism which includes doorbell semantics to indicate
 * register updates. See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1
 * and above specifications.
 */

#define pr_fmt(fmt)	"ACPI CPPC: " fmt

#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/rwsem.h>
#include <linux/wait.h>

#include <acpi/cppc_acpi.h>

struct cppc_pcc_data {
	struct mbox_chan *pcc_channel;
	void __iomem *pcc_comm_addr;
	bool pcc_channel_acquired;
	ktime_t deadline;
	unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;

	bool pending_pcc_write_cmd;	/* Any pending/batched PCC write cmds? */
	bool platform_owns_pcc;		/* Ownership of PCC subspace */
	unsigned int pcc_write_cnt;	/* Running count of PCC write commands */

	/*
	 * Lock to provide controlled access to the PCC channel.
	 *
	 * For performance critical use cases (currently cppc_set_perf), we
	 * need to take the read_lock and check if the channel belongs to
	 * OSPM before reading or writing to the PCC subspace. We need to
	 * take the write_lock before transferring the channel ownership to
	 * the platform via a doorbell. This allows us to batch a number of
	 * CPPC requests if they happen to originate at about the same time.
	 *
	 * For non-performance critical use cases (init), take the
	 * write_lock for all purposes, which gives exclusive access.
	 */
	struct rw_semaphore pcc_lock;

	/* Wait queue for CPUs whose requests were batched */
	wait_queue_head_t pcc_write_wait_q;
	ktime_t last_cmd_cmpl_time;
	ktime_t last_mpar_reset;
	int mpar_count;
	int refcount;
};

/* Array to represent the PCC channel per subspace id */
static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
/* The cpu_pcc_subspace_idx contains the per-CPU subspace id */
static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
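/*
 * A minimal sketch of the two-phase pcc_lock pattern described above (the
 * real implementation lives in cppc_set_perf() and send_pcc_cmd() below;
 * error handling is elided here for brevity):
 *
 *	down_read(&pcc_ss_data->pcc_lock);		// Phase-I
 *	if (pcc_ss_data->platform_owns_pcc)
 *		check_pcc_chan(pcc_ss_id, false);	// wait for OSPM ownership
 *	cpc_write(cpu, desired_reg, desired_perf);	// many CPUs in parallel
 *	up_read(&pcc_ss_data->pcc_lock);
 *
 *	if (down_write_trylock(&pcc_ss_data->pcc_lock)) {	// Phase-II
 *		send_pcc_cmd(pcc_ss_id, CMD_WRITE);	// one CPU rings the doorbell
 *		up_write(&pcc_ss_data->pcc_lock);
 *	}
 */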
/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which lets us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

/* pcc mapped address + header size + offset within PCC subspace */
#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
						0x8 + (offs))

/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&		\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_PLATFORM_COMM)

/* Evaluates to True if reg is a NULL register descriptor */
#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
				(reg)->address == 0 &&			\
				(reg)->bit_width == 0 &&		\
				(reg)->bit_offset == 0 &&		\
				(reg)->access_width == 0)

/* Evaluates to True if an optional cpc field is supported */
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?		\
				!!(cpc)->cpc_entry.int_value :		\
				!IS_NULL_REG(&(cpc)->cpc_entry.reg))
/*
 * Arbitrary retries in case the remote processor is slow to respond
 * to PCC commands. Keeping it high enough to cover emulators where
 * the processors run painfully slow.
 */
#define NUM_RETRIES 500ULL

struct cppc_attr {
	struct attribute attr;
	ssize_t (*show)(struct kobject *kobj,
			struct attribute *attr, char *buf);
	ssize_t (*store)(struct kobject *kobj,
			struct attribute *attr, const char *c, ssize_t count);
};

#define define_one_cppc_ro(_name)		\
static struct cppc_attr _name =			\
__ATTR(_name, 0444, show_##_name, NULL)

#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)

#define show_cppc_data(access_fn, struct_name, member_name)		\
	static ssize_t show_##member_name(struct kobject *kobj,		\
					struct attribute *attr, char *buf) \
	{								\
		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);		\
		struct struct_name st_name = {0};			\
		int ret;						\
									\
		ret = access_fn(cpc_ptr->cpu_id, &st_name);		\
		if (ret)						\
			return ret;					\
									\
		return scnprintf(buf, PAGE_SIZE, "%llu\n",		\
				(u64)st_name.member_name);		\
	}								\
	define_one_cppc_ro(member_name)

show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);

static ssize_t show_feedback_ctrs(struct kobject *kobj,
		struct attribute *attr, char *buf)
{
	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
	struct cppc_perf_fb_ctrs fb_ctrs = {0};
	int ret;

	ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
	if (ret)
		return ret;

	return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
			fb_ctrs.reference, fb_ctrs.delivered);
}
define_one_cppc_ro(feedback_ctrs);

static struct attribute *cppc_attrs[] = {
	&feedback_ctrs.attr,
	&reference_perf.attr,
	&wraparound_time.attr,
	&highest_perf.attr,
	&lowest_perf.attr,
	&lowest_nonlinear_perf.attr,
	&nominal_perf.attr,
	NULL
};

static struct kobj_type cppc_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = cppc_attrs,
};
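/*
 * For illustration, show_cppc_data(cppc_get_perf_caps, cppc_perf_caps,
 * highest_perf) above expands to roughly the following read-only sysfs
 * attribute (a sketch; whitespace differs from the real preprocessor
 * output):
 *
 *	static ssize_t show_highest_perf(struct kobject *kobj,
 *					 struct attribute *attr, char *buf)
 *	{
 *		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
 *		struct cppc_perf_caps st_name = {0};
 *		int ret = cppc_get_perf_caps(cpc_ptr->cpu_id, &st_name);
 *
 *		if (ret)
 *			return ret;
 *		return scnprintf(buf, PAGE_SIZE, "%llu\n",
 *				 (u64)st_name.highest_perf);
 *	}
 *	static struct cppc_attr highest_perf =
 *		__ATTR(highest_perf, 0444, show_highest_perf, NULL);
 */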
static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
{
	int ret = -EIO, status = 0;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_comm_addr;
	ktime_t next_deadline = ktime_add(ktime_get(),
					  pcc_ss_data->deadline);

	if (!pcc_ss_data->platform_owns_pcc)
		return 0;

	/* Retry in case the remote processor was too slow to catch up. */
	while (!ktime_after(ktime_get(), next_deadline)) {
		/*
		 * Per spec, prior to boot the PCC space will be initialized
		 * by the platform and should have set the command completion
		 * bit when PCC can be used by OSPM.
		 */
		status = readw_relaxed(&generic_comm_base->status);
		if (status & PCC_CMD_COMPLETE_MASK) {
			ret = 0;
			if (chk_err_bit && (status & PCC_ERROR_MASK))
				ret = -EIO;
			break;
		}
		/*
		 * Reducing the bus traffic in case this loop takes longer
		 * than a few retries.
		 */
		udelay(3);
	}

	if (likely(!ret))
		pcc_ss_data->platform_owns_pcc = false;
	else
		pr_err("PCC check channel failed. Status=%x\n", status);

	return ret;
}
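/*
 * A worked example of the PCCT rate limits that send_pcc_cmd() enforces
 * below, using illustrative (not spec-mandated) numbers:
 *
 *	pcc_mrtt = 60us:  if the previous command completed 25us ago,
 *	udelay(60 - 25) = udelay(35) before issuing the next command.
 *
 *	pcc_mpar = 600:   at most 600 commands per 60s window. mpar_count
 *	starts at 600 and decrements per command; once it hits zero, any
 *	further command inside the same 60s window fails with -EIO.
 */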
/*
 * This function transfers the ownership of the PCC to the platform,
 * so it must be called while holding write_lock(pcc_lock).
 */
static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
	int ret = -EIO, i;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *)pcc_ss_data->pcc_comm_addr;
	unsigned int time_delta;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space.
	 */
	if (cmd == CMD_READ) {
		/*
		 * If there are pending cpc_writes, then we stole the channel
		 * before write completion, so first send a WRITE command to
		 * the platform.
		 */
		if (pcc_ss_data->pending_pcc_write_cmd)
			send_pcc_cmd(pcc_ss_id, CMD_WRITE);

		ret = check_pcc_chan(pcc_ss_id, false);
		if (ret)
			goto end;
	} else /* CMD_WRITE */
		pcc_ss_data->pending_pcc_write_cmd = false;

	/*
	 * Handle the Minimum Request Turnaround Time (MRTT):
	 * "The minimum amount of time that OSPM must wait after the
	 * completion of a command before issuing the next command,
	 * in microseconds."
	 */
	if (pcc_ss_data->pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(),
					    pcc_ss_data->last_cmd_cmpl_time);
		if (pcc_ss_data->pcc_mrtt > time_delta)
			udelay(pcc_ss_data->pcc_mrtt - time_delta);
	}

	/*
	 * Handle the non-zero Maximum Periodic Access Rate (MPAR):
	 * "The maximum number of periodic requests that the subspace channel
	 * can support, reported in commands per minute. 0 indicates no
	 * limitation."
	 *
	 * Ideally this parameter should be zero or large enough to handle
	 * the maximum number of requests that all the cores in the system
	 * can collectively generate. If it is not, we will follow the spec
	 * and just not send the request to the platform after hitting the
	 * MPAR limit in any 60s window.
	 */
	if (pcc_ss_data->pcc_mpar) {
		if (pcc_ss_data->mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(),
						    pcc_ss_data->last_mpar_reset);
			if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
				pr_debug("PCC cmd not sent due to MPAR limit\n");
				ret = -EIO;
				goto end;
			}
			pcc_ss_data->last_mpar_reset = ktime_get();
			pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
		}
		pcc_ss_data->mpar_count--;
	}

	/* Write to the shared comm region. */
	writew_relaxed(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew_relaxed(0, &generic_comm_base->status);

	pcc_ss_data->platform_owns_pcc = true;

	/* Ring doorbell */
	ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
		       cmd, ret);
		goto end;
	}

	/* Wait for completion and check for the PCC error bit. */
	ret = check_pcc_chan(pcc_ss_id, true);

	if (pcc_ss_data->pcc_mrtt)
		pcc_ss_data->last_cmd_cmpl_time = ktime_get();

	if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
		mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
	else
		mbox_client_txdone(pcc_ss_data->pcc_channel, ret);

end:
	if (cmd == CMD_WRITE) {
		if (unlikely(ret)) {
			for_each_possible_cpu(i) {
				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);

				if (!desc)
					continue;

				if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
					desc->write_cmd_status = ret;
			}
		}
		pcc_ss_data->pcc_write_cnt++;
		wake_up_all(&pcc_ss_data->pcc_write_wait_q);
	}

	return ret;
}
static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret < 0)
		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
	else
		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
}

static struct mbox_client cppc_mbox_cl = {
	.tx_done = cppc_chan_tx_done,
	.knows_txdone = true,
};

static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer,
					    ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}
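/*
 * For reference, a _PSD object matching the "NNNNN" format parsed above
 * might look like this in ASL (illustrative values):
 *
 *	Name(_PSD, Package() {
 *		Package() {
 *			5,	// NumEntries
 *			0,	// Revision
 *			0,	// Domain: this CPU belongs to domain 0
 *			0xFD,	// CoordType: DOMAIN_COORD_TYPE_SW_ANY
 *			4	// NumProcessors sharing the domain
 *		}
 *	})
 */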
/**
 * acpi_get_psd_map - Map the CPUs in a common freq domain.
 * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct cppc_cpudata *pr, *match_pr;
	struct acpi_psd_package *pdomain;
	struct acpi_psd_package *match_pdomain;
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up the
	 * P-state domain info.
	 */
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!cpc_ptr) {
			retval = -EFAULT;
			goto err_ret;
		}

		pdomain = &(cpc_ptr->domain_info);
		cpumask_set_cpu(i, pr->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */
			if (match_pdomain->num_processors != count_target) {
				retval = -EFAULT;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EFAULT;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = all_cpu_data[j];
			if (!match_pr)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->shared_type = pr->shared_type;
			cpumask_copy(match_pr->shared_cpu_map,
				     pr->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->shared_cpu_map);
			cpumask_set_cpu(i, pr->shared_cpu_map);
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
	}

	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);
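/*
 * Example of the resulting map (hypothetical 4-CPU system): if CPUs 0-3
 * all report _PSD domain 0 with CoordType SW_ANY and NumProcessors = 4,
 * then after acpi_get_psd_map() each CPU's shared_cpu_map contains CPUs
 * 0-3 and its shared_type is CPUFREQ_SHARED_TYPE_ANY.
 */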
static int register_pcc_channel(int pcc_ss_idx)
{
	struct acpi_pcct_hw_reduced *cppc_ss;
	u64 usecs_lat;

	if (pcc_ss_idx >= 0) {
		pcc_data[pcc_ss_idx]->pcc_channel =
			pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);

		if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) {
			pr_err("Failed to find PCC communication channel\n");
			return -ENODEV;
		}

		/*
		 * The PCC mailbox controller driver should
		 * have parsed the PCCT (global table of all
		 * PCC channels) and stored pointers to the
		 * subspace communication region in con_priv.
		 */
		cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv;

		if (!cppc_ss) {
			pr_err("No PCC subspace found for CPPC\n");
			return -ENODEV;
		}

		/*
		 * cppc_ss->latency is just a nominal value. In reality
		 * the remote processor could be much slower to reply.
		 * So add an arbitrary amount of wait on top of nominal
		 * (e.g. a nominal latency of 100us yields a deadline of
		 * 500 * 100us = 50ms).
		 */
		usecs_lat = NUM_RETRIES * cppc_ss->latency;
		pcc_data[pcc_ss_idx]->deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);
		pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time;
		pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate;
		pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency;

		pcc_data[pcc_ss_idx]->pcc_comm_addr =
			acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
		if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
			pr_err("Failed to ioremap PCC comm region mem\n");
			return -ENOMEM;
		}

		/* Set flag so that we don't come here for each CPU. */
		pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
	}

	return 0;
}

/**
 * cpc_ffh_supported() - check if FFH reading supported
 *
 * Check if the architecture has support for functional fixed hardware
 * read/write capability.
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_ffh_supported(void)
{
	return false;
}

/**
 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
 * @pcc_ss_id: PCC subspace id for which to allocate data.
 *
 * Check and allocate the cppc_pcc_data memory.
 * In some processor configurations it is possible that the same subspace
 * is shared between multiple CPUs. This is seen especially in CPUs
 * with hardware multi-threading support.
 *
 * Return: 0 for success, errno for failure
 */
int pcc_data_alloc(int pcc_ss_id)
{
	if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
		return -EINVAL;

	if (pcc_data[pcc_ss_id]) {
		pcc_data[pcc_ss_id]->refcount++;
	} else {
		pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
					      GFP_KERNEL);
		if (!pcc_data[pcc_ss_id])
			return -ENOMEM;
		pcc_data[pcc_ss_id]->refcount++;
	}

	return 0;
}

/*
 * An example CPC table looks like the following.
 *
 *	Name(_CPC, Package() {
 *		17,					// NumEntries
 *		1,					// Revision
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},
 *						// Highest Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},
 *						// Nominal Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},
 *						// Lowest Nonlinear Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},
 *						// Lowest Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},
 *						// Guaranteed Performance Register
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},
 *						// Desired Performance Register
 *		ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
 *		..
 *		..
 *		..
 *	})
 *
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *	Register (
 *		PCC,	// AddressSpaceKeyword
 *		8,	// RegisterBitWidth
 *		8,	// RegisterBitOffset
 *		0x30,	// RegisterAddress
 *		9,	// AccessSize (subspace ID)
 *		0
 *	)
 */
/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	struct device *cpu_dev;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	int pcc_subspace_id = -1;
	acpi_status status;
	int ret = -EFAULT;

	/* Parse the ACPI _CPC table for this CPU. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
					    ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for NumEntries\n",
			 cpc_obj->type);
		goto out_free;
	}

	/* Only support CPPCv2. Bail otherwise. */
	if (num_ent != CPPC_NUM_ENT) {
		pr_debug("Firmware exports %d entries. Expected: %d\n",
			 num_ent, CPPC_NUM_ENT);
		goto out_free;
	}

	cpc_ptr->num_entries = num_ent;

	/* Second entry should be revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for Revision\n",
			 cpc_obj->type);
		goto out_free;
	}

	if (cpc_rev != CPPC_REV) {
		pr_debug("Firmware exports revision:%d. Expected:%d\n",
			 cpc_rev, CPPC_REV);
		goto out_free;
	}

	/* Iterate through remaining entries in _CPC */
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER) {
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries. The same PCC index
			 * will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_subspace_id < 0) {
					pcc_subspace_id = gas_t->access_width;
					if (pcc_data_alloc(pcc_subspace_id))
						goto out_free;
				} else if (pcc_subspace_id != gas_t->access_width) {
					pr_debug("Mismatched PCC ids.\n");
					goto out_free;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				if (gas_t->address) {
					void __iomem *addr;

					addr = ioremap(gas_t->address, gas_t->bit_width/8);
					if (!addr)
						goto out_free;
					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
				}
			} else {
				if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
					/* Support only PCC, SYS MEM and FFH type regs. */
					pr_debug("Unsupported register type: %d\n", gas_t->space_id);
					goto out_free;
				}
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
			goto out_free;
		}
	}
	per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;

	/* Store CPU Logical ID */
	cpc_ptr->cpu_id = pr->id;

	/* Parse PSD data for this CPU */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register the PCC channel once per PCC subspace id. */
	if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_subspace_id);
		if (ret)
			goto out_free;

		init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
		init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
	}

	/* Everything looks okay */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	/* Add per logical CPU nodes for reading its feedback counters. */
	cpu_dev = get_cpu_device(pr->id);
	if (!cpu_dev) {
		ret = -EINVAL;
		goto out_free;
	}

	/* Plug PSD data into this CPU's CPC descriptor. */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
				   "acpi_cppc");
	if (ret) {
		per_cpu(cpc_desc_ptr, pr->id) = NULL;
		goto out_free;
	}

	kfree(output.pointer);
	return 0;

out_free:
	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;

		if (addr)
			iounmap(addr);
	}
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);
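/*
 * On success, the kobject registered above exposes the CPPC data of each
 * CPU via sysfs. For example, for CPU0 one would expect entries such as:
 *
 *	/sys/devices/system/cpu/cpu0/acpi_cppc/highest_perf
 *	/sys/devices/system/cpu/cpu0/acpi_cppc/lowest_perf
 *	/sys/devices/system/cpu/cpu0/acpi_cppc/nominal_perf
 *	/sys/devices/system/cpu/cpu0/acpi_cppc/lowest_nonlinear_perf
 *	/sys/devices/system/cpu/cpu0/acpi_cppc/reference_perf
 *	/sys/devices/system/cpu/cpu0/acpi_cppc/wraparound_time
 *	/sys/devices/system/cpu/cpu0/acpi_cppc/feedback_ctrs
 */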
/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
	struct cpc_desc *cpc_ptr;
	unsigned int i;
	void __iomem *addr;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);

	if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
		if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
			pcc_data[pcc_ss_id]->refcount--;
			if (!pcc_data[pcc_ss_id]->refcount) {
				pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
				pcc_data[pcc_ss_id]->pcc_channel_acquired = 0;
				kfree(pcc_data[pcc_ss_id]);
			}
		}
	}

	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
	if (!cpc_ptr)
		return;

	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
		if (addr)
			iounmap(addr);
	}

	kobject_put(&cpc_ptr->kobj);
	kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

/**
 * cpc_read_ffh() - Read FFH register
 * @cpunum:	CPU number to read
 * @reg:	cppc register information
 * @val:	place holder for return value
 *
 * Read bit_width bits from a specified address and bit_offset
 *
 * Return: 0 for success, or an error code
 */
int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	return -ENOTSUPP;
}

/**
 * cpc_write_ffh() - Write FFH register
 * @cpunum:	CPU number to write
 * @reg:	cppc register information
 * @val:	value to write
 *
 * Write value of bit_width bits to a specified address and bit_offset
 *
 * Return: 0 for success, or an error code
 */
int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	return -ENOTSUPP;
}
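/*
 * Architectures can override the weak FFH stubs above. A minimal,
 * hypothetical override might look like the following, where
 * my_arch_read_perf_reg() stands in for an arch specific accessor
 * (it is not a real kernel interface):
 *
 *	int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
 *	{
 *		return my_arch_read_perf_reg(cpunum, reg->address, val);
 *	}
 *
 * along with a cpc_ffh_supported() implementation that returns true.
 */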
/*
 * Since cpc_read() and cpc_write() are called while holding pcc_lock,
 * they should be as fast as possible. We have already mapped the PCC
 * subspace during init, so we can directly write to it.
 */
static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
	int ret_val = 0;
	void __iomem *vaddr = 0;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg_res->type == ACPI_TYPE_INTEGER) {
		*val = reg_res->cpc_entry.int_value;
		return ret_val;
	}

	*val = 0;
	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_read_ffh(cpu, reg, val);
	else
		return acpi_os_read_memory((acpi_physical_address)reg->address,
					   val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		*val = readb_relaxed(vaddr);
		break;
	case 16:
		*val = readw_relaxed(vaddr);
		break;
	case 32:
		*val = readl_relaxed(vaddr);
		break;
	case 64:
		*val = readq_relaxed(vaddr);
		break;
	default:
		pr_debug("Error: Cannot read %u bit width from PCC\n",
			 reg->bit_width);
		ret_val = -EFAULT;
	}

	return ret_val;
}

static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
	int ret_val = 0;
	void __iomem *vaddr = 0;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_write_ffh(cpu, reg, val);
	else
		return acpi_os_write_memory((acpi_physical_address)reg->address,
					    val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		writeb_relaxed(val, vaddr);
		break;
	case 16:
		writew_relaxed(val, vaddr);
		break;
	case 32:
		writel_relaxed(val, vaddr);
		break;
	case 64:
		writeq_relaxed(val, vaddr);
		break;
	default:
		pr_debug("Error: Cannot write %u bit width to PCC\n",
			 reg->bit_width);
		ret_val = -EFAULT;
		break;
	}

	return ret_val;
}
/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg,
		*lowest_non_linear_reg, *nominal_reg;
	u64 high, low, nom, min_nonlinear;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc || pcc_ss_id < 0) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	pcc_ss_data = pcc_data[pcc_ss_id];
	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
	nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

	/* Are any of the regs PCC? */
	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
	    CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg)) {
		regs_in_pcc = 1;
		down_write(&pcc_ss_data->pcc_lock);
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, highest_reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(cpunum, lowest_reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(cpunum, nominal_reg, &nom);
	perf_caps->nominal_perf = nom;

	cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
	perf_caps->lowest_nonlinear_perf = min_nonlinear;

	if (!high || !low || !nom || !min_nonlinear)
		ret = -EFAULT;

out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
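/*
 * A minimal caller sketch (error handling trimmed), as a cpufreq driver
 * might use it to size its frequency range:
 *
 *	struct cppc_perf_caps caps;
 *
 *	if (!cppc_get_perf_caps(cpu, &caps))
 *		pr_info("cpu%d perf range: %llu..%llu\n", cpu,
 *			(u64)caps.lowest_perf, (u64)caps.highest_perf);
 */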
/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *delivered_reg, *reference_reg,
		*ref_perf_reg, *ctr_wrap_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data;
	u64 delivered, reference, ref_perf, ctr_wrap_time;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc || pcc_ss_id < 0) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	pcc_ss_data = pcc_data[pcc_ss_id];
	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
	ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];

	/*
	 * If the reference perf register is not supported then we should
	 * use the nominal perf value.
	 */
	if (!CPC_SUPPORTED(ref_perf_reg))
		ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

	/* Are any of the regs PCC? */
	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
	    CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
		down_write(&pcc_ss_data->pcc_lock);
		regs_in_pcc = 1;
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, delivered_reg, &delivered);
	cpc_read(cpunum, reference_reg, &reference);
	cpc_read(cpunum, ref_perf_reg, &ref_perf);

	/*
	 * Per spec, if the ctr_wrap_time optional register is unsupported,
	 * then the performance counters are assumed to never wrap during
	 * the lifetime of the platform.
	 */
	ctr_wrap_time = (u64)(~((u64)0));
	if (CPC_SUPPORTED(ctr_wrap_reg))
		cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);

	if (!delivered || !reference || !ref_perf) {
		ret = -EFAULT;
		goto out_err;
	}

	perf_fb_ctrs->delivered = delivered;
	perf_fb_ctrs->reference = reference;
	perf_fb_ctrs->reference_perf = ref_perf;
	perf_fb_ctrs->wraparound_time = ctr_wrap_time;
out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
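/*
 * The feedback counters are typically consumed by sampling them twice and
 * computing the average delivered performance over that window, e.g. (a
 * sketch, ignoring counter wraparound):
 *
 *	struct cppc_perf_fb_ctrs fb0, fb1;
 *	u64 delta_delivered, delta_reference, delivered_perf;
 *
 *	cppc_get_perf_ctrs(cpu, &fb0);
 *	udelay(2);	// let the counters accumulate
 *	cppc_get_perf_ctrs(cpu, &fb1);
 *
 *	delta_delivered = fb1.delivered - fb0.delivered;
 *	delta_reference = fb1.reference - fb0.reference;
 *	delivered_perf = div64_u64(fb1.reference_perf * delta_delivered,
 *				   delta_reference);
 */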
/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cppc_pcc_data *pcc_ss_data;
	int ret = 0;

	if (!cpc_desc || pcc_ss_id < 0) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	pcc_ss_data = pcc_data[pcc_ss_id];
	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	/*
	 * This is Phase-I, where we want to write to the CPC registers.
	 * We want all CPUs to be able to execute this phase in parallel.
	 *
	 * Since the read_lock can be acquired by multiple CPUs
	 * simultaneously, we achieve that goal here.
	 */
	if (CPC_IN_PCC(desired_reg)) {
		down_read(&pcc_ss_data->pcc_lock);	/* BEGIN Phase-I */
		if (pcc_ss_data->platform_owns_pcc) {
			ret = check_pcc_chan(pcc_ss_id, false);
			if (ret) {
				up_read(&pcc_ss_data->pcc_lock);
				return ret;
			}
		}
		/*
		 * Update pending_pcc_write_cmd to make sure a PCC CMD_READ
		 * will not arrive and steal the channel during the switch
		 * to the write lock.
		 */
		pcc_ss_data->pending_pcc_write_cmd = true;
		cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
		cpc_desc->write_cmd_status = 0;
	}

	/*
	 * Skip writing MIN/MAX until Linux knows how to come up with
	 * useful values.
	 */
	cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);

	if (CPC_IN_PCC(desired_reg))
		up_read(&pcc_ss_data->pcc_lock);	/* END Phase-I */
	/*
	 * This is Phase-II, where we transfer the ownership of PCC to the
	 * platform.
	 *
	 * Short summary: think of a group of cppc_set_perf requests that
	 * happen in a short overlapping interval. The last CPU to come out
	 * of Phase-I will enter Phase-II and ring the doorbell.
	 *
	 * We have the following requirements for Phase-II:
	 *     1. We want to execute Phase-II only when there are no CPUs
	 *        currently executing in Phase-I.
	 *     2. Once we start Phase-II we want to prevent all other CPUs
	 *        from entering Phase-I.
	 *     3. We want only one CPU among all those who went through
	 *        Phase-I to run Phase-II.
	 *
	 * If write_trylock fails to get the lock and doesn't transfer the
	 * PCC ownership to the platform, then one of the following will be
	 * TRUE:
	 *     1. There is at least one CPU in Phase-I which will later
	 *        execute write_trylock, so the CPUs in Phase-I will be
	 *        responsible for executing Phase-II.
	 *     2. Some other CPU has beaten this CPU to successfully execute
	 *        the write_trylock and has already acquired the write_lock.
	 *        We know for a fact that the other CPU acquiring the
	 *        write_lock couldn't have happened before this CPU's
	 *        Phase-I, as we held the read_lock.
	 *     3. Some other CPU executing a PCC CMD_READ has stolen the
	 *        down_write, in which case send_pcc_cmd will check for
	 *        pending CMD_WRITE commands by checking
	 *        pending_pcc_write_cmd, so this CPU can be certain that
	 *        its request will be delivered.
	 * So in all cases, this CPU knows that its request will be
	 * delivered by another CPU and can return.
	 *
	 * After getting the down_write we still need to check for
	 * pending_pcc_write_cmd to take care of the following scenario: the
	 * thread running this code could be scheduled out between Phase-I
	 * and Phase-II. Before it is scheduled back on, another CPU could
	 * have delivered the request to the platform by triggering the
	 * doorbell and transferred the ownership of PCC to the platform.
	 * This check therefore avoids triggering an unnecessary doorbell
	 * and, more importantly, makes sure that the PCC channel ownership
	 * is still with OSPM before ringing the doorbell.
	 * pending_pcc_write_cmd can also be cleared by a different CPU, if
	 * there was a PCC CMD_READ waiting on down_write and it stole the
	 * lock before the PCC CMD_WRITE was completed. send_pcc_cmd checks
	 * for this case during a CMD_READ and, if there are pending writes,
	 * it delivers the write command before servicing the read command.
	 */
	if (CPC_IN_PCC(desired_reg)) {
		if (down_write_trylock(&pcc_ss_data->pcc_lock)) {	/* BEGIN Phase-II */
			/* Update only if there are pending write commands */
			if (pcc_ss_data->pending_pcc_write_cmd)
				send_pcc_cmd(pcc_ss_id, CMD_WRITE);
			up_write(&pcc_ss_data->pcc_lock);		/* END Phase-II */
		} else
			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
			wait_event(pcc_ss_data->pcc_write_wait_q,
				   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);

		/* send_pcc_cmd updates the status in case of failure */
		ret = cpc_desc->write_cmd_status;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);

/**
 * cppc_get_transition_latency - returns frequency transition latency in ns
 * @cpu_num: CPU number for which the latency is requested.
 *
 * ACPI CPPC does not explicitly specify how a platform can specify the
 * transition latency for performance change requests. The closest we have
 * is the timing information from the PCCT tables which provides the info
 * on the number and frequency of PCC commands the platform can handle.
 */
unsigned int cppc_get_transition_latency(int cpu_num)
{
	/*
	 * Expected transition latency is based on the PCCT timing values.
	 * Below are definitions from the ACPI spec:
	 * pcc_nominal- Expected latency to process a command, in microseconds
	 * pcc_mpar   - The maximum number of periodic requests that the
	 *              subspace channel can support, reported in commands
	 *              per minute. 0 indicates no limitation.
	 * pcc_mrtt   - The minimum amount of time that OSPM must wait after
	 *              the completion of a command before issuing the next
	 *              command, in microseconds.
	 */
	unsigned int latency_ns = 0;
	struct cpc_desc *cpc_desc;
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
	struct cppc_pcc_data *pcc_ss_data;

	cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
	if (!cpc_desc)
		return CPUFREQ_ETERNAL;

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
	if (!CPC_IN_PCC(desired_reg))
		return CPUFREQ_ETERNAL;

	if (pcc_ss_id < 0)
		return CPUFREQ_ETERNAL;

	pcc_ss_data = pcc_data[pcc_ss_id];
	if (pcc_ss_data->pcc_mpar)
		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);

	latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
	latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
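/*
 * Worked example for the latency derivation above (illustrative numbers):
 * with pcc_mpar = 1000 commands/min, the MPAR term is
 * 60 * (10^9 / 1000) = 60,000,000 ns = 60 ms; with pcc_nominal = 1000us
 * and pcc_mrtt = 60us, the nominal and MRTT terms are 1 ms and 60 us, so
 * the reported transition latency would be 60 ms.
 *
 * Putting the exported API together, a cpufreq-style consumer might do
 * (a sketch; real drivers also translate between abstract perf and kHz):
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls;
 *
 *	if (cppc_get_perf_caps(cpu, &caps))
 *		return;
 *	ctrls.desired_perf = caps.nominal_perf;	// request nominal perf
 *	if (cppc_set_perf(cpu, &ctrls))
 *		return;
 *	pr_debug("transition latency: %u ns\n",
 *		 cppc_get_transition_latency(cpu));
 */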