// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPPC (Collaborative Processor Performance Control) driver for
 * interfacing with the CPUfreq layer and governors. See
 * cppc_acpi.c for CPPC specific methods.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 */

#define pr_fmt(fmt)	"CPPC Cpufreq:" fmt

#include <linux/arch_topology.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/irq_work.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <uapi/linux/sched/types.h>

#include <asm/unaligned.h>

#include <acpi/cppc_acpi.h>

/* Minimum struct length needed for the DMI processor entry we want */
#define DMI_ENTRY_PROCESSOR_MIN_LENGTH	48

/* Offset in the DMI processor structure for the max frequency */
#define DMI_PROCESSOR_MAX_SPEED		0x14

/*
 * This list contains information parsed from per CPU ACPI _CPC and _PSD
 * structures: e.g. the highest and lowest supported performance, capabilities,
 * desired performance, level requested etc. Depending on the share_type, not
 * all CPUs will have an entry in the list.
 */
static LIST_HEAD(cpu_data_list);

static bool boost_supported;

struct cppc_workaround_oem_info {
	char oem_id[ACPI_OEM_ID_SIZE + 1];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
	u32 oem_revision;
};

static struct cppc_workaround_oem_info wa_info[] = {
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP07   ",
		.oem_revision	= 0,
	}, {
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP08   ",
		.oem_revision	= 0,
	}
};

#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE

/* Frequency invariance support */
struct cppc_freq_invariance {
	int cpu;
	struct irq_work irq_work;
	struct kthread_work work;
	struct cppc_perf_fb_ctrs prev_perf_fb_ctrs;
	struct cppc_cpudata *cpu_data;
};

static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
static struct kthread_worker *kworker_fie;
static bool fie_disabled;

static struct cpufreq_driver cppc_cpufreq_driver;
static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu);
static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
				 struct cppc_perf_fb_ctrs fb_ctrs_t0,
				 struct cppc_perf_fb_ctrs fb_ctrs_t1);

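/*
 * Illustration of the scale computed below (made-up numbers, not taken from
 * any particular platform): if the feedback counters indicate a delivered
 * performance of 600 since the last tick and highest_perf is 1000, then
 * cppc_scale_freq_workfn() sets arch_freq_scale for that CPU to
 * 600 * 1024 / 1000 = 614, i.e. roughly 60% of full capacity
 * (SCHED_CAPACITY_SHIFT == 10, so the scale is expressed relative to 1024).
 */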

/**
 * cppc_scale_freq_workfn - CPPC arch_freq_scale updater for frequency invariance
 * @work: The work item.
 *
 * The CPPC driver registers itself with the topology core to provide its own
 * implementation (cppc_scale_freq_tick()) of topology_scale_freq_tick() which
 * gets called by the scheduler on every tick.
 *
 * Note that the arch specific counters have higher priority than CPPC counters,
 * if available, though the CPPC driver doesn't need to have any special
 * handling for that.
 *
 * On an invocation of cppc_scale_freq_tick(), we schedule an irq work (since we
 * reach here from hard-irq context), which then schedules a normal work item
 * and cppc_scale_freq_workfn() updates the per_cpu arch_freq_scale variable
 * based on the counter updates since the last tick.
 */
static void cppc_scale_freq_workfn(struct kthread_work *work)
{
	struct cppc_freq_invariance *cppc_fi;
	struct cppc_perf_fb_ctrs fb_ctrs = {0};
	struct cppc_cpudata *cpu_data;
	unsigned long local_freq_scale;
	u64 perf;

	cppc_fi = container_of(work, struct cppc_freq_invariance, work);
	cpu_data = cppc_fi->cpu_data;

	if (cppc_get_perf_ctrs(cppc_fi->cpu, &fb_ctrs)) {
		pr_warn("%s: failed to read perf counters\n", __func__);
		return;
	}

	/* Compute delivered perf against the previous sample before updating it */
	perf = cppc_perf_from_fbctrs(cpu_data, cppc_fi->prev_perf_fb_ctrs,
				     fb_ctrs);
	cppc_fi->prev_perf_fb_ctrs = fb_ctrs;

	perf <<= SCHED_CAPACITY_SHIFT;
	local_freq_scale = div64_u64(perf, cpu_data->perf_caps.highest_perf);
	if (WARN_ON(local_freq_scale > 1024))
		local_freq_scale = 1024;

	per_cpu(arch_freq_scale, cppc_fi->cpu) = local_freq_scale;
}

static void cppc_irq_work(struct irq_work *irq_work)
{
	struct cppc_freq_invariance *cppc_fi;

	cppc_fi = container_of(irq_work, struct cppc_freq_invariance, irq_work);
	kthread_queue_work(kworker_fie, &cppc_fi->work);
}

static void cppc_scale_freq_tick(void)
{
	struct cppc_freq_invariance *cppc_fi = &per_cpu(cppc_freq_inv, smp_processor_id());

	/*
	 * cppc_get_perf_ctrs() can potentially sleep, call that from the right
	 * context.
	 */
	irq_work_queue(&cppc_fi->irq_work);
}

static struct scale_freq_data cppc_sftd = {
	.source = SCALE_FREQ_SOURCE_CPPC,
	.set_freq_scale = cppc_scale_freq_tick,
};

static void cppc_freq_invariance_policy_init(struct cpufreq_policy *policy,
					     struct cppc_cpudata *cpu_data)
{
	struct cppc_perf_fb_ctrs fb_ctrs = {0};
	struct cppc_freq_invariance *cppc_fi;
	int i, ret;

	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
		return;

	if (fie_disabled)
		return;

	for_each_cpu(i, policy->cpus) {
		cppc_fi = &per_cpu(cppc_freq_inv, i);
		cppc_fi->cpu = i;
		cppc_fi->cpu_data = cpu_data;
		kthread_init_work(&cppc_fi->work, cppc_scale_freq_workfn);
		init_irq_work(&cppc_fi->irq_work, cppc_irq_work);

		ret = cppc_get_perf_ctrs(i, &fb_ctrs);
		if (ret) {
			pr_warn("%s: failed to read perf counters: %d\n",
				__func__, ret);
			fie_disabled = true;
		} else {
			cppc_fi->prev_perf_fb_ctrs = fb_ctrs;
		}
	}
}

static void __init cppc_freq_invariance_init(void)
{
	struct sched_attr attr = {
		.size		= sizeof(struct sched_attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_nice	= 0,
		.sched_priority	= 0,
		/*
		 * Fake (unused) bandwidth; workaround to "fix"
		 * priority inheritance.
		 */
		.sched_runtime	= 1000000,
		.sched_deadline	= 10000000,
		.sched_period	= 10000000,
	};
	int ret;

	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
		return;

	if (fie_disabled)
		return;

	kworker_fie = kthread_create_worker(0, "cppc_fie");
	if (IS_ERR(kworker_fie))
		return;

	ret = sched_setattr_nocheck(kworker_fie->task, &attr);
	if (ret) {
		pr_warn("%s: failed to set SCHED_DEADLINE: %d\n", __func__,
			ret);
		kthread_destroy_worker(kworker_fie);
		return;
	}

	/* Register for freq-invariance */
	topology_set_scale_freq_source(&cppc_sftd, cpu_present_mask);
}

static void cppc_freq_invariance_exit(void)
{
	struct cppc_freq_invariance *cppc_fi;
	int i;

	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
		return;

	if (fie_disabled)
		return;

	topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPPC, cpu_present_mask);

	for_each_possible_cpu(i) {
		cppc_fi = &per_cpu(cppc_freq_inv, i);
		irq_work_sync(&cppc_fi->irq_work);
	}

	kthread_destroy_worker(kworker_fie);
	kworker_fie = NULL;
}

#else
static inline void
cppc_freq_invariance_policy_init(struct cpufreq_policy *policy,
				 struct cppc_cpudata *cpu_data)
{
}

static inline void cppc_freq_invariance_init(void)
{
}

static inline void cppc_freq_invariance_exit(void)
{
}
#endif /* CONFIG_ACPI_CPPC_CPUFREQ_FIE */

/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
	const u8 *dmi_data = (const u8 *)dm;
	u16 *mhz = (u16 *)private;

	if (dm->type == DMI_ENTRY_PROCESSOR &&
	    dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
		u16 val = (u16)get_unaligned((const u16 *)
				(dmi_data + DMI_PROCESSOR_MAX_SPEED));
		*mhz = val > *mhz ? val : *mhz;
	}
}

/* Look up the max frequency in DMI */
static u64 cppc_get_dmi_max_khz(void)
{
	u16 mhz = 0;

	dmi_walk(cppc_find_dmi_mhz, &mhz);

	/*
	 * Real stupid fallback value, just in case there is no
	 * actual value set.
	 */
	mhz = mhz ? mhz : 1;

	return (1000 * mhz);
}

/*
 * If the CPPC lowest_freq and nominal_freq registers are exposed then we can
 * use them to convert perf to freq and vice versa.
 *
 * If the perf/freq point lies between Nominal and Lowest, we treat
 * (Lowest perf, Lowest freq) and (Nominal perf, Nominal freq) as 2D
 * coordinates of a line and extrapolate along it.
 * For perf/freq > Nominal, we use the perf:freq ratio at Nominal for the
 * conversion.
 */
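/*
 * Worked example with made-up numbers (purely illustrative, not from any
 * real platform): with lowest_perf = 10 at lowest_freq = 500000 kHz and
 * nominal_perf = 20 at nominal_freq = 1000000 kHz, a request below nominal
 * such as perf = 15 maps to 15 * (1000000 - 500000) / (20 - 10) = 750000 kHz,
 * while perf = 30 (above nominal) uses the nominal ratio and maps to
 * 30 * 1000000 / 20 = 1500000 kHz.
 */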
static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu_data,
					     unsigned int perf)
{
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	static u64 max_khz;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		if (perf >= caps->nominal_perf) {
			mul = caps->nominal_freq;
			div = caps->nominal_perf;
		} else {
			mul = caps->nominal_freq - caps->lowest_freq;
			div = caps->nominal_perf - caps->lowest_perf;
		}
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = max_khz;
		div = caps->highest_perf;
	}
	return (u64)perf * mul / div;
}

static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data,
					     unsigned int freq)
{
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	static u64 max_khz;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		if (freq >= caps->nominal_freq) {
			mul = caps->nominal_perf;
			div = caps->nominal_freq;
		} else {
			mul = caps->lowest_perf;
			div = caps->lowest_freq;
		}
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = caps->highest_perf;
		div = max_khz;
	}

	return (u64)freq * mul / div;
}

static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
				   unsigned int target_freq,
				   unsigned int relation)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	unsigned int cpu = policy->cpu;
	struct cpufreq_freqs freqs;
	u32 desired_perf;
	int ret = 0;

	desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
	/* Return if it is exactly the same perf */
	if (desired_perf == cpu_data->perf_ctrls.desired_perf)
		return ret;

	cpu_data->perf_ctrls.desired_perf = desired_perf;
	freqs.old = policy->cur;
	freqs.new = target_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
	cpufreq_freq_transition_end(policy, &freqs, ret != 0);

	if (ret)
		pr_debug("Failed to set target on CPU:%d. ret:%d\n",
			 cpu, ret);

	return ret;
}

static int cppc_verify_policy(struct cpufreq_policy_data *policy)
{
	cpufreq_verify_within_cpu_limits(policy);
	return 0;
}

static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	unsigned int cpu = policy->cpu;
	int ret;

	cpu_data->perf_ctrls.desired_perf = caps->lowest_perf;

	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
	if (ret)
		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
			 caps->lowest_perf, cpu, ret);

	/* Remove CPU node from list and free driver data for policy */
	free_cpumask_var(cpu_data->shared_cpu_map);
	list_del(&cpu_data->node);
	kfree(policy->driver_data);
	policy->driver_data = NULL;
}

/*
 * The PCC subspace describes the rate at which the platform can accept
 * commands on the shared PCC channel (including READs which do not count
 * towards freq transition requests), so ideally we need to use the PCC values
 * as a fallback if we don't have a platform specific transition_delay_us.
 */
#ifdef CONFIG_ARM64
#include <asm/cputype.h>

static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_num = read_cpuid_part_number();

	switch (implementor) {
	case ARM_CPU_IMP_QCOM:
		switch (part_num) {
		case QCOM_CPU_PART_FALKOR_V1:
		case QCOM_CPU_PART_FALKOR:
			return 10000;
		}
	}
	return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}

#else

static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
	return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}
#endif

static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
{
	struct cppc_cpudata *cpu_data;
	int ret;

	cpu_data = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL);
	if (!cpu_data)
		goto out;

	if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL))
		goto free_cpu;

	ret = acpi_get_psd_map(cpu, cpu_data);
	if (ret) {
		pr_debug("Err parsing CPU%d PSD data: ret:%d\n", cpu, ret);
		goto free_mask;
	}

	ret = cppc_get_perf_caps(cpu, &cpu_data->perf_caps);
	if (ret) {
		pr_debug("Err reading CPU%d perf caps: ret:%d\n", cpu, ret);
		goto free_mask;
	}

	/* Convert the lowest and nominal freq from MHz to KHz */
	cpu_data->perf_caps.lowest_freq *= 1000;
	cpu_data->perf_caps.nominal_freq *= 1000;

	list_add(&cpu_data->node, &cpu_data_list);

	return cpu_data;

free_mask:
	free_cpumask_var(cpu_data->shared_cpu_map);
free_cpu:
	kfree(cpu_data);
out:
	return NULL;
}

static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;
	struct cppc_cpudata *cpu_data;
	struct cppc_perf_caps *caps;
	int ret;

	cpu_data = cppc_cpufreq_get_cpu_data(cpu);
	if (!cpu_data) {
		pr_err("Error in acquiring _CPC/_PSD data for CPU%d.\n", cpu);
		return -ENODEV;
	}
	caps = &cpu_data->perf_caps;
	policy->driver_data = cpu_data;

	/*
	 * Set min to lowest nonlinear perf to avoid any efficiency penalty (see
	 * Section 8.4.7.1.1.5 of ACPI 6.1 spec)
	 */
	policy->min = cppc_cpufreq_perf_to_khz(cpu_data,
					       caps->lowest_nonlinear_perf);
	policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
					       caps->nominal_perf);

	/*
	 * Set cpuinfo.min_freq to Lowest to make the full range of performance
	 * available if userspace wants to use any perf between lowest & lowest
	 * nonlinear perf
	 */
	policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu_data,
							    caps->lowest_perf);
	policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu_data,
							    caps->nominal_perf);

	policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
	policy->shared_type = cpu_data->shared_type;

	switch (policy->shared_type) {
	case CPUFREQ_SHARED_TYPE_HW:
	case CPUFREQ_SHARED_TYPE_NONE:
		/* Nothing to be done - we'll have a policy for each CPU */
		break;
	case CPUFREQ_SHARED_TYPE_ANY:
		/*
		 * All CPUs in the domain will share a policy and all cpufreq
		 * operations will use a single cppc_cpudata structure stored
		 * in policy->driver_data.
		 */
		cpumask_copy(policy->cpus, cpu_data->shared_cpu_map);
		break;
	default:
		pr_debug("Unsupported CPU co-ord type: %d\n",
			 policy->shared_type);
		return -EFAULT;
	}

	/*
	 * If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost
	 * is supported.
	 */
	if (caps->highest_perf > caps->nominal_perf)
		boost_supported = true;

	/* Set policy->cur to max now. The governors will adjust later. */
	policy->cur = cppc_cpufreq_perf_to_khz(cpu_data, caps->highest_perf);
	cpu_data->perf_ctrls.desired_perf = caps->highest_perf;

	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
	if (ret) {
		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
			 caps->highest_perf, cpu, ret);
	} else {
		cppc_freq_invariance_policy_init(policy, cpu_data);
	}

	return ret;
}

/*
 * The platform may expose only 32 bit wide counters; handle a possible
 * wraparound of the lower 32 bits between the two samples.
 */
static inline u64 get_delta(u64 t1, u64 t0)
{
	if (t1 > t0 || t0 > ~(u32)0)
		return t1 - t0;

	return (u32)t1 - (u32)t0;
}

static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
				 struct cppc_perf_fb_ctrs fb_ctrs_t0,
				 struct cppc_perf_fb_ctrs fb_ctrs_t1)
{
	u64 delta_reference, delta_delivered;
	u64 reference_perf;

	reference_perf = fb_ctrs_t0.reference_perf;

	delta_reference = get_delta(fb_ctrs_t1.reference,
				    fb_ctrs_t0.reference);
	delta_delivered = get_delta(fb_ctrs_t1.delivered,
				    fb_ctrs_t0.delivered);

	/* Check to avoid divide-by-zero and invalid delivered_perf */
	if (!delta_reference || !delta_delivered)
		return cpu_data->perf_ctrls.desired_perf;

	return (reference_perf * delta_delivered) / delta_reference;
}

static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data,
				     struct cppc_perf_fb_ctrs fb_ctrs_t0,
				     struct cppc_perf_fb_ctrs fb_ctrs_t1)
{
	u64 delivered_perf;

	delivered_perf = cppc_perf_from_fbctrs(cpu_data, fb_ctrs_t0,
					       fb_ctrs_t1);

	return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
}

static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
{
	struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cppc_cpudata *cpu_data = policy->driver_data;
	int ret;

	cpufreq_cpu_put(policy);

	ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
	if (ret)
		return ret;

	udelay(2); /* 2usec delay between sampling */

	ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t1);
	if (ret)
		return ret;

	return cppc_get_rate_from_fbctrs(cpu_data, fb_ctrs_t0, fb_ctrs_t1);
}

static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	int ret;

	if (!boost_supported) {
		pr_err("BOOST not supported by CPU or firmware\n");
		return -EINVAL;
	}

	if (state)
		policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
						       caps->highest_perf);
	else
		policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
						       caps->nominal_perf);
	policy->cpuinfo.max_freq = policy->max;

	ret = freq_qos_update_request(policy->max_freq_req, policy->max);
	if (ret < 0)
		return ret;

	return 0;
}

static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;

	return cpufreq_show_cpus(cpu_data->shared_cpu_map, buf);
}
cpufreq_freq_attr_ro(freqdomain_cpus);

static struct freq_attr *cppc_cpufreq_attr[] = {
	&freqdomain_cpus,
	NULL,
};

static struct cpufreq_driver cppc_cpufreq_driver = {
	.flags = CPUFREQ_CONST_LOOPS,
	.verify = cppc_verify_policy,
	.target = cppc_cpufreq_set_target,
	.get = cppc_cpufreq_get_rate,
	.init = cppc_cpufreq_cpu_init,
	.stop_cpu = cppc_cpufreq_stop_cpu,
	.set_boost = cppc_cpufreq_set_boost,
	.attr = cppc_cpufreq_attr,
	.name = "cppc_cpufreq",
};

/*
 * HISI platform does not support delivered performance counter and
 * reference performance counter. It can calculate the performance using the
 * platform specific mechanism. We reuse the desired performance register to
 * store the real performance calculated by the platform.
 */
static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cppc_cpudata *cpu_data = policy->driver_data;
	u64 desired_perf;
	int ret;

	cpufreq_cpu_put(policy);

	ret = cppc_get_desired_perf(cpu, &desired_perf);
	if (ret < 0)
		return -EIO;

	return cppc_cpufreq_perf_to_khz(cpu_data, desired_perf);
}

static void cppc_check_hisi_workaround(void)
{
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	int i;

	status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);
	if (ACPI_FAILURE(status) || !tbl)
		return;

	for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
		if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
		    !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
		    wa_info[i].oem_revision == tbl->oem_revision) {
			/* Overwrite the get() callback */
			cppc_cpufreq_driver.get = hisi_cppc_cpufreq_get_rate;
			break;
		}
	}

	acpi_put_table(tbl);
}

static int __init cppc_cpufreq_init(void)
{
	int ret;

	if ((acpi_disabled) || !acpi_cpc_valid())
		return -ENODEV;

	INIT_LIST_HEAD(&cpu_data_list);

	cppc_check_hisi_workaround();

	ret = cpufreq_register_driver(&cppc_cpufreq_driver);
	if (!ret)
		cppc_freq_invariance_init();

	return ret;
}

static inline void free_cpu_data(void)
{
	struct cppc_cpudata *iter, *tmp;

	list_for_each_entry_safe(iter, tmp, &cpu_data_list, node) {
		free_cpumask_var(iter->shared_cpu_map);
		list_del(&iter->node);
		kfree(iter);
	}
}

static void __exit cppc_cpufreq_exit(void)
{
	cppc_freq_invariance_exit();
	cpufreq_unregister_driver(&cppc_cpufreq_driver);

	free_cpu_data();
}

module_exit(cppc_cpufreq_exit);
MODULE_AUTHOR("Ashwin Chaugule");
MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
MODULE_LICENSE("GPL");

late_initcall(cppc_cpufreq_init);

static const struct acpi_device_id cppc_acpi_ids[] __used = {
	{ACPI_PROCESSOR_DEVICE_HID, },
	{}
};

MODULE_DEVICE_TABLE(acpi, cppc_acpi_ids);