// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPPC (Collaborative Processor Performance Control) driver for
 * interfacing with the CPUfreq layer and governors. See
 * cppc_acpi.c for CPPC specific methods.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 */

#define pr_fmt(fmt)	"CPPC Cpufreq:" fmt

#include <linux/arch_topology.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/irq_work.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <uapi/linux/sched/types.h>

#include <asm/unaligned.h>

#include <acpi/cppc_acpi.h>

/* Minimum struct length needed for the DMI processor entry we want */
#define DMI_ENTRY_PROCESSOR_MIN_LENGTH	48

/* Offset in the DMI processor structure for the max frequency */
#define DMI_PROCESSOR_MAX_SPEED		0x14

/*
 * This list contains information parsed from per CPU ACPI _CPC and _PSD
 * structures: e.g. the highest and lowest supported performance, capabilities,
 * desired performance, level requested etc. Depending on the share_type, not
 * all CPUs will have an entry in the list.
 */
static LIST_HEAD(cpu_data_list);

static bool boost_supported;

struct cppc_workaround_oem_info {
	char oem_id[ACPI_OEM_ID_SIZE + 1];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
	u32 oem_revision;
};

static struct cppc_workaround_oem_info wa_info[] = {
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP07   ",
		.oem_revision	= 0,
	}, {
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP08   ",
		.oem_revision	= 0,
	}
};

static struct cpufreq_driver cppc_cpufreq_driver;

#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE

/* Frequency invariance support */
struct cppc_freq_invariance {
	int cpu;
	struct irq_work irq_work;
	struct kthread_work work;
	struct cppc_perf_fb_ctrs prev_perf_fb_ctrs;
	struct cppc_cpudata *cpu_data;
};

static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
static struct kthread_worker *kworker_fie;

static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu);
static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
				 struct cppc_perf_fb_ctrs *fb_ctrs_t0,
				 struct cppc_perf_fb_ctrs *fb_ctrs_t1);

/**
 * cppc_scale_freq_workfn - CPPC arch_freq_scale updater for frequency invariance
 * @work: The work item.
 *
 * The CPPC driver registers itself with the topology core to provide its own
 * implementation (cppc_scale_freq_tick()) of topology_scale_freq_tick() which
 * gets called by the scheduler on every tick.
 *
 * Note that the arch specific counters have higher priority than CPPC counters,
 * if available, though the CPPC driver doesn't need to have any special
 * handling for that.
 *
 * On an invocation of cppc_scale_freq_tick(), we schedule an irq work (since we
 * reach here from hard-irq context), which then schedules a normal work item
 * and cppc_scale_freq_workfn() updates the per_cpu arch_freq_scale variable
 * based on the counter updates since the last tick.
 */
static void cppc_scale_freq_workfn(struct kthread_work *work)
{
	struct cppc_freq_invariance *cppc_fi;
	struct cppc_perf_fb_ctrs fb_ctrs = {0};
	struct cppc_cpudata *cpu_data;
	unsigned long local_freq_scale;
	u64 perf;

	cppc_fi = container_of(work, struct cppc_freq_invariance, work);
	cpu_data = cppc_fi->cpu_data;

	if (cppc_get_perf_ctrs(cppc_fi->cpu, &fb_ctrs)) {
		pr_warn("%s: failed to read perf counters\n", __func__);
		return;
	}

	perf = cppc_perf_from_fbctrs(cpu_data, &cppc_fi->prev_perf_fb_ctrs,
				     &fb_ctrs);
	cppc_fi->prev_perf_fb_ctrs = fb_ctrs;

	perf <<= SCHED_CAPACITY_SHIFT;
	local_freq_scale = div64_u64(perf, cpu_data->perf_caps.highest_perf);

	/* This can happen due to counter's overflow */
	if (unlikely(local_freq_scale > 1024))
		local_freq_scale = 1024;

	per_cpu(arch_freq_scale, cppc_fi->cpu) = local_freq_scale;
}
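
/*
 * Illustrative numbers only: if the feedback counters report a delivered
 * performance of 512 on a CPU whose highest_perf is 1024, the computation
 * above yields local_freq_scale = (512 << SCHED_CAPACITY_SHIFT) / 1024 = 512,
 * i.e. the CPU is credited with half of its maximum capacity until the next
 * update.
 */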

static void cppc_irq_work(struct irq_work *irq_work)
{
	struct cppc_freq_invariance *cppc_fi;

	cppc_fi = container_of(irq_work, struct cppc_freq_invariance, irq_work);
	kthread_queue_work(kworker_fie, &cppc_fi->work);
}

static void cppc_scale_freq_tick(void)
{
	struct cppc_freq_invariance *cppc_fi = &per_cpu(cppc_freq_inv, smp_processor_id());

	/*
	 * cppc_get_perf_ctrs() can potentially sleep, call that from the right
	 * context.
	 */
	irq_work_queue(&cppc_fi->irq_work);
}

static struct scale_freq_data cppc_sftd = {
	.source = SCALE_FREQ_SOURCE_CPPC,
	.set_freq_scale = cppc_scale_freq_tick,
};

static void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
{
	struct cppc_freq_invariance *cppc_fi;
	int cpu, ret;

	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
		return;

	for_each_cpu(cpu, policy->cpus) {
		cppc_fi = &per_cpu(cppc_freq_inv, cpu);
		cppc_fi->cpu = cpu;
		cppc_fi->cpu_data = policy->driver_data;
		kthread_init_work(&cppc_fi->work, cppc_scale_freq_workfn);
		init_irq_work(&cppc_fi->irq_work, cppc_irq_work);

		ret = cppc_get_perf_ctrs(cpu, &cppc_fi->prev_perf_fb_ctrs);
		if (ret) {
			pr_warn("%s: failed to read perf counters for cpu:%d: %d\n",
				__func__, cpu, ret);

			/*
			 * Don't abort if the CPU was offline while the driver
			 * was getting registered.
			 */
			if (cpu_online(cpu))
				return;
		}
	}

	/* Register for freq-invariance */
	topology_set_scale_freq_source(&cppc_sftd, policy->cpus);
}

/*
 * We free all the resources on policy's removal and not on CPU removal as the
 * irq-works are per-CPU and the hotplug core takes care of flushing the pending
 * irq-works (hint: smpcfd_dying_cpu()) on CPU hotplug. Even if the kthread-work
 * fires on another CPU after the concerned CPU is removed, it won't harm.
 *
 * We just need to make sure to remove them all on policy->exit().
 */
static void cppc_cpufreq_cpu_fie_exit(struct cpufreq_policy *policy)
{
	struct cppc_freq_invariance *cppc_fi;
	int cpu;

	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
		return;

	/* policy->cpus will be empty here, use related_cpus instead */
	topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPPC, policy->related_cpus);

	for_each_cpu(cpu, policy->related_cpus) {
		cppc_fi = &per_cpu(cppc_freq_inv, cpu);
		irq_work_sync(&cppc_fi->irq_work);
		kthread_cancel_work_sync(&cppc_fi->work);
	}
}

static void __init cppc_freq_invariance_init(void)
{
	struct sched_attr attr = {
		.size		= sizeof(struct sched_attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_nice	= 0,
		.sched_priority	= 0,
		/*
		 * Fake (unused) bandwidth; workaround to "fix"
		 * priority inheritance.
		 */
		.sched_runtime	= 1000000,
		.sched_deadline = 10000000,
		.sched_period	= 10000000,
	};
	int ret;

	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
		return;

	kworker_fie = kthread_create_worker(0, "cppc_fie");
	if (IS_ERR(kworker_fie))
		return;

	ret = sched_setattr_nocheck(kworker_fie->task, &attr);
	if (ret) {
		pr_warn("%s: failed to set SCHED_DEADLINE: %d\n", __func__,
			ret);
		kthread_destroy_worker(kworker_fie);
		return;
	}
}

static void cppc_freq_invariance_exit(void)
{
	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
		return;

	kthread_destroy_worker(kworker_fie);
	kworker_fie = NULL;
}

#else
static inline void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
{
}

static inline void cppc_cpufreq_cpu_fie_exit(struct cpufreq_policy *policy)
{
}

static inline void cppc_freq_invariance_init(void)
{
}

static inline void cppc_freq_invariance_exit(void)
{
}
#endif /* CONFIG_ACPI_CPPC_CPUFREQ_FIE */

/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
	const u8 *dmi_data = (const u8 *)dm;
	u16 *mhz = (u16 *)private;

	if (dm->type == DMI_ENTRY_PROCESSOR &&
	    dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
		u16 val = (u16)get_unaligned((const u16 *)
			      (dmi_data + DMI_PROCESSOR_MAX_SPEED));
		*mhz = val > *mhz ? val : *mhz;
	}
}

/* Look up the max frequency in DMI */
static u64 cppc_get_dmi_max_khz(void)
{
	u16 mhz = 0;

	dmi_walk(cppc_find_dmi_mhz, &mhz);

	/*
	 * Real stupid fallback value, just in case there is no
	 * actual value set.
	 */
	mhz = mhz ? mhz : 1;

	return (1000 * mhz);
}

/*
 * If CPPC lowest_freq and nominal_freq registers are exposed then we can
 * use them to convert perf to freq and vice versa. The conversion is
 * extrapolated as an affine function passing by the 2 points:
 * - (Low perf, Low freq)
 * - (Nominal perf, Nominal freq)
 */
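/*
 * Illustrative register values only: with lowest_perf = 10 at
 * lowest_freq = 1000000 kHz and nominal_perf = 30 at nominal_freq = 2000000 kHz,
 * cppc_cpufreq_perf_to_khz() below computes mul = 1000000, div = 20 and
 * offset = 2000000 - (30 * 1000000) / 20 = 500000, so a requested perf of 20
 * maps to 500000 + (20 * 1000000) / 20 = 1500000 kHz, halfway between the two
 * anchor points as expected.
 */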
static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu_data,
					     unsigned int perf)
{
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	s64 retval, offset = 0;
	static u64 max_khz;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		mul = caps->nominal_freq - caps->lowest_freq;
		div = caps->nominal_perf - caps->lowest_perf;
		offset = caps->nominal_freq - div64_u64(caps->nominal_perf * mul, div);
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = max_khz;
		div = caps->highest_perf;
	}

	retval = offset + div64_u64(perf * mul, div);
	if (retval >= 0)
		return retval;
	return 0;
}

static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data,
					     unsigned int freq)
{
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	s64 retval, offset = 0;
	static u64 max_khz;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		mul = caps->nominal_perf - caps->lowest_perf;
		div = caps->nominal_freq - caps->lowest_freq;
		offset = caps->nominal_perf - div64_u64(caps->nominal_freq * mul, div);
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = caps->highest_perf;
		div = max_khz;
	}

	retval = offset + div64_u64(freq * mul, div);
	if (retval >= 0)
		return retval;
	return 0;
}

static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
				   unsigned int target_freq,
				   unsigned int relation)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	unsigned int cpu = policy->cpu;
	struct cpufreq_freqs freqs;
	u32 desired_perf;
	int ret = 0;

	desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
	/* Return if it is exactly the same perf */
	if (desired_perf == cpu_data->perf_ctrls.desired_perf)
		return ret;

	cpu_data->perf_ctrls.desired_perf = desired_perf;
	freqs.old = policy->cur;
	freqs.new = target_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
	cpufreq_freq_transition_end(policy, &freqs, ret != 0);

	if (ret)
		pr_debug("Failed to set target on CPU:%d. ret:%d\n",
			 cpu, ret);

	return ret;
}

static unsigned int cppc_cpufreq_fast_switch(struct cpufreq_policy *policy,
					     unsigned int target_freq)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	unsigned int cpu = policy->cpu;
	u32 desired_perf;
	int ret;

	desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
	cpu_data->perf_ctrls.desired_perf = desired_perf;
	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);

	if (ret) {
		pr_debug("Failed to set target on CPU:%d. ret:%d\n",
			 cpu, ret);
		return 0;
	}

	return target_freq;
}
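
/*
 * Note (general cpufreq behaviour, not specific to this driver): the
 * .fast_switch callback above is invoked by governors such as schedutil
 * directly from scheduler context when policy->fast_switch_possible is set,
 * so unlike cppc_cpufreq_set_target() it does not go through the
 * cpufreq_freq_transition_begin()/end() notification sequence.
 */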

static int cppc_verify_policy(struct cpufreq_policy_data *policy)
{
	cpufreq_verify_within_cpu_limits(policy);
	return 0;
}

/*
 * The PCC subspace describes the rate at which the platform can accept commands
 * on the shared PCC channel (including READs which do not count towards freq
 * transition requests), so ideally we need to use the PCC values as a fallback
 * if we don't have a platform specific transition_delay_us
 */
#ifdef CONFIG_ARM64
#include <asm/cputype.h>

static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_num = read_cpuid_part_number();

	switch (implementor) {
	case ARM_CPU_IMP_QCOM:
		switch (part_num) {
		case QCOM_CPU_PART_FALKOR_V1:
		case QCOM_CPU_PART_FALKOR:
			return 10000;
		}
	}
	return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}
#else
static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
	return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}
#endif

#if defined(CONFIG_ARM64) && defined(CONFIG_ENERGY_MODEL)

static DEFINE_PER_CPU(unsigned int, efficiency_class);
static void cppc_cpufreq_register_em(struct cpufreq_policy *policy);

/* Create an artificial performance state every CPPC_EM_CAP_STEP capacity unit. */
#define CPPC_EM_CAP_STEP	(20)
/* Increase the cost value by CPPC_EM_COST_STEP every performance state. */
#define CPPC_EM_COST_STEP	(1)
/* Add a cost gap corresponding to the energy of 4 CPUs. */
#define CPPC_EM_COST_GAP	(4 * SCHED_CAPACITY_SCALE * CPPC_EM_COST_STEP \
				 / CPPC_EM_CAP_STEP)

static unsigned int get_perf_level_count(struct cpufreq_policy *policy)
{
	struct cppc_perf_caps *perf_caps;
	unsigned int min_cap, max_cap;
	struct cppc_cpudata *cpu_data;
	int cpu = policy->cpu;

	cpu_data = policy->driver_data;
	perf_caps = &cpu_data->perf_caps;
	max_cap = arch_scale_cpu_capacity(cpu);
	min_cap = div_u64(max_cap * perf_caps->lowest_perf, perf_caps->highest_perf);
	if ((min_cap == 0) || (max_cap < min_cap))
		return 0;
	return 1 + max_cap / CPPC_EM_CAP_STEP - min_cap / CPPC_EM_CAP_STEP;
}
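
/*
 * Illustrative numbers only: for a CPU with arch_scale_cpu_capacity() = 1024
 * whose lowest_perf/highest_perf ratio gives min_cap = 307, the artificial EM
 * exposes 1 + 1024/20 - 307/20 = 1 + 51 - 15 = 37 performance states, one per
 * CPPC_EM_CAP_STEP of capacity between min_cap and max_cap.
 */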

/*
 * The cost is defined as:
 *   cost = power * max_frequency / frequency
 */
static inline unsigned long compute_cost(int cpu, int step)
{
	return CPPC_EM_COST_GAP * per_cpu(efficiency_class, cpu) +
			step * CPPC_EM_COST_STEP;
}

static int cppc_get_cpu_power(struct device *cpu_dev,
			      unsigned long *power, unsigned long *KHz)
{
	unsigned long perf_step, perf_prev, perf, perf_check;
	unsigned int min_step, max_step, step, step_check;
	unsigned long prev_freq = *KHz;
	unsigned int min_cap, max_cap;
	struct cpufreq_policy *policy;

	struct cppc_perf_caps *perf_caps;
	struct cppc_cpudata *cpu_data;

	policy = cpufreq_cpu_get_raw(cpu_dev->id);
	cpu_data = policy->driver_data;
	perf_caps = &cpu_data->perf_caps;
	max_cap = arch_scale_cpu_capacity(cpu_dev->id);
	min_cap = div_u64(max_cap * perf_caps->lowest_perf,
			  perf_caps->highest_perf);

	perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
	min_step = min_cap / CPPC_EM_CAP_STEP;
	max_step = max_cap / CPPC_EM_CAP_STEP;

	perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
	step = perf_prev / perf_step;

	if (step > max_step)
		return -EINVAL;

	if (min_step == max_step) {
		step = max_step;
		perf = perf_caps->highest_perf;
	} else if (step < min_step) {
		step = min_step;
		perf = perf_caps->lowest_perf;
	} else {
		step++;
		if (step == max_step)
			perf = perf_caps->highest_perf;
		else
			perf = step * perf_step;
	}

	*KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
	perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
	step_check = perf_check / perf_step;

	/*
	 * To avoid bad integer approximation, check that new frequency value
	 * increased and that the new frequency will be converted to the
	 * desired step value.
	 */
	while ((*KHz == prev_freq) || (step_check != step)) {
		perf++;
		*KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
		perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
		step_check = perf_check / perf_step;
	}

	/*
	 * With an artificial EM, only the cost value is used. Still, the power
	 * is populated such that 0 < power < EM_MAX_POWER, which helps give
	 * more meaning to the artificial performance states.
	 */
	*power = compute_cost(cpu_dev->id, step);

	return 0;
}

static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz,
			     unsigned long *cost)
{
	unsigned long perf_step, perf_prev;
	struct cppc_perf_caps *perf_caps;
	struct cpufreq_policy *policy;
	struct cppc_cpudata *cpu_data;
	unsigned int max_cap;
	int step;

	policy = cpufreq_cpu_get_raw(cpu_dev->id);
	cpu_data = policy->driver_data;
	perf_caps = &cpu_data->perf_caps;
	max_cap = arch_scale_cpu_capacity(cpu_dev->id);

	perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, KHz);
	perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
	step = perf_prev / perf_step;

	*cost = compute_cost(cpu_dev->id, step);

	return 0;
}
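
/*
 * Illustrative summary of the artificial cost model above, assuming
 * SCHED_CAPACITY_SCALE = 1024: CPPC_EM_COST_GAP evaluates to
 * 4 * 1024 * 1 / 20 = 204, so a CPU in efficiency class 1 at step 10 gets
 * cost = 204 * 1 + 10 * 1 = 214, while a class-0 CPU at the same step gets
 * cost = 10. With capacities up to 1024 the per-class gap keeps every state of
 * a less efficient class costlier than any state of a more efficient one.
 */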

static int populate_efficiency_class(void)
{
	struct acpi_madt_generic_interrupt *gicc;
	DECLARE_BITMAP(used_classes, 256) = {};
	int class, cpu, index;

	for_each_possible_cpu(cpu) {
		gicc = acpi_cpu_get_madt_gicc(cpu);
		class = gicc->efficiency_class;
		bitmap_set(used_classes, class, 1);
	}

	if (bitmap_weight(used_classes, 256) <= 1) {
		pr_debug("Efficiency classes are all equal (=%d). "
			"No EM registered", class);
		return -EINVAL;
	}

	/*
	 * Squeeze efficiency class values on [0:#efficiency_class-1].
	 * Values are per spec in [0:255].
	 */
	index = 0;
	for_each_set_bit(class, used_classes, 256) {
		for_each_possible_cpu(cpu) {
			gicc = acpi_cpu_get_madt_gicc(cpu);
			if (gicc->efficiency_class == class)
				per_cpu(efficiency_class, cpu) = index;
		}
		index++;
	}
	cppc_cpufreq_driver.register_em = cppc_cpufreq_register_em;

	return 0;
}

static void cppc_cpufreq_register_em(struct cpufreq_policy *policy)
{
	struct cppc_cpudata *cpu_data;
	struct em_data_callback em_cb =
		EM_ADV_DATA_CB(cppc_get_cpu_power, cppc_get_cpu_cost);

	cpu_data = policy->driver_data;
	em_dev_register_perf_domain(get_cpu_device(policy->cpu),
				    get_perf_level_count(policy), &em_cb,
				    cpu_data->shared_cpu_map, 0);
}

#else
static int populate_efficiency_class(void)
{
	return 0;
}
#endif

static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
{
	struct cppc_cpudata *cpu_data;
	int ret;

	cpu_data = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL);
	if (!cpu_data)
		goto out;

	if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL))
		goto free_cpu;

	ret = acpi_get_psd_map(cpu, cpu_data);
	if (ret) {
		pr_debug("Err parsing CPU%d PSD data: ret:%d\n", cpu, ret);
		goto free_mask;
	}

	ret = cppc_get_perf_caps(cpu, &cpu_data->perf_caps);
	if (ret) {
		pr_debug("Err reading CPU%d perf caps: ret:%d\n", cpu, ret);
		goto free_mask;
	}

	/* Convert the lowest and nominal freq from MHz to KHz */
	cpu_data->perf_caps.lowest_freq *= 1000;
	cpu_data->perf_caps.nominal_freq *= 1000;

	list_add(&cpu_data->node, &cpu_data_list);

	return cpu_data;

free_mask:
	free_cpumask_var(cpu_data->shared_cpu_map);
free_cpu:
	kfree(cpu_data);
out:
	return NULL;
}

static void cppc_cpufreq_put_cpu_data(struct cpufreq_policy *policy)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;

	list_del(&cpu_data->node);
	free_cpumask_var(cpu_data->shared_cpu_map);
	kfree(cpu_data);
	policy->driver_data = NULL;
}

static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;
	struct cppc_cpudata *cpu_data;
	struct cppc_perf_caps *caps;
	int ret;

	cpu_data = cppc_cpufreq_get_cpu_data(cpu);
	if (!cpu_data) {
		pr_err("Error in acquiring _CPC/_PSD data for CPU%d.\n", cpu);
		return -ENODEV;
	}
	caps = &cpu_data->perf_caps;
	policy->driver_data = cpu_data;

	/*
	 * Set min to lowest nonlinear perf to avoid any efficiency penalty (see
	 * Section 8.4.7.1.1.5 of ACPI 6.1 spec)
	 */
	policy->min = cppc_cpufreq_perf_to_khz(cpu_data,
					       caps->lowest_nonlinear_perf);
	policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
					       caps->nominal_perf);

	/*
	 * Set cpuinfo.min_freq to Lowest to make the full range of performance
	 * available if userspace wants to use any perf between lowest & lowest
	 * nonlinear perf
	 */
	policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu_data,
							    caps->lowest_perf);
	policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu_data,
							    caps->nominal_perf);

	policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
	policy->shared_type = cpu_data->shared_type;

	switch (policy->shared_type) {
	case CPUFREQ_SHARED_TYPE_HW:
	case CPUFREQ_SHARED_TYPE_NONE:
		/* Nothing to be done - we'll have a policy for each CPU */
		break;
	case CPUFREQ_SHARED_TYPE_ANY:
		/*
		 * All CPUs in the domain will share a policy and all cpufreq
		 * operations will use a single cppc_cpudata structure stored
		 * in policy->driver_data.
		 */
		cpumask_copy(policy->cpus, cpu_data->shared_cpu_map);
		break;
	default:
		pr_debug("Unsupported CPU co-ord type: %d\n",
			 policy->shared_type);
		ret = -EFAULT;
		goto out;
	}

	policy->fast_switch_possible = cppc_allow_fast_switch();
	policy->dvfs_possible_from_any_cpu = true;

	/*
	 * If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost
	 * is supported.
	 */
	if (caps->highest_perf > caps->nominal_perf)
		boost_supported = true;

	/* Set policy->cur to max now. The governors will adjust later. */
	policy->cur = cppc_cpufreq_perf_to_khz(cpu_data, caps->highest_perf);
	cpu_data->perf_ctrls.desired_perf = caps->highest_perf;

	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
	if (ret) {
		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
			 caps->highest_perf, cpu, ret);
		goto out;
	}

	cppc_cpufreq_cpu_fie_init(policy);
	return 0;

out:
	cppc_cpufreq_put_cpu_data(policy);
	return ret;
}

static int cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	unsigned int cpu = policy->cpu;
	int ret;

	cppc_cpufreq_cpu_fie_exit(policy);

	cpu_data->perf_ctrls.desired_perf = caps->lowest_perf;

	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
	if (ret)
		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
			 caps->lowest_perf, cpu, ret);

	cppc_cpufreq_put_cpu_data(policy);
	return 0;
}

/*
 * If the delivered/reference counters are only 32 bits wide, a wrapped counter
 * makes t1 < t0; in that case compute the delta in 32-bit space so the
 * wraparound is still handled correctly. When t0 already exceeds 32 bits the
 * counters are assumed to be 64 bits wide and the plain difference is used.
 */
static inline u64 get_delta(u64 t1, u64 t0)
{
	if (t1 > t0 || t0 > ~(u32)0)
		return t1 - t0;

	return (u32)t1 - (u32)t0;
}
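
/*
 * Illustrative numbers only: cppc_perf_from_fbctrs() below scales the known
 * reference performance by the ratio of delivered to reference cycles, i.e.
 * delivered_perf = reference_perf * delta_delivered / delta_reference. With
 * reference_perf = 100, delta_reference = 1000 and delta_delivered = 1500,
 * the CPU ran at a delivered performance of 150 over the sampling window.
 */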
static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
				 struct cppc_perf_fb_ctrs *fb_ctrs_t0,
				 struct cppc_perf_fb_ctrs *fb_ctrs_t1)
{
	u64 delta_reference, delta_delivered;
	u64 reference_perf;

	reference_perf = fb_ctrs_t0->reference_perf;

	delta_reference = get_delta(fb_ctrs_t1->reference,
				    fb_ctrs_t0->reference);
	delta_delivered = get_delta(fb_ctrs_t1->delivered,
				    fb_ctrs_t0->delivered);

	/* Check to avoid divide-by zero and invalid delivered_perf */
	if (!delta_reference || !delta_delivered)
		return cpu_data->perf_ctrls.desired_perf;

	return (reference_perf * delta_delivered) / delta_reference;
}

static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
{
	struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cppc_cpudata *cpu_data = policy->driver_data;
	u64 delivered_perf;
	int ret;

	cpufreq_cpu_put(policy);

	ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
	if (ret)
		return ret;

	udelay(2); /* 2usec delay between sampling */

	ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t1);
	if (ret)
		return ret;

	delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
					       &fb_ctrs_t1);

	return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
}

static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	int ret;

	if (!boost_supported) {
		pr_err("BOOST not supported by CPU or firmware\n");
		return -EINVAL;
	}

	if (state)
		policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
						       caps->highest_perf);
	else
		policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
						       caps->nominal_perf);
	policy->cpuinfo.max_freq = policy->max;

	ret = freq_qos_update_request(policy->max_freq_req, policy->max);
	if (ret < 0)
		return ret;

	return 0;
}

static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;

	return cpufreq_show_cpus(cpu_data->shared_cpu_map, buf);
}
cpufreq_freq_attr_ro(freqdomain_cpus);

static struct freq_attr *cppc_cpufreq_attr[] = {
	&freqdomain_cpus,
	NULL,
};

static struct cpufreq_driver cppc_cpufreq_driver = {
	.flags = CPUFREQ_CONST_LOOPS,
	.verify = cppc_verify_policy,
	.target = cppc_cpufreq_set_target,
	.get = cppc_cpufreq_get_rate,
	.fast_switch = cppc_cpufreq_fast_switch,
	.init = cppc_cpufreq_cpu_init,
	.exit = cppc_cpufreq_cpu_exit,
	.set_boost = cppc_cpufreq_set_boost,
	.attr = cppc_cpufreq_attr,
	.name = "cppc_cpufreq",
};

/*
 * The HiSi platform does not support the delivered and reference performance
 * counters. It calculates performance using a platform specific mechanism.
 * We reuse the desired performance register to store the real performance
 * calculated by the platform.
 */
static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cppc_cpudata *cpu_data = policy->driver_data;
	u64 desired_perf;
	int ret;

	cpufreq_cpu_put(policy);

	ret = cppc_get_desired_perf(cpu, &desired_perf);
	if (ret < 0)
		return -EIO;

	return cppc_cpufreq_perf_to_khz(cpu_data, desired_perf);
}

static void cppc_check_hisi_workaround(void)
{
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	int i;

	status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);
	if (ACPI_FAILURE(status) || !tbl)
		return;

	for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
		if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
		    !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
		    wa_info[i].oem_revision == tbl->oem_revision) {
			/* Overwrite the get() callback */
			cppc_cpufreq_driver.get = hisi_cppc_cpufreq_get_rate;
			break;
		}
	}

	acpi_put_table(tbl);
}

static int __init cppc_cpufreq_init(void)
{
	int ret;

	if ((acpi_disabled) || !acpi_cpc_valid())
		return -ENODEV;

	cppc_check_hisi_workaround();
	cppc_freq_invariance_init();
	populate_efficiency_class();

	ret = cpufreq_register_driver(&cppc_cpufreq_driver);
	if (ret)
		cppc_freq_invariance_exit();

	return ret;
}

static inline void free_cpu_data(void)
{
	struct cppc_cpudata *iter, *tmp;

	list_for_each_entry_safe(iter, tmp, &cpu_data_list, node) {
		free_cpumask_var(iter->shared_cpu_map);
		list_del(&iter->node);
		kfree(iter);
	}
}

static void __exit cppc_cpufreq_exit(void)
{
	cpufreq_unregister_driver(&cppc_cpufreq_driver);
	cppc_freq_invariance_exit();

	free_cpu_data();
}

module_exit(cppc_cpufreq_exit);
MODULE_AUTHOR("Ashwin Chaugule");
MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
MODULE_LICENSE("GPL");

late_initcall(cppc_cpufreq_init);

static const struct acpi_device_id cppc_acpi_ids[] __used = {
	{ACPI_PROCESSOR_DEVICE_HID, },
	{}
};

MODULE_DEVICE_TABLE(acpi, cppc_acpi_ids);