1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * CPPC (Collaborative Processor Performance Control) driver for 4 * interfacing with the CPUfreq layer and governors. See 5 * cppc_acpi.c for CPPC specific methods. 6 * 7 * (C) Copyright 2014, 2015 Linaro Ltd. 8 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org> 9 */ 10 11 #define pr_fmt(fmt) "CPPC Cpufreq:" fmt 12 13 #include <linux/kernel.h> 14 #include <linux/module.h> 15 #include <linux/delay.h> 16 #include <linux/cpu.h> 17 #include <linux/cpufreq.h> 18 #include <linux/dmi.h> 19 #include <linux/time.h> 20 #include <linux/vmalloc.h> 21 22 #include <asm/unaligned.h> 23 24 #include <acpi/cppc_acpi.h> 25 26 /* Minimum struct length needed for the DMI processor entry we want */ 27 #define DMI_ENTRY_PROCESSOR_MIN_LENGTH 48 28 29 /* Offest in the DMI processor structure for the max frequency */ 30 #define DMI_PROCESSOR_MAX_SPEED 0x14 31 32 /* 33 * These structs contain information parsed from per CPU 34 * ACPI _CPC structures. 35 * e.g. For each CPU the highest, lowest supported 36 * performance capabilities, desired performance level 37 * requested etc. 38 */ 39 static struct cppc_cpudata **all_cpu_data; 40 static bool boost_supported; 41 42 struct cppc_workaround_oem_info { 43 char oem_id[ACPI_OEM_ID_SIZE + 1]; 44 char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1]; 45 u32 oem_revision; 46 }; 47 48 static struct cppc_workaround_oem_info wa_info[] = { 49 { 50 .oem_id = "HISI ", 51 .oem_table_id = "HIP07 ", 52 .oem_revision = 0, 53 }, { 54 .oem_id = "HISI ", 55 .oem_table_id = "HIP08 ", 56 .oem_revision = 0, 57 } 58 }; 59 60 /* Callback function used to retrieve the max frequency from DMI */ 61 static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private) 62 { 63 const u8 *dmi_data = (const u8 *)dm; 64 u16 *mhz = (u16 *)private; 65 66 if (dm->type == DMI_ENTRY_PROCESSOR && 67 dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) { 68 u16 val = (u16)get_unaligned((const u16 *) 69 (dmi_data + DMI_PROCESSOR_MAX_SPEED)); 70 *mhz = val > *mhz ? val : *mhz; 71 } 72 } 73 74 /* Look up the max frequency in DMI */ 75 static u64 cppc_get_dmi_max_khz(void) 76 { 77 u16 mhz = 0; 78 79 dmi_walk(cppc_find_dmi_mhz, &mhz); 80 81 /* 82 * Real stupid fallback value, just in case there is no 83 * actual value set. 84 */ 85 mhz = mhz ? mhz : 1; 86 87 return (1000 * mhz); 88 } 89 90 /* 91 * If CPPC lowest_freq and nominal_freq registers are exposed then we can 92 * use them to convert perf to freq and vice versa 93 * 94 * If the perf/freq point lies between Nominal and Lowest, we can treat 95 * (Low perf, Low freq) and (Nom Perf, Nom freq) as 2D co-ordinates of a line 96 * and extrapolate the rest 97 * For perf/freq > Nominal, we use the ratio perf:freq at Nominal for conversion 98 */ 99 static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu, 100 unsigned int perf) 101 { 102 static u64 max_khz; 103 struct cppc_perf_caps *caps = &cpu->perf_caps; 104 u64 mul, div; 105 106 if (caps->lowest_freq && caps->nominal_freq) { 107 if (perf >= caps->nominal_perf) { 108 mul = caps->nominal_freq; 109 div = caps->nominal_perf; 110 } else { 111 mul = caps->nominal_freq - caps->lowest_freq; 112 div = caps->nominal_perf - caps->lowest_perf; 113 } 114 } else { 115 if (!max_khz) 116 max_khz = cppc_get_dmi_max_khz(); 117 mul = max_khz; 118 div = caps->highest_perf; 119 } 120 return (u64)perf * mul / div; 121 } 122 123 static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu, 124 unsigned int freq) 125 { 126 static u64 max_khz; 127 struct cppc_perf_caps *caps = &cpu->perf_caps; 128 u64 mul, div; 129 130 if (caps->lowest_freq && caps->nominal_freq) { 131 if (freq >= caps->nominal_freq) { 132 mul = caps->nominal_perf; 133 div = caps->nominal_freq; 134 } else { 135 mul = caps->lowest_perf; 136 div = caps->lowest_freq; 137 } 138 } else { 139 if (!max_khz) 140 max_khz = cppc_get_dmi_max_khz(); 141 mul = caps->highest_perf; 142 div = max_khz; 143 } 144 145 return (u64)freq * mul / div; 146 } 147 148 static int cppc_cpufreq_set_target(struct cpufreq_policy *policy, 149 unsigned int target_freq, 150 unsigned int relation) 151 { 152 struct cppc_cpudata *cpu; 153 struct cpufreq_freqs freqs; 154 u32 desired_perf; 155 int ret = 0; 156 157 cpu = all_cpu_data[policy->cpu]; 158 159 desired_perf = cppc_cpufreq_khz_to_perf(cpu, target_freq); 160 /* Return if it is exactly the same perf */ 161 if (desired_perf == cpu->perf_ctrls.desired_perf) 162 return ret; 163 164 cpu->perf_ctrls.desired_perf = desired_perf; 165 freqs.old = policy->cur; 166 freqs.new = target_freq; 167 168 cpufreq_freq_transition_begin(policy, &freqs); 169 ret = cppc_set_perf(cpu->cpu, &cpu->perf_ctrls); 170 cpufreq_freq_transition_end(policy, &freqs, ret != 0); 171 172 if (ret) 173 pr_debug("Failed to set target on CPU:%d. ret:%d\n", 174 cpu->cpu, ret); 175 176 return ret; 177 } 178 179 static int cppc_verify_policy(struct cpufreq_policy_data *policy) 180 { 181 cpufreq_verify_within_cpu_limits(policy); 182 return 0; 183 } 184 185 static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy) 186 { 187 int cpu_num = policy->cpu; 188 struct cppc_cpudata *cpu = all_cpu_data[cpu_num]; 189 int ret; 190 191 cpu->perf_ctrls.desired_perf = cpu->perf_caps.lowest_perf; 192 193 ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls); 194 if (ret) 195 pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n", 196 cpu->perf_caps.lowest_perf, cpu_num, ret); 197 } 198 199 /* 200 * The PCC subspace describes the rate at which platform can accept commands 201 * on the shared PCC channel (including READs which do not count towards freq 202 * trasition requests), so ideally we need to use the PCC values as a fallback 203 * if we don't have a platform specific transition_delay_us 204 */ 205 #ifdef CONFIG_ARM64 206 #include <asm/cputype.h> 207 208 static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu) 209 { 210 unsigned long implementor = read_cpuid_implementor(); 211 unsigned long part_num = read_cpuid_part_number(); 212 unsigned int delay_us = 0; 213 214 switch (implementor) { 215 case ARM_CPU_IMP_QCOM: 216 switch (part_num) { 217 case QCOM_CPU_PART_FALKOR_V1: 218 case QCOM_CPU_PART_FALKOR: 219 delay_us = 10000; 220 break; 221 default: 222 delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC; 223 break; 224 } 225 break; 226 default: 227 delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC; 228 break; 229 } 230 231 return delay_us; 232 } 233 234 #else 235 236 static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu) 237 { 238 return cppc_get_transition_latency(cpu) / NSEC_PER_USEC; 239 } 240 #endif 241 242 static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) 243 { 244 struct cppc_cpudata *cpu; 245 unsigned int cpu_num = policy->cpu; 246 int ret = 0; 247 248 cpu = all_cpu_data[policy->cpu]; 249 250 cpu->cpu = cpu_num; 251 ret = cppc_get_perf_caps(policy->cpu, &cpu->perf_caps); 252 253 if (ret) { 254 pr_debug("Err reading CPU%d perf capabilities. ret:%d\n", 255 cpu_num, ret); 256 return ret; 257 } 258 259 /* Convert the lowest and nominal freq from MHz to KHz */ 260 cpu->perf_caps.lowest_freq *= 1000; 261 cpu->perf_caps.nominal_freq *= 1000; 262 263 /* 264 * Set min to lowest nonlinear perf to avoid any efficiency penalty (see 265 * Section 8.4.7.1.1.5 of ACPI 6.1 spec) 266 */ 267 policy->min = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_nonlinear_perf); 268 policy->max = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.nominal_perf); 269 270 /* 271 * Set cpuinfo.min_freq to Lowest to make the full range of performance 272 * available if userspace wants to use any perf between lowest & lowest 273 * nonlinear perf 274 */ 275 policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_perf); 276 policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.nominal_perf); 277 278 policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu_num); 279 policy->shared_type = cpu->shared_type; 280 281 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { 282 int i; 283 284 cpumask_copy(policy->cpus, cpu->shared_cpu_map); 285 286 for_each_cpu(i, policy->cpus) { 287 if (unlikely(i == policy->cpu)) 288 continue; 289 290 memcpy(&all_cpu_data[i]->perf_caps, &cpu->perf_caps, 291 sizeof(cpu->perf_caps)); 292 } 293 } else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) { 294 /* Support only SW_ANY for now. */ 295 pr_debug("Unsupported CPU co-ord type\n"); 296 return -EFAULT; 297 } 298 299 cpu->cur_policy = policy; 300 301 /* 302 * If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost 303 * is supported. 304 */ 305 if (cpu->perf_caps.highest_perf > cpu->perf_caps.nominal_perf) 306 boost_supported = true; 307 308 /* Set policy->cur to max now. The governors will adjust later. */ 309 policy->cur = cppc_cpufreq_perf_to_khz(cpu, 310 cpu->perf_caps.highest_perf); 311 cpu->perf_ctrls.desired_perf = cpu->perf_caps.highest_perf; 312 313 ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls); 314 if (ret) 315 pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n", 316 cpu->perf_caps.highest_perf, cpu_num, ret); 317 318 return ret; 319 } 320 321 static inline u64 get_delta(u64 t1, u64 t0) 322 { 323 if (t1 > t0 || t0 > ~(u32)0) 324 return t1 - t0; 325 326 return (u32)t1 - (u32)t0; 327 } 328 329 static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu, 330 struct cppc_perf_fb_ctrs fb_ctrs_t0, 331 struct cppc_perf_fb_ctrs fb_ctrs_t1) 332 { 333 u64 delta_reference, delta_delivered; 334 u64 reference_perf, delivered_perf; 335 336 reference_perf = fb_ctrs_t0.reference_perf; 337 338 delta_reference = get_delta(fb_ctrs_t1.reference, 339 fb_ctrs_t0.reference); 340 delta_delivered = get_delta(fb_ctrs_t1.delivered, 341 fb_ctrs_t0.delivered); 342 343 /* Check to avoid divide-by zero */ 344 if (delta_reference || delta_delivered) 345 delivered_perf = (reference_perf * delta_delivered) / 346 delta_reference; 347 else 348 delivered_perf = cpu->perf_ctrls.desired_perf; 349 350 return cppc_cpufreq_perf_to_khz(cpu, delivered_perf); 351 } 352 353 static unsigned int cppc_cpufreq_get_rate(unsigned int cpunum) 354 { 355 struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0}; 356 struct cppc_cpudata *cpu = all_cpu_data[cpunum]; 357 int ret; 358 359 ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t0); 360 if (ret) 361 return ret; 362 363 udelay(2); /* 2usec delay between sampling */ 364 365 ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t1); 366 if (ret) 367 return ret; 368 369 return cppc_get_rate_from_fbctrs(cpu, fb_ctrs_t0, fb_ctrs_t1); 370 } 371 372 static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state) 373 { 374 struct cppc_cpudata *cpudata; 375 int ret; 376 377 if (!boost_supported) { 378 pr_err("BOOST not supported by CPU or firmware\n"); 379 return -EINVAL; 380 } 381 382 cpudata = all_cpu_data[policy->cpu]; 383 if (state) 384 policy->max = cppc_cpufreq_perf_to_khz(cpudata, 385 cpudata->perf_caps.highest_perf); 386 else 387 policy->max = cppc_cpufreq_perf_to_khz(cpudata, 388 cpudata->perf_caps.nominal_perf); 389 policy->cpuinfo.max_freq = policy->max; 390 391 ret = freq_qos_update_request(policy->max_freq_req, policy->max); 392 if (ret < 0) 393 return ret; 394 395 return 0; 396 } 397 398 static struct cpufreq_driver cppc_cpufreq_driver = { 399 .flags = CPUFREQ_CONST_LOOPS, 400 .verify = cppc_verify_policy, 401 .target = cppc_cpufreq_set_target, 402 .get = cppc_cpufreq_get_rate, 403 .init = cppc_cpufreq_cpu_init, 404 .stop_cpu = cppc_cpufreq_stop_cpu, 405 .set_boost = cppc_cpufreq_set_boost, 406 .name = "cppc_cpufreq", 407 }; 408 409 /* 410 * HISI platform does not support delivered performance counter and 411 * reference performance counter. It can calculate the performance using the 412 * platform specific mechanism. We reuse the desired performance register to 413 * store the real performance calculated by the platform. 414 */ 415 static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpunum) 416 { 417 struct cppc_cpudata *cpudata = all_cpu_data[cpunum]; 418 u64 desired_perf; 419 int ret; 420 421 ret = cppc_get_desired_perf(cpunum, &desired_perf); 422 if (ret < 0) 423 return -EIO; 424 425 return cppc_cpufreq_perf_to_khz(cpudata, desired_perf); 426 } 427 428 static void cppc_check_hisi_workaround(void) 429 { 430 struct acpi_table_header *tbl; 431 acpi_status status = AE_OK; 432 int i; 433 434 status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl); 435 if (ACPI_FAILURE(status) || !tbl) 436 return; 437 438 for (i = 0; i < ARRAY_SIZE(wa_info); i++) { 439 if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) && 440 !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) && 441 wa_info[i].oem_revision == tbl->oem_revision) { 442 /* Overwrite the get() callback */ 443 cppc_cpufreq_driver.get = hisi_cppc_cpufreq_get_rate; 444 break; 445 } 446 } 447 448 acpi_put_table(tbl); 449 } 450 451 static int __init cppc_cpufreq_init(void) 452 { 453 int i, ret = 0; 454 struct cppc_cpudata *cpu; 455 456 if (acpi_disabled) 457 return -ENODEV; 458 459 all_cpu_data = kcalloc(num_possible_cpus(), sizeof(void *), 460 GFP_KERNEL); 461 if (!all_cpu_data) 462 return -ENOMEM; 463 464 for_each_possible_cpu(i) { 465 all_cpu_data[i] = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL); 466 if (!all_cpu_data[i]) 467 goto out; 468 469 cpu = all_cpu_data[i]; 470 if (!zalloc_cpumask_var(&cpu->shared_cpu_map, GFP_KERNEL)) 471 goto out; 472 } 473 474 ret = acpi_get_psd_map(all_cpu_data); 475 if (ret) { 476 pr_debug("Error parsing PSD data. Aborting cpufreq registration.\n"); 477 goto out; 478 } 479 480 cppc_check_hisi_workaround(); 481 482 ret = cpufreq_register_driver(&cppc_cpufreq_driver); 483 if (ret) 484 goto out; 485 486 return ret; 487 488 out: 489 for_each_possible_cpu(i) { 490 cpu = all_cpu_data[i]; 491 if (!cpu) 492 break; 493 free_cpumask_var(cpu->shared_cpu_map); 494 kfree(cpu); 495 } 496 497 kfree(all_cpu_data); 498 return -ENODEV; 499 } 500 501 static void __exit cppc_cpufreq_exit(void) 502 { 503 struct cppc_cpudata *cpu; 504 int i; 505 506 cpufreq_unregister_driver(&cppc_cpufreq_driver); 507 508 for_each_possible_cpu(i) { 509 cpu = all_cpu_data[i]; 510 free_cpumask_var(cpu->shared_cpu_map); 511 kfree(cpu); 512 } 513 514 kfree(all_cpu_data); 515 } 516 517 module_exit(cppc_cpufreq_exit); 518 MODULE_AUTHOR("Ashwin Chaugule"); 519 MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec"); 520 MODULE_LICENSE("GPL"); 521 522 late_initcall(cppc_cpufreq_init); 523 524 static const struct acpi_device_id cppc_acpi_ids[] __used = { 525 {ACPI_PROCESSOR_DEVICE_HID, }, 526 {} 527 }; 528 529 MODULE_DEVICE_TABLE(acpi, cppc_acpi_ids); 530