1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * CPPC (Collaborative Processor Performance Control) driver for 4 * interfacing with the CPUfreq layer and governors. See 5 * cppc_acpi.c for CPPC specific methods. 6 * 7 * (C) Copyright 2014, 2015 Linaro Ltd. 8 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org> 9 */ 10 11 #define pr_fmt(fmt) "CPPC Cpufreq:" fmt 12 13 #include <linux/kernel.h> 14 #include <linux/module.h> 15 #include <linux/delay.h> 16 #include <linux/cpu.h> 17 #include <linux/cpufreq.h> 18 #include <linux/dmi.h> 19 #include <linux/time.h> 20 #include <linux/vmalloc.h> 21 22 #include <asm/unaligned.h> 23 24 #include <acpi/cppc_acpi.h> 25 26 /* Minimum struct length needed for the DMI processor entry we want */ 27 #define DMI_ENTRY_PROCESSOR_MIN_LENGTH 48 28 29 /* Offset in the DMI processor structure for the max frequency */ 30 #define DMI_PROCESSOR_MAX_SPEED 0x14 31 32 /* 33 * These structs contain information parsed from per CPU 34 * ACPI _CPC structures. 35 * e.g. For each CPU the highest, lowest supported 36 * performance capabilities, desired performance level 37 * requested etc. 38 */ 39 static struct cppc_cpudata **all_cpu_data; 40 static bool boost_supported; 41 42 struct cppc_workaround_oem_info { 43 char oem_id[ACPI_OEM_ID_SIZE + 1]; 44 char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1]; 45 u32 oem_revision; 46 }; 47 48 static struct cppc_workaround_oem_info wa_info[] = { 49 { 50 .oem_id = "HISI ", 51 .oem_table_id = "HIP07 ", 52 .oem_revision = 0, 53 }, { 54 .oem_id = "HISI ", 55 .oem_table_id = "HIP08 ", 56 .oem_revision = 0, 57 } 58 }; 59 60 /* Callback function used to retrieve the max frequency from DMI */ 61 static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private) 62 { 63 const u8 *dmi_data = (const u8 *)dm; 64 u16 *mhz = (u16 *)private; 65 66 if (dm->type == DMI_ENTRY_PROCESSOR && 67 dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) { 68 u16 val = (u16)get_unaligned((const u16 *) 69 (dmi_data + DMI_PROCESSOR_MAX_SPEED)); 70 *mhz = val > *mhz ? val : *mhz; 71 } 72 } 73 74 /* Look up the max frequency in DMI */ 75 static u64 cppc_get_dmi_max_khz(void) 76 { 77 u16 mhz = 0; 78 79 dmi_walk(cppc_find_dmi_mhz, &mhz); 80 81 /* 82 * Real stupid fallback value, just in case there is no 83 * actual value set. 84 */ 85 mhz = mhz ? mhz : 1; 86 87 return (1000 * mhz); 88 } 89 90 /* 91 * If CPPC lowest_freq and nominal_freq registers are exposed then we can 92 * use them to convert perf to freq and vice versa 93 * 94 * If the perf/freq point lies between Nominal and Lowest, we can treat 95 * (Low perf, Low freq) and (Nom Perf, Nom freq) as 2D co-ordinates of a line 96 * and extrapolate the rest 97 * For perf/freq > Nominal, we use the ratio perf:freq at Nominal for conversion 98 */ 99 static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu_data, 100 unsigned int perf) 101 { 102 struct cppc_perf_caps *caps = &cpu_data->perf_caps; 103 static u64 max_khz; 104 u64 mul, div; 105 106 if (caps->lowest_freq && caps->nominal_freq) { 107 if (perf >= caps->nominal_perf) { 108 mul = caps->nominal_freq; 109 div = caps->nominal_perf; 110 } else { 111 mul = caps->nominal_freq - caps->lowest_freq; 112 div = caps->nominal_perf - caps->lowest_perf; 113 } 114 } else { 115 if (!max_khz) 116 max_khz = cppc_get_dmi_max_khz(); 117 mul = max_khz; 118 div = caps->highest_perf; 119 } 120 return (u64)perf * mul / div; 121 } 122 123 static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data, 124 unsigned int freq) 125 { 126 struct cppc_perf_caps *caps = &cpu_data->perf_caps; 127 static u64 max_khz; 128 u64 mul, div; 129 130 if (caps->lowest_freq && caps->nominal_freq) { 131 if (freq >= caps->nominal_freq) { 132 mul = caps->nominal_perf; 133 div = caps->nominal_freq; 134 } else { 135 mul = caps->lowest_perf; 136 div = caps->lowest_freq; 137 } 138 } else { 139 if (!max_khz) 140 max_khz = cppc_get_dmi_max_khz(); 141 mul = caps->highest_perf; 142 div = max_khz; 143 } 144 145 return (u64)freq * mul / div; 146 } 147 148 static int cppc_cpufreq_set_target(struct cpufreq_policy *policy, 149 unsigned int target_freq, 150 unsigned int relation) 151 { 152 struct cppc_cpudata *cpu_data = all_cpu_data[policy->cpu]; 153 struct cpufreq_freqs freqs; 154 u32 desired_perf; 155 int ret = 0; 156 157 desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq); 158 /* Return if it is exactly the same perf */ 159 if (desired_perf == cpu_data->perf_ctrls.desired_perf) 160 return ret; 161 162 cpu_data->perf_ctrls.desired_perf = desired_perf; 163 freqs.old = policy->cur; 164 freqs.new = target_freq; 165 166 cpufreq_freq_transition_begin(policy, &freqs); 167 ret = cppc_set_perf(cpu_data->cpu, &cpu_data->perf_ctrls); 168 cpufreq_freq_transition_end(policy, &freqs, ret != 0); 169 170 if (ret) 171 pr_debug("Failed to set target on CPU:%d. ret:%d\n", 172 cpu_data->cpu, ret); 173 174 return ret; 175 } 176 177 static int cppc_verify_policy(struct cpufreq_policy_data *policy) 178 { 179 cpufreq_verify_within_cpu_limits(policy); 180 return 0; 181 } 182 183 static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy) 184 { 185 struct cppc_cpudata *cpu_data = all_cpu_data[policy->cpu]; 186 struct cppc_perf_caps *caps = &cpu_data->perf_caps; 187 unsigned int cpu = policy->cpu; 188 int ret; 189 190 cpu_data->perf_ctrls.desired_perf = caps->lowest_perf; 191 192 ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls); 193 if (ret) 194 pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n", 195 caps->lowest_perf, cpu, ret); 196 } 197 198 /* 199 * The PCC subspace describes the rate at which platform can accept commands 200 * on the shared PCC channel (including READs which do not count towards freq 201 * transition requests), so ideally we need to use the PCC values as a fallback 202 * if we don't have a platform specific transition_delay_us 203 */ 204 #ifdef CONFIG_ARM64 205 #include <asm/cputype.h> 206 207 static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu) 208 { 209 unsigned long implementor = read_cpuid_implementor(); 210 unsigned long part_num = read_cpuid_part_number(); 211 unsigned int delay_us = 0; 212 213 switch (implementor) { 214 case ARM_CPU_IMP_QCOM: 215 switch (part_num) { 216 case QCOM_CPU_PART_FALKOR_V1: 217 case QCOM_CPU_PART_FALKOR: 218 delay_us = 10000; 219 break; 220 default: 221 delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC; 222 break; 223 } 224 break; 225 default: 226 delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC; 227 break; 228 } 229 230 return delay_us; 231 } 232 233 #else 234 235 static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu) 236 { 237 return cppc_get_transition_latency(cpu) / NSEC_PER_USEC; 238 } 239 #endif 240 241 static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) 242 { 243 struct cppc_cpudata *cpu_data = all_cpu_data[policy->cpu]; 244 struct cppc_perf_caps *caps = &cpu_data->perf_caps; 245 unsigned int cpu = policy->cpu; 246 int ret = 0; 247 248 cpu_data->cpu = cpu; 249 ret = cppc_get_perf_caps(cpu, caps); 250 251 if (ret) { 252 pr_debug("Err reading CPU%d perf capabilities. ret:%d\n", 253 cpu, ret); 254 return ret; 255 } 256 257 /* Convert the lowest and nominal freq from MHz to KHz */ 258 caps->lowest_freq *= 1000; 259 caps->nominal_freq *= 1000; 260 261 /* 262 * Set min to lowest nonlinear perf to avoid any efficiency penalty (see 263 * Section 8.4.7.1.1.5 of ACPI 6.1 spec) 264 */ 265 policy->min = cppc_cpufreq_perf_to_khz(cpu_data, 266 caps->lowest_nonlinear_perf); 267 policy->max = cppc_cpufreq_perf_to_khz(cpu_data, 268 caps->nominal_perf); 269 270 /* 271 * Set cpuinfo.min_freq to Lowest to make the full range of performance 272 * available if userspace wants to use any perf between lowest & lowest 273 * nonlinear perf 274 */ 275 policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu_data, 276 caps->lowest_perf); 277 policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu_data, 278 caps->nominal_perf); 279 280 policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu); 281 policy->shared_type = cpu_data->shared_type; 282 283 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { 284 int i; 285 286 cpumask_copy(policy->cpus, cpu_data->shared_cpu_map); 287 288 for_each_cpu(i, policy->cpus) { 289 if (unlikely(i == cpu)) 290 continue; 291 292 memcpy(&all_cpu_data[i]->perf_caps, caps, 293 sizeof(cpu_data->perf_caps)); 294 } 295 } else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) { 296 /* Support only SW_ANY for now. */ 297 pr_debug("Unsupported CPU co-ord type\n"); 298 return -EFAULT; 299 } 300 301 cpu_data->cur_policy = policy; 302 303 /* 304 * If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost 305 * is supported. 306 */ 307 if (caps->highest_perf > caps->nominal_perf) 308 boost_supported = true; 309 310 /* Set policy->cur to max now. The governors will adjust later. */ 311 policy->cur = cppc_cpufreq_perf_to_khz(cpu_data, caps->highest_perf); 312 cpu_data->perf_ctrls.desired_perf = caps->highest_perf; 313 314 ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls); 315 if (ret) 316 pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n", 317 caps->highest_perf, cpu, ret); 318 319 return ret; 320 } 321 322 static inline u64 get_delta(u64 t1, u64 t0) 323 { 324 if (t1 > t0 || t0 > ~(u32)0) 325 return t1 - t0; 326 327 return (u32)t1 - (u32)t0; 328 } 329 330 static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data, 331 struct cppc_perf_fb_ctrs fb_ctrs_t0, 332 struct cppc_perf_fb_ctrs fb_ctrs_t1) 333 { 334 u64 delta_reference, delta_delivered; 335 u64 reference_perf, delivered_perf; 336 337 reference_perf = fb_ctrs_t0.reference_perf; 338 339 delta_reference = get_delta(fb_ctrs_t1.reference, 340 fb_ctrs_t0.reference); 341 delta_delivered = get_delta(fb_ctrs_t1.delivered, 342 fb_ctrs_t0.delivered); 343 344 /* Check to avoid divide-by zero */ 345 if (delta_reference || delta_delivered) 346 delivered_perf = (reference_perf * delta_delivered) / 347 delta_reference; 348 else 349 delivered_perf = cpu_data->perf_ctrls.desired_perf; 350 351 return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf); 352 } 353 354 static unsigned int cppc_cpufreq_get_rate(unsigned int cpu) 355 { 356 struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0}; 357 struct cppc_cpudata *cpu_data = all_cpu_data[cpu]; 358 int ret; 359 360 ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0); 361 if (ret) 362 return ret; 363 364 udelay(2); /* 2usec delay between sampling */ 365 366 ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t1); 367 if (ret) 368 return ret; 369 370 return cppc_get_rate_from_fbctrs(cpu_data, fb_ctrs_t0, fb_ctrs_t1); 371 } 372 373 static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state) 374 { 375 struct cppc_cpudata *cpu_data = all_cpu_data[policy->cpu]; 376 struct cppc_perf_caps *caps = &cpu_data->perf_caps; 377 int ret; 378 379 if (!boost_supported) { 380 pr_err("BOOST not supported by CPU or firmware\n"); 381 return -EINVAL; 382 } 383 384 if (state) 385 policy->max = cppc_cpufreq_perf_to_khz(cpu_data, 386 caps->highest_perf); 387 else 388 policy->max = cppc_cpufreq_perf_to_khz(cpu_data, 389 caps->nominal_perf); 390 policy->cpuinfo.max_freq = policy->max; 391 392 ret = freq_qos_update_request(policy->max_freq_req, policy->max); 393 if (ret < 0) 394 return ret; 395 396 return 0; 397 } 398 399 static struct cpufreq_driver cppc_cpufreq_driver = { 400 .flags = CPUFREQ_CONST_LOOPS, 401 .verify = cppc_verify_policy, 402 .target = cppc_cpufreq_set_target, 403 .get = cppc_cpufreq_get_rate, 404 .init = cppc_cpufreq_cpu_init, 405 .stop_cpu = cppc_cpufreq_stop_cpu, 406 .set_boost = cppc_cpufreq_set_boost, 407 .name = "cppc_cpufreq", 408 }; 409 410 /* 411 * HISI platform does not support delivered performance counter and 412 * reference performance counter. It can calculate the performance using the 413 * platform specific mechanism. We reuse the desired performance register to 414 * store the real performance calculated by the platform. 415 */ 416 static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu) 417 { 418 struct cppc_cpudata *cpu_data = all_cpu_data[cpu]; 419 u64 desired_perf; 420 int ret; 421 422 ret = cppc_get_desired_perf(cpu, &desired_perf); 423 if (ret < 0) 424 return -EIO; 425 426 return cppc_cpufreq_perf_to_khz(cpu_data, desired_perf); 427 } 428 429 static void cppc_check_hisi_workaround(void) 430 { 431 struct acpi_table_header *tbl; 432 acpi_status status = AE_OK; 433 int i; 434 435 status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl); 436 if (ACPI_FAILURE(status) || !tbl) 437 return; 438 439 for (i = 0; i < ARRAY_SIZE(wa_info); i++) { 440 if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) && 441 !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) && 442 wa_info[i].oem_revision == tbl->oem_revision) { 443 /* Overwrite the get() callback */ 444 cppc_cpufreq_driver.get = hisi_cppc_cpufreq_get_rate; 445 break; 446 } 447 } 448 449 acpi_put_table(tbl); 450 } 451 452 static int __init cppc_cpufreq_init(void) 453 { 454 struct cppc_cpudata *cpu_data; 455 int i, ret = 0; 456 457 if (acpi_disabled) 458 return -ENODEV; 459 460 all_cpu_data = kcalloc(num_possible_cpus(), sizeof(void *), 461 GFP_KERNEL); 462 if (!all_cpu_data) 463 return -ENOMEM; 464 465 for_each_possible_cpu(i) { 466 all_cpu_data[i] = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL); 467 if (!all_cpu_data[i]) 468 goto out; 469 470 cpu_data = all_cpu_data[i]; 471 if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL)) 472 goto out; 473 } 474 475 ret = acpi_get_psd_map(all_cpu_data); 476 if (ret) { 477 pr_debug("Error parsing PSD data. Aborting cpufreq registration.\n"); 478 goto out; 479 } 480 481 cppc_check_hisi_workaround(); 482 483 ret = cpufreq_register_driver(&cppc_cpufreq_driver); 484 if (ret) 485 goto out; 486 487 return ret; 488 489 out: 490 for_each_possible_cpu(i) { 491 cpu_data = all_cpu_data[i]; 492 if (!cpu_data) 493 break; 494 free_cpumask_var(cpu_data->shared_cpu_map); 495 kfree(cpu_data); 496 } 497 498 kfree(all_cpu_data); 499 return -ENODEV; 500 } 501 502 static void __exit cppc_cpufreq_exit(void) 503 { 504 struct cppc_cpudata *cpu_data; 505 int i; 506 507 cpufreq_unregister_driver(&cppc_cpufreq_driver); 508 509 for_each_possible_cpu(i) { 510 cpu_data = all_cpu_data[i]; 511 free_cpumask_var(cpu_data->shared_cpu_map); 512 kfree(cpu_data); 513 } 514 515 kfree(all_cpu_data); 516 } 517 518 module_exit(cppc_cpufreq_exit); 519 MODULE_AUTHOR("Ashwin Chaugule"); 520 MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec"); 521 MODULE_LICENSE("GPL"); 522 523 late_initcall(cppc_cpufreq_init); 524 525 static const struct acpi_device_id cppc_acpi_ids[] __used = { 526 {ACPI_PROCESSOR_DEVICE_HID, }, 527 {} 528 }; 529 530 MODULE_DEVICE_TABLE(acpi, cppc_acpi_ids); 531