// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPPC (Collaborative Processor Performance Control) driver for
 * interfacing with the CPUfreq layer and governors. See
 * cppc_acpi.c for CPPC specific methods.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 */

#define pr_fmt(fmt)	"CPPC Cpufreq:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/time.h>
#include <linux/vmalloc.h>

#include <asm/unaligned.h>

#include <acpi/cppc_acpi.h>

/* Minimum struct length needed for the DMI processor entry we want */
#define DMI_ENTRY_PROCESSOR_MIN_LENGTH	48

/* Offset in the DMI processor structure for the max frequency */
#define DMI_PROCESSOR_MAX_SPEED		0x14

/*
 * This list contains information parsed from per CPU ACPI _CPC and _PSD
 * structures: e.g. the highest and lowest supported performance, capabilities,
 * desired performance, level requested etc. Depending on the share_type, not
 * all CPUs will have an entry in the list.
 */
static LIST_HEAD(cpu_data_list);

static bool boost_supported;

struct cppc_workaround_oem_info {
	char oem_id[ACPI_OEM_ID_SIZE + 1];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
	u32 oem_revision;
};

static struct cppc_workaround_oem_info wa_info[] = {
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP07   ",
		.oem_revision	= 0,
	}, {
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP08   ",
		.oem_revision	= 0,
	}
};

/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
	const u8 *dmi_data = (const u8 *)dm;
	u16 *mhz = (u16 *)private;

	if (dm->type == DMI_ENTRY_PROCESSOR &&
	    dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
		u16 val = (u16)get_unaligned((const u16 *)
				(dmi_data + DMI_PROCESSOR_MAX_SPEED));
		*mhz = val > *mhz ? val : *mhz;
	}
}

/* Look up the max frequency in DMI */
static u64 cppc_get_dmi_max_khz(void)
{
	u16 mhz = 0;

	dmi_walk(cppc_find_dmi_mhz, &mhz);

	/*
	 * Real stupid fallback value, just in case there is no
	 * actual value set.
	 */
	mhz = mhz ? mhz : 1;

	return (1000 * mhz);
}

/*
 * If CPPC lowest_freq and nominal_freq registers are exposed then we can
 * use them to convert perf to freq and vice versa.
 *
 * If the perf/freq point lies between Nominal and Lowest, we treat
 * (Lowest perf, Lowest freq) and (Nominal perf, Nominal freq) as 2D
 * coordinates of a line and interpolate linearly between them.
 * For perf/freq above Nominal, we use the perf:freq ratio at Nominal for
 * the conversion.
 */
static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu_data,
					     unsigned int perf)
{
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	static u64 max_khz;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		if (perf >= caps->nominal_perf) {
			mul = caps->nominal_freq;
			div = caps->nominal_perf;
		} else {
			mul = caps->nominal_freq - caps->lowest_freq;
			div = caps->nominal_perf - caps->lowest_perf;
		}
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = max_khz;
		div = caps->highest_perf;
	}
	return (u64)perf * mul / div;
}

static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data,
					     unsigned int freq)
{
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	static u64 max_khz;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		if (freq >= caps->nominal_freq) {
			mul = caps->nominal_perf;
			div = caps->nominal_freq;
		} else {
			mul = caps->lowest_perf;
			div = caps->lowest_freq;
		}
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = caps->highest_perf;
		div = max_khz;
	}

	return (u64)freq * mul / div;
}
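/*
 * Worked example of the conversion above (illustrative numbers only, not
 * taken from any real platform): assume lowest_perf = 100 at
 * lowest_freq = 500000 kHz, nominal_perf = 200 at nominal_freq = 1000000 kHz
 * and highest_perf = 250. Then:
 *
 *   perf_to_khz(150)    = 150 * (1000000 - 500000) / (200 - 100) =  750000 kHz
 *   perf_to_khz(250)    = 250 * 1000000 / 200                    = 1250000 kHz
 *   khz_to_perf(750000) = 750000 * 100 / 500000                  = 150
 *
 * When the _CPC frequency registers are not populated, both helpers instead
 * scale against the DMI-reported maximum frequency, treating highest_perf
 * as equivalent to that frequency.
 */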
static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
				   unsigned int target_freq,
				   unsigned int relation)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	unsigned int cpu = policy->cpu;
	struct cpufreq_freqs freqs;
	u32 desired_perf;
	int ret = 0;

	desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
	/* Return if it is exactly the same perf */
	if (desired_perf == cpu_data->perf_ctrls.desired_perf)
		return ret;

	cpu_data->perf_ctrls.desired_perf = desired_perf;
	freqs.old = policy->cur;
	freqs.new = target_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
	cpufreq_freq_transition_end(policy, &freqs, ret != 0);

	if (ret)
		pr_debug("Failed to set target on CPU:%d. ret:%d\n",
			 cpu, ret);

	return ret;
}
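/*
 * Illustrative request flow (using the made-up capabilities from the example
 * above): a governor asking for 750000 kHz is translated to
 * desired_perf = 150 and handed to the platform via cppc_set_perf().
 * Identical back-to-back requests are filtered out before issuing the
 * (potentially slow) platform command, and with CPUFREQ_SHARED_TYPE_ANY all
 * CPUs in the domain share this policy, so the request is made once on
 * policy->cpu and coordination is left to the platform.
 */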
static int cppc_verify_policy(struct cpufreq_policy_data *policy)
{
	cpufreq_verify_within_cpu_limits(policy);
	return 0;
}

static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	unsigned int cpu = policy->cpu;
	int ret;

	cpu_data->perf_ctrls.desired_perf = caps->lowest_perf;

	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
	if (ret)
		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
			 caps->lowest_perf, cpu, ret);

	/* Remove CPU node from list and free driver data for policy */
	free_cpumask_var(cpu_data->shared_cpu_map);
	list_del(&cpu_data->node);
	kfree(policy->driver_data);
	policy->driver_data = NULL;
}

/*
 * The PCC subspace describes the rate at which the platform can accept
 * commands on the shared PCC channel (including READs which do not count
 * towards freq transition requests), so ideally we need to use the PCC
 * values as a fallback if we don't have a platform specific
 * transition_delay_us.
 */
#ifdef CONFIG_ARM64
#include <asm/cputype.h>

static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_num = read_cpuid_part_number();

	switch (implementor) {
	case ARM_CPU_IMP_QCOM:
		switch (part_num) {
		case QCOM_CPU_PART_FALKOR_V1:
		case QCOM_CPU_PART_FALKOR:
			return 10000;
		}
	}
	return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}

#else

static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
	return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}
#endif

static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
{
	struct cppc_cpudata *cpu_data;
	int ret;

	cpu_data = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL);
	if (!cpu_data)
		goto out;

	if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL))
		goto free_cpu;

	ret = acpi_get_psd_map(cpu, cpu_data);
	if (ret) {
		pr_debug("Err parsing CPU%d PSD data: ret:%d\n", cpu, ret);
		goto free_mask;
	}

	ret = cppc_get_perf_caps(cpu, &cpu_data->perf_caps);
	if (ret) {
		pr_debug("Err reading CPU%d perf caps: ret:%d\n", cpu, ret);
		goto free_mask;
	}

	/* Convert the lowest and nominal freq from MHz to KHz */
	cpu_data->perf_caps.lowest_freq *= 1000;
	cpu_data->perf_caps.nominal_freq *= 1000;

	list_add(&cpu_data->node, &cpu_data_list);

	return cpu_data;

free_mask:
	free_cpumask_var(cpu_data->shared_cpu_map);
free_cpu:
	kfree(cpu_data);
out:
	return NULL;
}

static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;
	struct cppc_cpudata *cpu_data;
	struct cppc_perf_caps *caps;
	int ret;

	cpu_data = cppc_cpufreq_get_cpu_data(cpu);
	if (!cpu_data) {
		pr_err("Error in acquiring _CPC/_PSD data for CPU%d.\n", cpu);
		return -ENODEV;
	}
	caps = &cpu_data->perf_caps;
	policy->driver_data = cpu_data;

	/*
	 * Set min to lowest nonlinear perf to avoid any efficiency penalty (see
	 * Section 8.4.7.1.1.5 of ACPI 6.1 spec)
	 */
	policy->min = cppc_cpufreq_perf_to_khz(cpu_data,
					       caps->lowest_nonlinear_perf);
	policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
					       caps->nominal_perf);

	/*
	 * Set cpuinfo.min_freq to Lowest to make the full range of performance
	 * available if userspace wants to use any perf between lowest & lowest
	 * nonlinear perf
	 */
	policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu_data,
							    caps->lowest_perf);
	policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu_data,
							    caps->nominal_perf);

	policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
	policy->shared_type = cpu_data->shared_type;

	switch (policy->shared_type) {
	case CPUFREQ_SHARED_TYPE_HW:
	case CPUFREQ_SHARED_TYPE_NONE:
		/* Nothing to be done - we'll have a policy for each CPU */
		break;
	case CPUFREQ_SHARED_TYPE_ANY:
		/*
		 * All CPUs in the domain will share a policy and all cpufreq
		 * operations will use a single cppc_cpudata structure stored
		 * in policy->driver_data.
		 */
		cpumask_copy(policy->cpus, cpu_data->shared_cpu_map);
		break;
	default:
		pr_debug("Unsupported CPU co-ord type: %d\n",
			 policy->shared_type);
		return -EFAULT;
	}

	/*
	 * If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost
	 * is supported.
	 */
	if (caps->highest_perf > caps->nominal_perf)
		boost_supported = true;

	/* Set policy->cur to max now. The governors will adjust later. */
	policy->cur = cppc_cpufreq_perf_to_khz(cpu_data, caps->highest_perf);
	cpu_data->perf_ctrls.desired_perf = caps->highest_perf;

	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
	if (ret)
		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
			 caps->highest_perf, cpu, ret);

	return ret;
}
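/*
 * Sketch of the resulting cpufreq limits for the illustrative (made-up)
 * capabilities used earlier: lowest = 100, lowest_nonlinear = 120,
 * nominal = 200 and highest = 250 perf units.
 *
 *   policy->cpuinfo.min_freq = perf_to_khz(lowest)           =  500000 kHz
 *   policy->min              = perf_to_khz(lowest_nonlinear) =  600000 kHz
 *   policy->max              = perf_to_khz(nominal)          = 1000000 kHz
 *   policy->cpuinfo.max_freq = perf_to_khz(nominal)          = 1000000 kHz
 *
 * Boost capacity (highest > nominal) only raises policy->max later, via
 * cppc_cpufreq_set_boost().
 */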
/*
 * The platform feedback counters may be narrower than 64 bits. If t1 has not
 * wrapped past t0, or if t0 already exceeds 32 bits, a plain 64-bit
 * subtraction is correct; otherwise assume a 32-bit counter wrapped and
 * compute the delta in 32-bit arithmetic.
 */
static inline u64 get_delta(u64 t1, u64 t0)
{
	if (t1 > t0 || t0 > ~(u32)0)
		return t1 - t0;

	return (u32)t1 - (u32)t0;
}

static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data,
				     struct cppc_perf_fb_ctrs fb_ctrs_t0,
				     struct cppc_perf_fb_ctrs fb_ctrs_t1)
{
	u64 delta_reference, delta_delivered;
	u64 reference_perf, delivered_perf;

	reference_perf = fb_ctrs_t0.reference_perf;

	delta_reference = get_delta(fb_ctrs_t1.reference,
				    fb_ctrs_t0.reference);
	delta_delivered = get_delta(fb_ctrs_t1.delivered,
				    fb_ctrs_t0.delivered);

	/* Check to avoid divide-by-zero */
	if (delta_reference || delta_delivered)
		delivered_perf = (reference_perf * delta_delivered) /
					delta_reference;
	else
		delivered_perf = cpu_data->perf_ctrls.desired_perf;

	return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
}

static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
{
	struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cppc_cpudata *cpu_data = policy->driver_data;
	int ret;

	cpufreq_cpu_put(policy);

	ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
	if (ret)
		return ret;

	udelay(2);	/* 2usec delay between sampling */

	ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t1);
	if (ret)
		return ret;

	return cppc_get_rate_from_fbctrs(cpu_data, fb_ctrs_t0, fb_ctrs_t1);
}
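/*
 * Worked example for the feedback counters (made-up numbers): if the two
 * samples taken 2us apart show delta_reference = 2000 and
 * delta_delivered = 1500 at reference_perf = 200, then
 *
 *   delivered_perf = 200 * 1500 / 2000 = 150
 *
 * which cppc_cpufreq_perf_to_khz() would map to 750000 kHz with the
 * illustrative capabilities used earlier. If neither counter moved between
 * the two reads, the last requested perf level is reported instead.
 */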
static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	int ret;

	if (!boost_supported) {
		pr_err("BOOST not supported by CPU or firmware\n");
		return -EINVAL;
	}

	if (state)
		policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
						       caps->highest_perf);
	else
		policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
						       caps->nominal_perf);
	policy->cpuinfo.max_freq = policy->max;

	ret = freq_qos_update_request(policy->max_freq_req, policy->max);
	if (ret < 0)
		return ret;

	return 0;
}

static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;

	return cpufreq_show_cpus(cpu_data->shared_cpu_map, buf);
}
cpufreq_freq_attr_ro(freqdomain_cpus);

static struct freq_attr *cppc_cpufreq_attr[] = {
	&freqdomain_cpus,
	NULL,
};

static struct cpufreq_driver cppc_cpufreq_driver = {
	.flags = CPUFREQ_CONST_LOOPS,
	.verify = cppc_verify_policy,
	.target = cppc_cpufreq_set_target,
	.get = cppc_cpufreq_get_rate,
	.init = cppc_cpufreq_cpu_init,
	.stop_cpu = cppc_cpufreq_stop_cpu,
	.set_boost = cppc_cpufreq_set_boost,
	.attr = cppc_cpufreq_attr,
	.name = "cppc_cpufreq",
};

/*
 * HiSilicon platforms do not support the delivered and reference performance
 * counters. Instead, the platform calculates the delivered performance
 * through a platform specific mechanism and reports it back via the desired
 * performance register, which we read here in place of the counters.
 */
static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cppc_cpudata *cpu_data = policy->driver_data;
	u64 desired_perf;
	int ret;

	cpufreq_cpu_put(policy);

	ret = cppc_get_desired_perf(cpu, &desired_perf);
	if (ret < 0)
		return -EIO;

	return cppc_cpufreq_perf_to_khz(cpu_data, desired_perf);
}

static void cppc_check_hisi_workaround(void)
{
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	int i;

	status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);
	if (ACPI_FAILURE(status) || !tbl)
		return;

	for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
		if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
		    !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
		    wa_info[i].oem_revision == tbl->oem_revision) {
			/* Overwrite the get() callback */
			cppc_cpufreq_driver.get = hisi_cppc_cpufreq_get_rate;
			break;
		}
	}

	acpi_put_table(tbl);
}

static int __init cppc_cpufreq_init(void)
{
	if (acpi_disabled || !acpi_cpc_valid())
		return -ENODEV;

	INIT_LIST_HEAD(&cpu_data_list);

	cppc_check_hisi_workaround();

	return cpufreq_register_driver(&cppc_cpufreq_driver);
}

static inline void free_cpu_data(void)
{
	struct cppc_cpudata *iter, *tmp;

	list_for_each_entry_safe(iter, tmp, &cpu_data_list, node) {
		free_cpumask_var(iter->shared_cpu_map);
		list_del(&iter->node);
		kfree(iter);
	}
}

static void __exit cppc_cpufreq_exit(void)
{
	cpufreq_unregister_driver(&cppc_cpufreq_driver);

	free_cpu_data();
}
module_exit(cppc_cpufreq_exit);

MODULE_AUTHOR("Ashwin Chaugule");
MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
MODULE_LICENSE("GPL");

late_initcall(cppc_cpufreq_init);

static const struct acpi_device_id cppc_acpi_ids[] __used = {
	{ACPI_PROCESSOR_DEVICE_HID, },
	{}
};

MODULE_DEVICE_TABLE(acpi, cppc_acpi_ids);