// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/drivers/thermal/cpufreq_cooling.c
 *
 *  Copyright (C) 2012	Samsung Electronics Co., Ltd(http://www.samsung.com)
 *
 *  Copyright (C) 2012-2018 Linaro Limited.
 *
 *  Authors:	Amit Daniel <amit.kachhap@linaro.org>
 *		Viresh Kumar <viresh.kumar@linaro.org>
 *
 */
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/energy_model.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/thermal.h>

#include <trace/events/thermal.h>

/*
 * Cooling state <-> CPUFreq frequency
 *
 * Cooling states are translated to frequencies throughout this driver and this
 * is the relation between them.
 *
 * Highest cooling state corresponds to lowest possible frequency.
 *
 * i.e.
 *	level 0 --> 1st Max Freq
 *	level 1 --> 2nd Max Freq
 *	...
 */

/**
 * struct time_in_idle - Idle time stats
 * @time: previous reading of the absolute time that this cpu was idle
 * @timestamp: wall time of the last invocation of get_cpu_idle_time_us()
 */
struct time_in_idle {
	u64 time;
	u64 timestamp;
};

/**
 * struct cpufreq_cooling_device - data for cooling device with cpufreq
 * @id: unique integer value corresponding to each cpufreq_cooling_device
 *	registered.
 * @last_load: load measured by the latest call to cpufreq_get_requested_power()
 * @cpufreq_state: integer value representing the current state of cpufreq
 *	cooling devices.
 * @max_level: maximum cooling level. One less than total number of valid
 *	cpufreq frequencies.
 * @em: Reference on the Energy Model of the device
 * @policy: cpufreq policy.
 * @node: list_head to link all cpufreq_cooling_device together.
 * @idle_time: idle time stats
 * @qos_req: PM QoS constraint to apply
 *
 * This structure is required for keeping information of each registered
 * cpufreq_cooling_device.
 */
struct cpufreq_cooling_device {
	int id;
	u32 last_load;
	unsigned int cpufreq_state;
	unsigned int max_level;
	struct em_perf_domain *em;
	struct cpufreq_policy *policy;
	struct list_head node;
#ifndef CONFIG_SMP
	struct time_in_idle *idle_time;
#endif
	struct freq_qos_request qos_req;
};

static DEFINE_IDA(cpufreq_ida);
static DEFINE_MUTEX(cooling_list_lock);
static LIST_HEAD(cpufreq_cdev_list);
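/*
 * Illustrative example (hypothetical frequencies, not from any real
 * platform): with an Energy Model table of { 500, 1000, 1500, 2000 } MHz,
 * which the EM keeps sorted in ascending order, max_level is 3 and the
 * mapping used by get_level() and get_state_freq() is:
 *
 *	state 0 -> table[3].frequency = 2000 MHz
 *	state 1 -> table[2].frequency = 1500 MHz
 *	state 2 -> table[1].frequency = 1000 MHz
 *	state 3 -> table[0].frequency =  500 MHz
 *
 * i.e. idx = max_level - state.
 */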
#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
/**
 * get_level: Find the level for a particular frequency
 * @cpufreq_cdev: cpufreq_cdev for which the property is required
 * @freq: Frequency
 *
 * Return: level corresponding to the frequency.
 */
static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_cdev,
			       unsigned int freq)
{
	int i;

	for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
		if (freq > cpufreq_cdev->em->table[i].frequency)
			break;
	}

	return cpufreq_cdev->max_level - i - 1;
}

static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_cdev,
			     u32 freq)
{
	int i;

	for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
		if (freq > cpufreq_cdev->em->table[i].frequency)
			break;
	}

	return cpufreq_cdev->em->table[i + 1].power;
}

static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
			     u32 power)
{
	int i;

	/* Stop at index 0 so the lowest OPP is returned for very small budgets */
	for (i = cpufreq_cdev->max_level; i > 0; i--) {
		if (power >= cpufreq_cdev->em->table[i].power)
			break;
	}

	return cpufreq_cdev->em->table[i].frequency;
}

/**
 * get_load() - get load for a cpu
 * @cpufreq_cdev: struct cpufreq_cooling_device for the cpu
 * @cpu: cpu number
 * @cpu_idx: index of the cpu in time_in_idle array
 *
 * Return: The average load of cpu @cpu in percentage since this
 * function was last called.
 */
#ifdef CONFIG_SMP
static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
		    int cpu_idx)
{
	unsigned long max = arch_scale_cpu_capacity(cpu);
	unsigned long util;

	util = sched_cpu_util(cpu, max);
	return (util * 100) / max;
}
#else /* !CONFIG_SMP */
static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
		    int cpu_idx)
{
	u32 load;
	u64 now, now_idle, delta_time, delta_idle;
	struct time_in_idle *idle_time = &cpufreq_cdev->idle_time[cpu_idx];

	now_idle = get_cpu_idle_time(cpu, &now, 0);
	delta_idle = now_idle - idle_time->time;
	delta_time = now - idle_time->timestamp;

	if (delta_time <= delta_idle)
		load = 0;
	else
		load = div64_u64(100 * (delta_time - delta_idle), delta_time);

	idle_time->time = now_idle;
	idle_time->timestamp = now;

	return load;
}
#endif /* CONFIG_SMP */

/**
 * get_dynamic_power() - calculate the dynamic power
 * @cpufreq_cdev: &cpufreq_cooling_device for this cdev
 * @freq: current frequency
 *
 * Return: the dynamic power consumed by the cpus described by
 * @cpufreq_cdev.
 */
static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_cdev,
			     unsigned long freq)
{
	u32 raw_cpu_power;

	raw_cpu_power = cpu_freq_to_power(cpufreq_cdev, freq);
	return (raw_cpu_power * cpufreq_cdev->last_load) / 100;
}
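/*
 * Worked example (illustrative numbers, not taken from any real platform):
 * if cpu_freq_to_power() reports 1000 mW per CPU at the current frequency
 * and the load summed over the policy's CPUs by the last call to
 * cpufreq_get_requested_power() was 150 (one CPU at 100%, another at 50%),
 * get_dynamic_power() returns (1000 * 150) / 100 = 1500 mW.
 */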
/**
 * cpufreq_get_requested_power() - get the current power
 * @cdev: &thermal_cooling_device pointer
 * @power: pointer in which to store the resulting power
 *
 * Calculate the current power consumption of the cpus in milliwatts
 * and store it in @power. This function should actually calculate
 * the requested power, but it's hard to get the frequency that
 * cpufreq would have assigned if there were no thermal limits.
 * Instead, we calculate the current power on the assumption that the
 * immediate future will look like the immediate past.
 *
 * We use the current frequency and the average load since this
 * function was last called. In reality, there could have been
 * multiple opps since this function was last called and that affects
 * the load calculation. While it's not perfectly accurate, this
 * simplification is good enough and works. REVISIT this, as more
 * complex code may be needed if experiments show that it's not
 * accurate enough.
 *
 * Return: always 0.
 */
static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
				       u32 *power)
{
	unsigned long freq;
	int i = 0, cpu;
	u32 total_load = 0;
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
	struct cpufreq_policy *policy = cpufreq_cdev->policy;
	u32 *load_cpu = NULL;

	freq = cpufreq_quick_get(policy->cpu);

	if (trace_thermal_power_cpu_get_power_enabled()) {
		u32 ncpus = cpumask_weight(policy->related_cpus);

		load_cpu = kcalloc(ncpus, sizeof(*load_cpu), GFP_KERNEL);
	}

	for_each_cpu(cpu, policy->related_cpus) {
		u32 load;

		if (cpu_online(cpu))
			load = get_load(cpufreq_cdev, cpu, i);
		else
			load = 0;

		total_load += load;
		if (load_cpu)
			load_cpu[i] = load;

		i++;
	}

	cpufreq_cdev->last_load = total_load;

	*power = get_dynamic_power(cpufreq_cdev, freq);

	if (load_cpu) {
		trace_thermal_power_cpu_get_power(policy->related_cpus, freq,
						  load_cpu, i, *power);

		kfree(load_cpu);
	}

	return 0;
}

/**
 * cpufreq_state2power() - convert a cpu cdev state to power consumed
 * @cdev: &thermal_cooling_device pointer
 * @state: cooling device state to be converted
 * @power: pointer in which to store the resulting power
 *
 * Convert cooling device state @state into power consumption in
 * milliwatts assuming 100% load. Store the calculated power in
 * @power.
 *
 * Return: 0 on success, -EINVAL if the cooling device state exceeds
 * the maximum cooling level.
 */
static int cpufreq_state2power(struct thermal_cooling_device *cdev,
			       unsigned long state, u32 *power)
{
	unsigned int freq, num_cpus, idx;
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;

	/* Requested state should not exceed max_level */
	if (state > cpufreq_cdev->max_level)
		return -EINVAL;

	num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus);

	idx = cpufreq_cdev->max_level - state;
	freq = cpufreq_cdev->em->table[idx].frequency;
	*power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus;

	return 0;
}

/**
 * cpufreq_power2state() - convert power to a cooling device state
 * @cdev: &thermal_cooling_device pointer
 * @power: power in milliwatts to be converted
 * @state: pointer in which to store the resulting state
 *
 * Calculate a cooling device state for the cpus described by @cdev
 * that would allow them to consume at most @power mW and store it in
 * @state. Note that this calculation depends on external factors
 * such as the cpu load. Calling this function with the same power
 * as input can yield different cooling device states depending on
 * those external factors.
 *
 * Return: always 0.
 */
static int cpufreq_power2state(struct thermal_cooling_device *cdev,
			       u32 power, unsigned long *state)
{
	unsigned int target_freq;
	u32 last_load, normalised_power;
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
	struct cpufreq_policy *policy = cpufreq_cdev->policy;

	last_load = cpufreq_cdev->last_load ?: 1;
	normalised_power = (power * 100) / last_load;
	target_freq = cpu_power_to_freq(cpufreq_cdev, normalised_power);

	*state = get_level(cpufreq_cdev, target_freq);
	trace_thermal_power_cpu_limit(policy->related_cpus, target_freq, *state,
				      power);
	return 0;
}
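/*
 * Worked example (illustrative numbers): with a measured last_load of 150
 * and a power budget of 1500 mW, normalised_power is (1500 * 100) / 150 =
 * 1000 mW. cpu_power_to_freq() then picks the highest EM performance state
 * whose per-CPU power does not exceed 1000 mW, and get_level() converts
 * that frequency back into a cooling state.
 */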
static inline bool em_is_sane(struct cpufreq_cooling_device *cpufreq_cdev,
			      struct em_perf_domain *em)
{
	struct cpufreq_policy *policy;
	unsigned int nr_levels;

	if (!em)
		return false;

	policy = cpufreq_cdev->policy;
	if (!cpumask_equal(policy->related_cpus, em_span_cpus(em))) {
		pr_err("The span of pd %*pbl is misaligned with cpufreq policy %*pbl\n",
		       cpumask_pr_args(em_span_cpus(em)),
		       cpumask_pr_args(policy->related_cpus));
		return false;
	}

	nr_levels = cpufreq_cdev->max_level + 1;
	if (em_pd_nr_perf_states(em) != nr_levels) {
		pr_err("The number of performance states in pd %*pbl (%u) doesn't match the number of cooling levels (%u)\n",
		       cpumask_pr_args(em_span_cpus(em)),
		       em_pd_nr_perf_states(em), nr_levels);
		return false;
	}

	return true;
}
#endif /* CONFIG_THERMAL_GOV_POWER_ALLOCATOR */

#ifdef CONFIG_SMP
static inline int allocate_idle_time(struct cpufreq_cooling_device *cpufreq_cdev)
{
	return 0;
}

static inline void free_idle_time(struct cpufreq_cooling_device *cpufreq_cdev)
{
}
#else
static int allocate_idle_time(struct cpufreq_cooling_device *cpufreq_cdev)
{
	unsigned int num_cpus = cpumask_weight(cpufreq_cdev->policy->related_cpus);

	cpufreq_cdev->idle_time = kcalloc(num_cpus,
					  sizeof(*cpufreq_cdev->idle_time),
					  GFP_KERNEL);
	if (!cpufreq_cdev->idle_time)
		return -ENOMEM;

	return 0;
}

static void free_idle_time(struct cpufreq_cooling_device *cpufreq_cdev)
{
	kfree(cpufreq_cdev->idle_time);
	cpufreq_cdev->idle_time = NULL;
}
#endif /* CONFIG_SMP */

static unsigned int get_state_freq(struct cpufreq_cooling_device *cpufreq_cdev,
				   unsigned long state)
{
	struct cpufreq_policy *policy;
	unsigned long idx;

#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
	/* Use the Energy Model table if available */
	if (cpufreq_cdev->em) {
		idx = cpufreq_cdev->max_level - state;
		return cpufreq_cdev->em->table[idx].frequency;
	}
#endif

	/* Otherwise, fallback on the CPUFreq table */
	policy = cpufreq_cdev->policy;
	if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
		idx = cpufreq_cdev->max_level - state;
	else
		idx = state;

	return policy->freq_table[idx].frequency;
}
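/*
 * Illustrative example (hypothetical table, assuming every entry is valid):
 * without an Energy Model and with a descending cpufreq table of
 * { 2000, 1500, 1000, 500 } MHz, get_state_freq() uses idx = state directly,
 * so state 0 still maps to 2000 MHz and state 3 (max_level) to 500 MHz,
 * matching the mapping of the ascending/EM case.
 */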
/* cpufreq cooling device callback functions are defined below */

/**
 * cpufreq_get_max_state - callback function to get the max cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: fill this variable with the max cooling state.
 *
 * Callback for the thermal cooling device to return the cpufreq
 * max cooling state.
 *
 * Return: 0 on success, an error code otherwise.
 */
static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
{
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;

	*state = cpufreq_cdev->max_level;
	return 0;
}

/**
 * cpufreq_get_cur_state - callback function to get the current cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: fill this variable with the current cooling state.
 *
 * Callback for the thermal cooling device to return the cpufreq
 * current cooling state.
 *
 * Return: 0 on success, an error code otherwise.
 */
static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
{
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;

	*state = cpufreq_cdev->cpufreq_state;

	return 0;
}

/**
 * cpufreq_set_cur_state - callback function to set the current cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: the cooling state to set.
 *
 * Callback for the thermal cooling device to change the cpufreq
 * current cooling state.
 *
 * Return: 0 on success, an error code otherwise.
 */
static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
				 unsigned long state)
{
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
	struct cpumask *cpus;
	unsigned int frequency;
	unsigned long max_capacity, capacity;
	int ret;

	/* Requested state should not exceed max_level */
	if (state > cpufreq_cdev->max_level)
		return -EINVAL;

	/* Check if the old cooling action is same as new cooling action */
	if (cpufreq_cdev->cpufreq_state == state)
		return 0;

	frequency = get_state_freq(cpufreq_cdev, state);

	ret = freq_qos_update_request(&cpufreq_cdev->qos_req, frequency);
	if (ret >= 0) {
		cpufreq_cdev->cpufreq_state = state;
		cpus = cpufreq_cdev->policy->cpus;
		max_capacity = arch_scale_cpu_capacity(cpumask_first(cpus));
		capacity = frequency * max_capacity;
		capacity /= cpufreq_cdev->policy->cpuinfo.max_freq;
		arch_set_thermal_pressure(cpus, max_capacity - capacity);
		ret = 0;
	}

	return ret;
}
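/*
 * Worked example (illustrative numbers): for a policy whose CPUs have
 * max_capacity = 1024 and cpuinfo.max_freq = 2000 MHz, capping the
 * frequency at 1500 MHz gives capacity = 1500 * 1024 / 2000 = 768, so
 * arch_set_thermal_pressure() is told that 1024 - 768 = 256 units of
 * capacity are currently lost to thermal capping.
 */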
/* Bind cpufreq callbacks to thermal cooling device ops */

static struct thermal_cooling_device_ops cpufreq_cooling_ops = {
	.get_max_state = cpufreq_get_max_state,
	.get_cur_state = cpufreq_get_cur_state,
	.set_cur_state = cpufreq_set_cur_state,
};

/**
 * __cpufreq_cooling_register - helper function to create cpufreq cooling device
 * @np: a valid struct device_node to the cooling device device tree node
 * @policy: cpufreq policy
 * @em: Energy Model of the cpufreq policy
 *
 * This interface function registers the cpufreq cooling device with the name
 * "thermal-cpufreq-%d". This API can support multiple instances of cpufreq
 * cooling devices. It also gives the opportunity to link the cooling device
 * with a device tree node, in order to bind it via the thermal DT code.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * on failure, it returns a corresponding ERR_PTR().
 */
static struct thermal_cooling_device *
__cpufreq_cooling_register(struct device_node *np,
			struct cpufreq_policy *policy,
			struct em_perf_domain *em)
{
	struct thermal_cooling_device *cdev;
	struct cpufreq_cooling_device *cpufreq_cdev;
	char dev_name[THERMAL_NAME_LENGTH];
	unsigned int i;
	struct device *dev;
	int ret;
	struct thermal_cooling_device_ops *cooling_ops;

	/* Validate the policy before dereferencing it */
	if (IS_ERR_OR_NULL(policy)) {
		pr_err("%s: cpufreq policy isn't valid: %p\n", __func__, policy);
		return ERR_PTR(-EINVAL);
	}

	dev = get_cpu_device(policy->cpu);
	if (unlikely(!dev)) {
		pr_warn("No cpu device for cpu %d\n", policy->cpu);
		return ERR_PTR(-ENODEV);
	}

	i = cpufreq_table_count_valid_entries(policy);
	if (!i) {
		pr_debug("%s: CPUFreq table not found or has no valid entries\n",
			 __func__);
		return ERR_PTR(-ENODEV);
	}

	cpufreq_cdev = kzalloc(sizeof(*cpufreq_cdev), GFP_KERNEL);
	if (!cpufreq_cdev)
		return ERR_PTR(-ENOMEM);

	cpufreq_cdev->policy = policy;

	ret = allocate_idle_time(cpufreq_cdev);
	if (ret) {
		cdev = ERR_PTR(ret);
		goto free_cdev;
	}

	/* max_level is an index, not a counter */
	cpufreq_cdev->max_level = i - 1;

	ret = ida_simple_get(&cpufreq_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		cdev = ERR_PTR(ret);
		goto free_idle_time;
	}
	cpufreq_cdev->id = ret;

	snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
		 cpufreq_cdev->id);

	cooling_ops = &cpufreq_cooling_ops;

#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
	if (em_is_sane(cpufreq_cdev, em)) {
		cpufreq_cdev->em = em;
		cooling_ops->get_requested_power = cpufreq_get_requested_power;
		cooling_ops->state2power = cpufreq_state2power;
		cooling_ops->power2state = cpufreq_power2state;
	} else
#endif
	if (policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED) {
		pr_err("%s: unsorted frequency tables are not supported\n",
		       __func__);
		cdev = ERR_PTR(-EINVAL);
		goto remove_ida;
	}

	ret = freq_qos_add_request(&policy->constraints,
				   &cpufreq_cdev->qos_req, FREQ_QOS_MAX,
				   get_state_freq(cpufreq_cdev, 0));
	if (ret < 0) {
		pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
		       ret);
		cdev = ERR_PTR(ret);
		goto remove_ida;
	}

	cdev = thermal_of_cooling_device_register(np, dev_name, cpufreq_cdev,
						  cooling_ops);
	if (IS_ERR(cdev))
		goto remove_qos_req;

	mutex_lock(&cooling_list_lock);
	list_add(&cpufreq_cdev->node, &cpufreq_cdev_list);
	mutex_unlock(&cooling_list_lock);

	return cdev;

remove_qos_req:
	freq_qos_remove_request(&cpufreq_cdev->qos_req);
remove_ida:
	ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
free_idle_time:
	free_idle_time(cpufreq_cdev);
free_cdev:
	kfree(cpufreq_cdev);
	return cdev;
}
/**
 * cpufreq_cooling_register - function to create cpufreq cooling device.
 * @policy: cpufreq policy
 *
 * This interface function registers the cpufreq cooling device with the name
 * "thermal-cpufreq-%d". This API can support multiple instances of cpufreq
 * cooling devices.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * on failure, it returns a corresponding ERR_PTR().
 */
struct thermal_cooling_device *
cpufreq_cooling_register(struct cpufreq_policy *policy)
{
	return __cpufreq_cooling_register(NULL, policy, NULL);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_register);

/**
 * of_cpufreq_cooling_register - function to create cpufreq cooling device.
 * @policy: cpufreq policy
 *
 * This interface function registers the cpufreq cooling device with the name
 * "thermal-cpufreq-%d". This API can support multiple instances of cpufreq
 * cooling devices. Using this API, the cpufreq cooling device will be
 * linked to the device tree node provided.
 *
 * Using this function, the cooling device will also implement the power
 * extensions, based on the Energy Model of the policy's CPUs when one is
 * available. The cpus must have registered their OPPs using the OPP library.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * and NULL on failure.
 */
struct thermal_cooling_device *
of_cpufreq_cooling_register(struct cpufreq_policy *policy)
{
	struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
	struct thermal_cooling_device *cdev = NULL;

	if (!np) {
		pr_err("cpufreq_cooling: OF node not available for cpu%d\n",
		       policy->cpu);
		return NULL;
	}

	if (of_find_property(np, "#cooling-cells", NULL)) {
		struct em_perf_domain *em = em_cpu_get(policy->cpu);

		cdev = __cpufreq_cooling_register(np, policy, em);
		if (IS_ERR(cdev)) {
			pr_err("cpufreq_cooling: cpu%d failed to register as cooling device: %ld\n",
			       policy->cpu, PTR_ERR(cdev));
			cdev = NULL;
		}
	}

	of_node_put(np);
	return cdev;
}
EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);

/**
 * cpufreq_cooling_unregister - function to remove cpufreq cooling device.
 * @cdev: thermal cooling device pointer.
 *
 * This interface function unregisters the "thermal-cpufreq-%d" cooling device.
 */
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
	struct cpufreq_cooling_device *cpufreq_cdev;

	if (!cdev)
		return;

	cpufreq_cdev = cdev->devdata;

	mutex_lock(&cooling_list_lock);
	list_del(&cpufreq_cdev->node);
	mutex_unlock(&cooling_list_lock);

	thermal_cooling_device_unregister(cdev);
	freq_qos_remove_request(&cpufreq_cdev->qos_req);
	ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
	free_idle_time(cpufreq_cdev);
	kfree(cpufreq_cdev);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);
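/*
 * Usage sketch (illustrative only, not part of this driver): a cpufreq
 * driver whose CPU DT nodes declare #cooling-cells would typically register
 * the cooling device once the policy is fully set up and drop it on exit,
 * much like the cpufreq-dt driver does. The callbacks below are
 * hypothetical:
 *
 *	static void my_cpufreq_ready(struct cpufreq_policy *policy)
 *	{
 *		policy->cdev = of_cpufreq_cooling_register(policy);
 *	}
 *
 *	static int my_cpufreq_exit(struct cpufreq_policy *policy)
 *	{
 *		cpufreq_cooling_unregister(policy->cdev);
 *		return 0;
 *	}
 */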