/*
 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
 *	    for Non-CPU Devices.
 *
 * Copyright (C) 2011 Samsung Electronics
 *	MyungJoo Ham <myungjoo.ham@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/pm_opp.h>
#include <linux/devfreq.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <linux/of.h>
#include "governor.h"

#define MAX(a,b)	((a > b) ? a : b)
#define MIN(a,b)	((a < b) ? a : b)

static struct class *devfreq_class;

/*
 * devfreq core provides delayed work based load monitoring helper
 * functions. Governors can use these or can implement their own
 * monitoring mechanism.
 */
static struct workqueue_struct *devfreq_wq;

/* The list of all device-devfreq governors */
static LIST_HEAD(devfreq_governor_list);
/* The list of all device-devfreq */
static LIST_HEAD(devfreq_list);
static DEFINE_MUTEX(devfreq_list_lock);

/**
 * find_device_devfreq() - find devfreq struct using device pointer
 * @dev:	device pointer used to lookup device devfreq.
 *
 * Search the list of device devfreqs and return the matched device's
 * devfreq info. devfreq_list_lock should be held by the caller.
 */
static struct devfreq *find_device_devfreq(struct device *dev)
{
	struct devfreq *tmp_devfreq;

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
		if (tmp_devfreq->dev.parent == dev)
			return tmp_devfreq;
	}

	return ERR_PTR(-ENODEV);
}

static unsigned long find_available_min_freq(struct devfreq *devfreq)
{
	struct dev_pm_opp *opp;
	unsigned long min_freq = 0;

	opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &min_freq);
	if (IS_ERR(opp))
		min_freq = 0;
	else
		dev_pm_opp_put(opp);

	return min_freq;
}

static unsigned long find_available_max_freq(struct devfreq *devfreq)
{
	struct dev_pm_opp *opp;
	unsigned long max_freq = ULONG_MAX;

	opp = dev_pm_opp_find_freq_floor(devfreq->dev.parent, &max_freq);
	if (IS_ERR(opp))
		max_freq = 0;
	else
		dev_pm_opp_put(opp);

	return max_freq;
}
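
/*
 * Illustrative sketch (not used by this file): the helpers above walk the
 * OPP table of the parent device, so the driver that owns that device is
 * expected to have registered its operating points beforehand, e.g. from
 * device tree via dev_pm_opp_of_add_table() or manually with
 * dev_pm_opp_add().  The frequencies and voltages below are made-up
 * placeholder values, not a recommendation.
 *
 *	static int foo_register_opps(struct device *dev)
 *	{
 *		int ret;
 *
 *		ret = dev_pm_opp_add(dev, 100000000, 900000);	// 100 MHz @ 0.9 V
 *		if (ret)
 *			return ret;
 *
 *		return dev_pm_opp_add(dev, 200000000, 1000000);	// 200 MHz @ 1.0 V
 *	}
 */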

/**
 * devfreq_get_freq_level() - Lookup freq_table for the frequency
 * @devfreq:	the devfreq instance
 * @freq:	the target frequency
 */
static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
{
	int lev;

	for (lev = 0; lev < devfreq->profile->max_state; lev++)
		if (freq == devfreq->profile->freq_table[lev])
			return lev;

	return -EINVAL;
}

static int set_freq_table(struct devfreq *devfreq)
{
	struct devfreq_dev_profile *profile = devfreq->profile;
	struct dev_pm_opp *opp;
	unsigned long freq;
	int i, count;

	/* Initialize the freq_table from OPP table */
	count = dev_pm_opp_get_opp_count(devfreq->dev.parent);
	if (count <= 0)
		return -EINVAL;

	profile->max_state = count;
	profile->freq_table = devm_kcalloc(devfreq->dev.parent,
					profile->max_state,
					sizeof(*profile->freq_table),
					GFP_KERNEL);
	if (!profile->freq_table) {
		profile->max_state = 0;
		return -ENOMEM;
	}

	for (i = 0, freq = 0; i < profile->max_state; i++, freq++) {
		opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq);
		if (IS_ERR(opp)) {
			devm_kfree(devfreq->dev.parent, profile->freq_table);
			profile->max_state = 0;
			return PTR_ERR(opp);
		}
		dev_pm_opp_put(opp);
		profile->freq_table[i] = freq;
	}

	return 0;
}

/**
 * devfreq_update_status() - Update statistics of devfreq behavior
 * @devfreq:	the devfreq instance
 * @freq:	the update target frequency
 */
int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
{
	int lev, prev_lev, ret = 0;
	unsigned long cur_time;

	cur_time = jiffies;

	/* Immediately exit if previous_freq is not initialized yet. */
	if (!devfreq->previous_freq)
		goto out;

	prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
	if (prev_lev < 0) {
		ret = prev_lev;
		goto out;
	}

	devfreq->time_in_state[prev_lev] +=
			 cur_time - devfreq->last_stat_updated;

	lev = devfreq_get_freq_level(devfreq, freq);
	if (lev < 0) {
		ret = lev;
		goto out;
	}

	if (lev != prev_lev) {
		devfreq->trans_table[(prev_lev *
				devfreq->profile->max_state) + lev]++;
		devfreq->total_trans++;
	}

out:
	devfreq->last_stat_updated = cur_time;
	return ret;
}
EXPORT_SYMBOL(devfreq_update_status);

/**
 * find_devfreq_governor() - find devfreq governor from name
 * @name:	name of the governor
 *
 * Search the list of devfreq governors and return the matched
 * governor's pointer. devfreq_list_lock should be held by the caller.
 */
static struct devfreq_governor *find_devfreq_governor(const char *name)
{
	struct devfreq_governor *tmp_governor;

	if (IS_ERR_OR_NULL(name)) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
		if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
			return tmp_governor;
	}

	return ERR_PTR(-ENODEV);
}

static int devfreq_notify_transition(struct devfreq *devfreq,
		struct devfreq_freqs *freqs, unsigned int state)
{
	if (!devfreq)
		return -EINVAL;

	switch (state) {
	case DEVFREQ_PRECHANGE:
		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
				DEVFREQ_PRECHANGE, freqs);
		break;

	case DEVFREQ_POSTCHANGE:
		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
				DEVFREQ_POSTCHANGE, freqs);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Load monitoring helper functions for governors use */

/**
 * update_devfreq() - Reevaluate the device and configure frequency.
 * @devfreq:	the devfreq instance.
 *
 * Note: Lock devfreq->lock before calling update_devfreq
 *	 This function is exported for governors.
 */
int update_devfreq(struct devfreq *devfreq)
{
	struct devfreq_freqs freqs;
	unsigned long freq, cur_freq, min_freq, max_freq;
	int err = 0;
	u32 flags = 0;

	if (!mutex_is_locked(&devfreq->lock)) {
		WARN(true, "devfreq->lock must be locked by the caller.\n");
		return -EINVAL;
	}

	if (!devfreq->governor)
		return -EINVAL;

	/* Reevaluate the proper frequency */
	err = devfreq->governor->get_target_freq(devfreq, &freq);
	if (err)
		return err;

	/*
	 * Adjust the frequency with user freq, QoS and available freq.
	 *
	 * List from the highest priority
	 * max_freq
	 * min_freq
	 */
	max_freq = MIN(devfreq->scaling_max_freq, devfreq->max_freq);
	min_freq = MAX(devfreq->scaling_min_freq, devfreq->min_freq);

	if (min_freq && freq < min_freq) {
		freq = min_freq;
		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
	}
	if (max_freq && freq > max_freq) {
		freq = max_freq;
		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
	}

	if (devfreq->profile->get_cur_freq)
		devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
	else
		cur_freq = devfreq->previous_freq;

	freqs.old = cur_freq;
	freqs.new = freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);

	err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
	if (err) {
		freqs.new = cur_freq;
		devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
		return err;
	}

	freqs.new = freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);

	if (devfreq_update_status(devfreq, freq))
		dev_err(&devfreq->dev,
			"Couldn't update frequency transition information.\n");

	devfreq->previous_freq = freq;
	return err;
}
EXPORT_SYMBOL(update_devfreq);
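
/*
 * Illustrative sketch (assumption, not part of this file): the
 * ->get_target_freq() hook that update_devfreq() invokes above can be as
 * simple as the following, proposing a frequency from the device's
 * reported utilization.  "foo" and the 90%-busy threshold are made up;
 * update_devfreq() clamps whatever is returned against the min/max limits.
 *
 *	static int foo_get_target_freq(struct devfreq *devfreq,
 *				       unsigned long *freq)
 *	{
 *		struct devfreq_dev_status stat;
 *		int err;
 *
 *		err = devfreq->profile->get_dev_status(devfreq->dev.parent,
 *						       &stat);
 *		if (err)
 *			return err;
 *
 *		// Ask for the maximum if the device was more than 90% busy,
 *		// otherwise keep the current frequency.
 *		if (stat.total_time &&
 *		    stat.busy_time * 10 > stat.total_time * 9)
 *			*freq = ULONG_MAX;	// clamped to max_freq above
 *		else
 *			*freq = stat.current_frequency;
 *
 *		return 0;
 *	}
 */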

/**
 * devfreq_monitor() - Periodically poll devfreq objects.
 * @work:	the work struct used to run devfreq_monitor periodically.
 *
 */
static void devfreq_monitor(struct work_struct *work)
{
	int err;
	struct devfreq *devfreq = container_of(work,
					struct devfreq, work.work);

	mutex_lock(&devfreq->lock);
	err = update_devfreq(devfreq);
	if (err)
		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);

	queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	mutex_unlock(&devfreq->lock);
}

/**
 * devfreq_monitor_start() - Start load monitoring of devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function for starting devfreq device load monitoring. By
 * default delayed work based monitoring is supported. Function
 * to be called from governor in response to DEVFREQ_GOV_START
 * event when device is added to devfreq framework.
 */
void devfreq_monitor_start(struct devfreq *devfreq)
{
	INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
	if (devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
}
EXPORT_SYMBOL(devfreq_monitor_start);

/**
 * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to stop devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_STOP
 * event when device is removed from devfreq framework.
 */
void devfreq_monitor_stop(struct devfreq *devfreq)
{
	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_stop);

/**
 * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to suspend devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_SUSPEND
 * event or when polling interval is set to zero.
 *
 * Note: Though this function is the same as devfreq_monitor_stop(),
 * it is intentionally kept separate to provide hooks for collecting
 * transition statistics.
 */
void devfreq_monitor_suspend(struct devfreq *devfreq)
{
	mutex_lock(&devfreq->lock);
	if (devfreq->stop_polling) {
		mutex_unlock(&devfreq->lock);
		return;
	}

	devfreq_update_status(devfreq, devfreq->previous_freq);
	devfreq->stop_polling = true;
	mutex_unlock(&devfreq->lock);
	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_suspend);

/**
 * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to resume devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_RESUME
 * event or when polling interval is set to non-zero.
 */
void devfreq_monitor_resume(struct devfreq *devfreq)
{
	unsigned long freq;

	mutex_lock(&devfreq->lock);
	if (!devfreq->stop_polling)
		goto out;

	if (!delayed_work_pending(&devfreq->work) &&
			devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));

	devfreq->last_stat_updated = jiffies;
	devfreq->stop_polling = false;

	if (devfreq->profile->get_cur_freq &&
		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
		devfreq->previous_freq = freq;

out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_monitor_resume);

/**
 * devfreq_interval_update() - Update device devfreq monitoring interval
 * @devfreq:	the devfreq instance.
 * @delay:	new polling interval to be set.
 *
 * Helper function to set new load monitoring polling interval. Function
 * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
 */
void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
{
	unsigned int cur_delay = devfreq->profile->polling_ms;
	unsigned int new_delay = *delay;

	mutex_lock(&devfreq->lock);
	devfreq->profile->polling_ms = new_delay;

	if (devfreq->stop_polling)
		goto out;

	/* if new delay is zero, stop polling */
	if (!new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		return;
	}

	/* if current delay is zero, start polling with new delay */
	if (!cur_delay) {
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
		goto out;
	}

	/* if current delay is greater than new delay, restart polling */
	if (cur_delay > new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		mutex_lock(&devfreq->lock);
		if (!devfreq->stop_polling)
			queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	}
out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_interval_update);
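
/*
 * Illustrative sketch (assumption, not part of this file): a governor that
 * relies on the delayed-work monitoring above typically just routes its
 * DEVFREQ_GOV_* events to these helpers.  "foo" is a hypothetical governor;
 * the in-tree governors (e.g. simple_ondemand) follow the same shape.
 *
 *	static int foo_event_handler(struct devfreq *devfreq,
 *				     unsigned int event, void *data)
 *	{
 *		switch (event) {
 *		case DEVFREQ_GOV_START:
 *			devfreq_monitor_start(devfreq);
 *			break;
 *		case DEVFREQ_GOV_STOP:
 *			devfreq_monitor_stop(devfreq);
 *			break;
 *		case DEVFREQ_GOV_INTERVAL:
 *			devfreq_interval_update(devfreq, (unsigned int *)data);
 *			break;
 *		case DEVFREQ_GOV_SUSPEND:
 *			devfreq_monitor_suspend(devfreq);
 *			break;
 *		case DEVFREQ_GOV_RESUME:
 *			devfreq_monitor_resume(devfreq);
 *			break;
 *		default:
 *			break;
 *		}
 *
 *		return 0;
 *	}
 */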

/**
 * devfreq_notifier_call() - Notify that the device frequency requirements
 *			     have been changed outside of the devfreq framework.
 * @nb:		the notifier_block (supposed to be devfreq->nb)
 * @type:	not used
 * @devp:	not used
 *
 * Called by a notifier that uses devfreq->nb.
 */
static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
				 void *devp)
{
	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
	int ret;

	mutex_lock(&devfreq->lock);

	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
	if (!devfreq->scaling_min_freq) {
		mutex_unlock(&devfreq->lock);
		return -EINVAL;
	}

	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
	if (!devfreq->scaling_max_freq) {
		mutex_unlock(&devfreq->lock);
		return -EINVAL;
	}

	ret = update_devfreq(devfreq);
	mutex_unlock(&devfreq->lock);

	return ret;
}

/**
 * devfreq_dev_release() - Callback for struct device to release the device.
 * @dev:	the devfreq device
 *
 * Remove devfreq from the list and release its resources.
 */
static void devfreq_dev_release(struct device *dev)
{
	struct devfreq *devfreq = to_devfreq(dev);

	mutex_lock(&devfreq_list_lock);
	if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
		mutex_unlock(&devfreq_list_lock);
		dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
		return;
	}
	list_del(&devfreq->node);
	mutex_unlock(&devfreq_list_lock);

	if (devfreq->governor)
		devfreq->governor->event_handler(devfreq,
						 DEVFREQ_GOV_STOP, NULL);

	if (devfreq->profile->exit)
		devfreq->profile->exit(devfreq->dev.parent);

	mutex_destroy(&devfreq->lock);
	kfree(devfreq);
}

/**
 * devfreq_add_device() - Add devfreq feature to the device
 * @dev:	the device to add devfreq feature.
 * @profile:	device-specific profile to run devfreq.
 * @governor_name:	name of the policy to choose frequency.
 * @data:	private data for the governor. The devfreq framework does not
 *		touch this value.
 */
struct devfreq *devfreq_add_device(struct device *dev,
				   struct devfreq_dev_profile *profile,
				   const char *governor_name,
				   void *data)
{
	struct devfreq *devfreq;
	struct devfreq_governor *governor;
	static atomic_t devfreq_no = ATOMIC_INIT(-1);
	int err = 0;

	if (!dev || !profile || !governor_name) {
		dev_err(dev, "%s: Invalid parameters.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&devfreq_list_lock);
	devfreq = find_device_devfreq(dev);
	mutex_unlock(&devfreq_list_lock);
	if (!IS_ERR(devfreq)) {
		dev_err(dev, "%s: Unable to create devfreq for the device.\n",
			__func__);
		err = -EINVAL;
		goto err_out;
	}

	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
	if (!devfreq) {
		err = -ENOMEM;
		goto err_out;
	}

	mutex_init(&devfreq->lock);
	mutex_lock(&devfreq->lock);
	devfreq->dev.parent = dev;
	devfreq->dev.class = devfreq_class;
	devfreq->dev.release = devfreq_dev_release;
	devfreq->profile = profile;
	strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
	devfreq->previous_freq = profile->initial_freq;
	devfreq->last_status.current_frequency = profile->initial_freq;
	devfreq->data = data;
	devfreq->nb.notifier_call = devfreq_notifier_call;

	if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
		mutex_unlock(&devfreq->lock);
		err = set_freq_table(devfreq);
		if (err < 0)
			goto err_out;
		mutex_lock(&devfreq->lock);
	}

	devfreq->min_freq = find_available_min_freq(devfreq);
	if (!devfreq->min_freq) {
		mutex_unlock(&devfreq->lock);
		err = -EINVAL;
		goto err_dev;
	}
	devfreq->scaling_min_freq = devfreq->min_freq;

	devfreq->max_freq = find_available_max_freq(devfreq);
	if (!devfreq->max_freq) {
		mutex_unlock(&devfreq->lock);
		err = -EINVAL;
		goto err_dev;
	}
	devfreq->scaling_max_freq = devfreq->max_freq;

	dev_set_name(&devfreq->dev, "devfreq%d",
				atomic_inc_return(&devfreq_no));
	err = device_register(&devfreq->dev);
	if (err) {
		mutex_unlock(&devfreq->lock);
		goto err_dev;
	}

	devfreq->trans_table = devm_kzalloc(&devfreq->dev,
						sizeof(unsigned int) *
						devfreq->profile->max_state *
						devfreq->profile->max_state,
						GFP_KERNEL);
	devfreq->time_in_state = devm_kzalloc(&devfreq->dev,
						sizeof(unsigned long) *
						devfreq->profile->max_state,
						GFP_KERNEL);
	devfreq->last_stat_updated = jiffies;

	srcu_init_notifier_head(&devfreq->transition_notifier_list);

	mutex_unlock(&devfreq->lock);

	mutex_lock(&devfreq_list_lock);
	list_add(&devfreq->node, &devfreq_list);

	governor = find_devfreq_governor(devfreq->governor_name);
	if (IS_ERR(governor)) {
		dev_err(dev, "%s: Unable to find governor for the device\n",
			__func__);
		err = PTR_ERR(governor);
		goto err_init;
	}

	devfreq->governor = governor;
	err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
						NULL);
	if (err) {
		dev_err(dev, "%s: Unable to start governor for the device\n",
			__func__);
		goto err_init;
	}
	mutex_unlock(&devfreq_list_lock);

	return devfreq;

err_init:
	list_del(&devfreq->node);
	mutex_unlock(&devfreq_list_lock);

	device_unregister(&devfreq->dev);
err_dev:
	if (devfreq)
		kfree(devfreq);
err_out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(devfreq_add_device);
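
/*
 * Illustrative sketch (assumption, not part of this file): a typical caller
 * fills a devfreq_dev_profile with its DVFS callbacks and then registers it.
 * "foo", its callbacks and the 10 ms polling interval are hypothetical; the
 * governor name must match a registered governor such as "simple_ondemand".
 *
 *	static struct devfreq_dev_profile foo_profile = {
 *		.initial_freq	= 200000000,
 *		.polling_ms	= 10,
 *		.target		= foo_target,		// applies a new frequency
 *		.get_dev_status	= foo_get_dev_status,	// reports busy/total time
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct devfreq *df;
 *
 *		df = devfreq_add_device(&pdev->dev, &foo_profile,
 *					"simple_ondemand", NULL);
 *		if (IS_ERR(df))
 *			return PTR_ERR(df);
 *		...
 *		return 0;
 *	}
 */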

/**
 * devfreq_remove_device() - Remove devfreq feature from a device.
 * @devfreq:	the devfreq instance to be removed
 *
 * The opposite of devfreq_add_device().
 */
int devfreq_remove_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	device_unregister(&devfreq->dev);

	return 0;
}
EXPORT_SYMBOL(devfreq_remove_device);

static int devm_devfreq_dev_match(struct device *dev, void *res, void *data)
{
	struct devfreq **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

static void devm_devfreq_dev_release(struct device *dev, void *res)
{
	devfreq_remove_device(*(struct devfreq **)res);
}

/**
 * devm_devfreq_add_device() - Resource-managed devfreq_add_device()
 * @dev:	the device to add devfreq feature.
 * @profile:	device-specific profile to run devfreq.
 * @governor_name:	name of the policy to choose frequency.
 * @data:	private data for the governor. The devfreq framework does not
 *		touch this value.
 *
 * This function manages the memory of the devfreq device automatically
 * using device resource management, which simplifies freeing the devfreq
 * device's memory.
 */
struct devfreq *devm_devfreq_add_device(struct device *dev,
					struct devfreq_dev_profile *profile,
					const char *governor_name,
					void *data)
{
	struct devfreq **ptr, *devfreq;

	ptr = devres_alloc(devm_devfreq_dev_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	devfreq = devfreq_add_device(dev, profile, governor_name, data);
	if (IS_ERR(devfreq)) {
		devres_free(ptr);
		return devfreq;
	}

	*ptr = devfreq;
	devres_add(dev, ptr);

	return devfreq;
}
EXPORT_SYMBOL(devm_devfreq_add_device);

#ifdef CONFIG_OF
/*
 * devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
 * @dev - instance to the given device
 * @index - index into list of devfreq
 *
 * Return the instance of devfreq device
 */
struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
{
	struct device_node *node;
	struct devfreq *devfreq;

	if (!dev)
		return ERR_PTR(-EINVAL);

	if (!dev->of_node)
		return ERR_PTR(-EINVAL);

	node = of_parse_phandle(dev->of_node, "devfreq", index);
	if (!node)
		return ERR_PTR(-ENODEV);

	mutex_lock(&devfreq_list_lock);
	list_for_each_entry(devfreq, &devfreq_list, node) {
		if (devfreq->dev.parent
			&& devfreq->dev.parent->of_node == node) {
			mutex_unlock(&devfreq_list_lock);
			of_node_put(node);
			return devfreq;
		}
	}
	mutex_unlock(&devfreq_list_lock);
	of_node_put(node);

	return ERR_PTR(-EPROBE_DEFER);
}
#else
struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
{
	return ERR_PTR(-ENODEV);
}
#endif /* CONFIG_OF */
EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle);

/**
 * devm_devfreq_remove_device() - Resource-managed devfreq_remove_device()
 * @dev:	the device from which to remove devfreq feature.
 * @devfreq:	the devfreq instance to be removed
 */
void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq)
{
	WARN_ON(devres_release(dev, devm_devfreq_dev_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_remove_device);
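
/*
 * Illustrative sketch (assumption, not part of this file): a consumer such
 * as a passive-governor device can look up another device's devfreq instance
 * through a "devfreq" phandle in its own device-tree node.  -EPROBE_DEFER is
 * a normal outcome when the referenced device has not been probed yet.
 *
 *	parent_devfreq = devfreq_get_devfreq_by_phandle(&pdev->dev, 0);
 *	if (IS_ERR(parent_devfreq))
 *		return PTR_ERR(parent_devfreq);	// often -EPROBE_DEFER
 */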

/**
 * devfreq_suspend_device() - Suspend devfreq of a device.
 * @devfreq:	the devfreq instance to be suspended
 *
 * This function is intended to be called by the pm callbacks
 * (e.g., runtime_suspend, suspend) of the device driver that
 * holds the devfreq.
 */
int devfreq_suspend_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	if (!devfreq->governor)
		return 0;

	return devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_SUSPEND, NULL);
}
EXPORT_SYMBOL(devfreq_suspend_device);

/**
 * devfreq_resume_device() - Resume devfreq of a device.
 * @devfreq:	the devfreq instance to be resumed
 *
 * This function is intended to be called by the pm callbacks
 * (e.g., runtime_resume, resume) of the device driver that
 * holds the devfreq.
 */
int devfreq_resume_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	if (!devfreq->governor)
		return 0;

	return devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_RESUME, NULL);
}
EXPORT_SYMBOL(devfreq_resume_device);
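
/*
 * Illustrative sketch (assumption, not part of this file): a driver's
 * runtime-PM callbacks would typically bracket its own suspend/resume work
 * with the two helpers above.  "foo", foo->devfreq and the foo_hw_*()
 * helpers are hypothetical.
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		devfreq_suspend_device(foo->devfreq);
 *		return foo_hw_suspend(foo);
 *	}
 *
 *	static int foo_runtime_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int ret = foo_hw_resume(foo);
 *
 *		if (!ret)
 *			devfreq_resume_device(foo->devfreq);
 *		return ret;
 *	}
 */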

/**
 * devfreq_add_governor() - Add devfreq governor
 * @governor:	the devfreq governor to be added
 */
int devfreq_add_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (!IS_ERR(g)) {
		pr_err("%s: governor %s already registered\n", __func__,
		       g->name);
		err = -EINVAL;
		goto err_out;
	}

	list_add(&governor->node, &devfreq_governor_list);

	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret = 0;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* The following should never occur */
			if (devfreq->governor) {
				dev_warn(dev,
					 "%s: Governor %s already present\n",
					 __func__, devfreq->governor->name);
				ret = devfreq->governor->event_handler(devfreq,
							DEVFREQ_GOV_STOP, NULL);
				if (ret) {
					dev_warn(dev,
						 "%s: Governor %s stop = %d\n",
						 __func__,
						 devfreq->governor->name, ret);
				}
				/* Fall through */
			}
			devfreq->governor = governor;
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_START, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s start=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
		}
	}

err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_add_governor);

/**
 * devfreq_remove_governor() - Remove devfreq governor
 * @governor:	the devfreq governor to be removed
 */
int devfreq_remove_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (IS_ERR(g)) {
		pr_err("%s: governor %s not registered\n", __func__,
		       governor->name);
		err = PTR_ERR(g);
		goto err_out;
	}
	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* we should have a devfreq governor! */
			if (!devfreq->governor) {
				dev_warn(dev, "%s: Governor %s NOT present\n",
					 __func__, governor->name);
				continue;
				/* Fall through */
			}
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_STOP, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s stop=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
			devfreq->governor = NULL;
		}
	}

	list_del(&governor->node);
err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_remove_governor);
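
/*
 * Illustrative sketch (assumption, not part of this file): a governor module
 * combines its callbacks (see the foo_get_target_freq()/foo_event_handler()
 * sketches earlier in this file) into a devfreq_governor and registers it at
 * module init.  The name "foo" is hypothetical.
 *
 *	static struct devfreq_governor devfreq_foo = {
 *		.name			= "foo",
 *		.get_target_freq	= foo_get_target_freq,
 *		.event_handler		= foo_event_handler,
 *	};
 *
 *	static int __init devfreq_foo_init(void)
 *	{
 *		return devfreq_add_governor(&devfreq_foo);
 *	}
 *	subsys_initcall(devfreq_foo_init);
 *
 *	static void __exit devfreq_foo_exit(void)
 *	{
 *		int ret = devfreq_remove_governor(&devfreq_foo);
 *
 *		if (ret)
 *			pr_err("%s: failed to remove governor: %d\n",
 *			       __func__, ret);
 *	}
 *	module_exit(devfreq_foo_exit);
 */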

static ssize_t governor_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	if (!to_devfreq(dev)->governor)
		return -EINVAL;

	return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
}

static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	int ret;
	char str_governor[DEVFREQ_NAME_LEN + 1];
	struct devfreq_governor *governor;

	ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&devfreq_list_lock);
	governor = find_devfreq_governor(str_governor);
	if (IS_ERR(governor)) {
		ret = PTR_ERR(governor);
		goto out;
	}
	if (df->governor == governor) {
		ret = 0;
		goto out;
	} else if ((df->governor && df->governor->immutable) ||
					governor->immutable) {
		ret = -EINVAL;
		goto out;
	}

	if (df->governor) {
		ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
		if (ret) {
			dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
				 __func__, df->governor->name, ret);
			goto out;
		}
	}
	df->governor = governor;
	strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
	if (ret)
		dev_warn(dev, "%s: Governor %s not started(%d)\n",
			 __func__, df->governor->name, ret);
out:
	mutex_unlock(&devfreq_list_lock);

	if (!ret)
		ret = count;
	return ret;
}
static DEVICE_ATTR_RW(governor);

static ssize_t available_governors_show(struct device *d,
					struct device_attribute *attr,
					char *buf)
{
	struct devfreq *df = to_devfreq(d);
	ssize_t count = 0;

	mutex_lock(&devfreq_list_lock);

	/*
	 * The devfreq with an immutable governor (e.g., passive) shows
	 * only its own governor.
	 */
	if (df->governor->immutable) {
		count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
				  "%s ", df->governor_name);
	/*
	 * The devfreq device shows the registered governors except for
	 * immutable governors such as the passive governor.
	 */
	} else {
		struct devfreq_governor *governor;

		list_for_each_entry(governor, &devfreq_governor_list, node) {
			if (governor->immutable)
				continue;
			count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
					   "%s ", governor->name);
		}
	}

	mutex_unlock(&devfreq_list_lock);

	/* Truncate the trailing space */
	if (count)
		count--;

	count += sprintf(&buf[count], "\n");

	return count;
}
static DEVICE_ATTR_RO(available_governors);

static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	unsigned long freq;
	struct devfreq *devfreq = to_devfreq(dev);

	if (devfreq->profile->get_cur_freq &&
		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
		return sprintf(buf, "%lu\n", freq);

	return sprintf(buf, "%lu\n", devfreq->previous_freq);
}
static DEVICE_ATTR_RO(cur_freq);

static ssize_t target_freq_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
}
static DEVICE_ATTR_RO(target_freq);

static ssize_t polling_interval_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
}

static ssize_t polling_interval_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned int value;
	int ret;

	if (!df->governor)
		return -EINVAL;

	ret = sscanf(buf, "%u", &value);
	if (ret != 1)
		return -EINVAL;

	df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
	ret = count;

	return ret;
}
static DEVICE_ATTR_RW(polling_interval);

static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;
	unsigned long max;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&df->lock);
	max = df->max_freq;
	if (value && max && value > max) {
		ret = -EINVAL;
		goto unlock;
	}

	df->min_freq = value;
	update_devfreq(df);
	ret = count;
unlock:
	mutex_unlock(&df->lock);
	return ret;
}

static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct devfreq *df = to_devfreq(dev);

	return sprintf(buf, "%lu\n", MAX(df->scaling_min_freq, df->min_freq));
}

static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;
	unsigned long min;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&df->lock);
	min = df->min_freq;
	if (value && min && value < min) {
		ret = -EINVAL;
		goto unlock;
	}

	df->max_freq = value;
	update_devfreq(df);
	ret = count;
unlock:
	mutex_unlock(&df->lock);
	return ret;
}
static DEVICE_ATTR_RW(min_freq);

static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct devfreq *df = to_devfreq(dev);

	return sprintf(buf, "%lu\n", MIN(df->scaling_max_freq, df->max_freq));
}
static DEVICE_ATTR_RW(max_freq);

static ssize_t available_frequencies_show(struct device *d,
					  struct device_attribute *attr,
					  char *buf)
{
	struct devfreq *df = to_devfreq(d);
	ssize_t count = 0;
	int i;

	mutex_lock(&df->lock);

	for (i = 0; i < df->profile->max_state; i++)
		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
				"%lu ", df->profile->freq_table[i]);

	mutex_unlock(&df->lock);
	/* Truncate the trailing space */
	if (count)
		count--;

	count += sprintf(&buf[count], "\n");

	return count;
}
static DEVICE_ATTR_RO(available_frequencies);

static ssize_t trans_stat_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct devfreq *devfreq = to_devfreq(dev);
	ssize_t len;
	int i, j;
	unsigned int max_state = devfreq->profile->max_state;

	if (!devfreq->stop_polling &&
			devfreq_update_status(devfreq, devfreq->previous_freq))
		return 0;
	if (max_state == 0)
		return sprintf(buf, "Not Supported.\n");

	len = sprintf(buf, "     From  :   To\n");
	len += sprintf(buf + len, "           :");
	for (i = 0; i < max_state; i++)
		len += sprintf(buf + len, "%10lu",
				devfreq->profile->freq_table[i]);

	len += sprintf(buf + len, "   time(ms)\n");

	for (i = 0; i < max_state; i++) {
		if (devfreq->profile->freq_table[i]
					== devfreq->previous_freq) {
			len += sprintf(buf + len, "*");
		} else {
			len += sprintf(buf + len, " ");
		}
		len += sprintf(buf + len, "%10lu:",
				devfreq->profile->freq_table[i]);
		for (j = 0; j < max_state; j++)
			len += sprintf(buf + len, "%10u",
				devfreq->trans_table[(i * max_state) + j]);
		len += sprintf(buf + len, "%10u\n",
			jiffies_to_msecs(devfreq->time_in_state[i]));
	}

	len += sprintf(buf + len, "Total transition : %u\n",
					devfreq->total_trans);
	return len;
}
static DEVICE_ATTR_RO(trans_stat);

static struct attribute *devfreq_attrs[] = {
	&dev_attr_governor.attr,
	&dev_attr_available_governors.attr,
	&dev_attr_cur_freq.attr,
	&dev_attr_available_frequencies.attr,
	&dev_attr_target_freq.attr,
	&dev_attr_polling_interval.attr,
	&dev_attr_min_freq.attr,
	&dev_attr_max_freq.attr,
	&dev_attr_trans_stat.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devfreq);

static int __init devfreq_init(void)
{
	devfreq_class = class_create(THIS_MODULE, "devfreq");
	if (IS_ERR(devfreq_class)) {
		pr_err("%s: couldn't create class\n", __FILE__);
		return PTR_ERR(devfreq_class);
	}

	devfreq_wq = create_freezable_workqueue("devfreq_wq");
	if (!devfreq_wq) {
		class_destroy(devfreq_class);
		pr_err("%s: couldn't create workqueue\n", __FILE__);
		return -ENOMEM;
	}
	devfreq_class->dev_groups = devfreq_groups;

	return 0;
}
subsys_initcall(devfreq_init);

/*
 * The following are helper functions for devfreq user device drivers with
 * OPP framework.
 */

/**
 * devfreq_recommended_opp() - Helper function to get proper OPP for the
 *			       freq value given to target callback.
 * @dev:	The devfreq user device. (parent of devfreq)
 * @freq:	The frequency given to target function
 * @flags:	Flags handed from devfreq framework.
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
					   unsigned long *freq,
					   u32 flags)
{
	struct dev_pm_opp *opp;

	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
		/* The freq is an upper bound. opp should be lower */
		opp = dev_pm_opp_find_freq_floor(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ERANGE))
			opp = dev_pm_opp_find_freq_ceil(dev, freq);
	} else {
		/* The freq is a lower bound. opp should be higher */
		opp = dev_pm_opp_find_freq_ceil(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ERANGE))
			opp = dev_pm_opp_find_freq_floor(dev, freq);
	}

	return opp;
}
EXPORT_SYMBOL(devfreq_recommended_opp);
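
/*
 * Illustrative sketch (assumption, not part of this file): a driver's
 * ->target() callback usually starts by mapping the requested value to a
 * real OPP with the helper above, and must drop the OPP reference when done.
 * foo_set_rate_and_voltage() is a hypothetical stand-in for the hardware
 * programming (clk/regulator) the driver actually performs.
 *
 *	static int foo_target(struct device *dev, unsigned long *freq, u32 flags)
 *	{
 *		struct dev_pm_opp *opp;
 *		unsigned long volt;
 *
 *		opp = devfreq_recommended_opp(dev, freq, flags);
 *		if (IS_ERR(opp))
 *			return PTR_ERR(opp);
 *
 *		volt = dev_pm_opp_get_voltage(opp);
 *		dev_pm_opp_put(opp);	// required by devfreq_recommended_opp()
 *
 *		return foo_set_rate_and_voltage(dev, *freq, volt);
 *	}
 */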

/**
 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
 *				     for any changes in the OPP availability
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	return dev_pm_opp_register_notifier(dev, &devfreq->nb);
}
EXPORT_SYMBOL(devfreq_register_opp_notifier);

/**
 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
 *				       notified for any changes in the OPP
 *				       availability
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 *
 * At exit() callback of devfreq_dev_profile, this must be included if
 * devfreq_recommended_opp is used.
 */
int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	return dev_pm_opp_unregister_notifier(dev, &devfreq->nb);
}
EXPORT_SYMBOL(devfreq_unregister_opp_notifier);

static void devm_devfreq_opp_release(struct device *dev, void *res)
{
	devfreq_unregister_opp_notifier(dev, *(struct devfreq **)res);
}

/**
 * devm_devfreq_register_opp_notifier()
 *	- Resource-managed devfreq_register_opp_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
int devm_devfreq_register_opp_notifier(struct device *dev,
					struct devfreq *devfreq)
{
	struct devfreq **ptr;
	int ret;

	ptr = devres_alloc(devm_devfreq_opp_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = devfreq_register_opp_notifier(dev, devfreq);
	if (ret) {
		devres_free(ptr);
		return ret;
	}

	*ptr = devfreq;
	devres_add(dev, ptr);

	return 0;
}
EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);

/**
 * devm_devfreq_unregister_opp_notifier()
 *	- Resource-managed devfreq_unregister_opp_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
void devm_devfreq_unregister_opp_notifier(struct device *dev,
					  struct devfreq *devfreq)
{
	WARN_ON(devres_release(dev, devm_devfreq_opp_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);

/**
 * devfreq_register_notifier() - Register a driver with devfreq
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to register.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 */
int devfreq_register_notifier(struct devfreq *devfreq,
				struct notifier_block *nb,
				unsigned int list)
{
	int ret = 0;

	if (!devfreq)
		return -EINVAL;

	switch (list) {
	case DEVFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&devfreq->transition_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(devfreq_register_notifier);

/*
 * devfreq_unregister_notifier() - Unregister a driver with devfreq
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to be unregistered.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 */
int devfreq_unregister_notifier(struct devfreq *devfreq,
				struct notifier_block *nb,
				unsigned int list)
{
	int ret = 0;

	if (!devfreq)
		return -EINVAL;

	switch (list) {
	case DEVFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&devfreq->transition_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(devfreq_unregister_notifier);

struct devfreq_notifier_devres {
	struct devfreq *devfreq;
	struct notifier_block *nb;
	unsigned int list;
};

static void devm_devfreq_notifier_release(struct device *dev, void *res)
{
	struct devfreq_notifier_devres *this = res;

	devfreq_unregister_notifier(this->devfreq, this->nb, this->list);
}

/**
 * devm_devfreq_register_notifier()
 *	- Resource-managed devfreq_register_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to be registered.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 */
int devm_devfreq_register_notifier(struct device *dev,
				struct devfreq *devfreq,
				struct notifier_block *nb,
				unsigned int list)
{
	struct devfreq_notifier_devres *ptr;
	int ret;

	ptr = devres_alloc(devm_devfreq_notifier_release, sizeof(*ptr),
				GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = devfreq_register_notifier(devfreq, nb, list);
	if (ret) {
		devres_free(ptr);
		return ret;
	}

	ptr->devfreq = devfreq;
	ptr->nb = nb;
	ptr->list = list;
	devres_add(dev, ptr);

	return 0;
}
EXPORT_SYMBOL(devm_devfreq_register_notifier);
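
/*
 * Illustrative sketch (assumption, not part of this file): a consumer that
 * wants to follow another device's frequency transitions registers a
 * notifier_block on the DEVFREQ_TRANSITION_NOTIFIER list; the callback
 * receives a struct devfreq_freqs with the old and new frequencies around
 * each change.  "foo" and parent_devfreq are hypothetical.
 *
 *	static int foo_devfreq_transition(struct notifier_block *nb,
 *					  unsigned long event, void *data)
 *	{
 *		struct devfreq_freqs *freqs = data;
 *
 *		if (event == DEVFREQ_POSTCHANGE)
 *			pr_debug("switched from %lu Hz to %lu Hz\n",
 *				 freqs->old, freqs->new);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	// in probe(), with foo->nb.notifier_call = foo_devfreq_transition:
 *	ret = devm_devfreq_register_notifier(dev, parent_devfreq, &foo->nb,
 *					     DEVFREQ_TRANSITION_NOTIFIER);
 */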

/**
 * devm_devfreq_unregister_notifier()
 *	- Resource-managed devfreq_unregister_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to be unregistered.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 */
void devm_devfreq_unregister_notifier(struct device *dev,
				      struct devfreq *devfreq,
				      struct notifier_block *nb,
				      unsigned int list)
{
	WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_unregister_notifier);