/*
 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
 *	    for Non-CPU Devices.
 *
 * Copyright (C) 2011 Samsung Electronics
 *	MyungJoo Ham <myungjoo.ham@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/pm_opp.h>
#include <linux/devfreq.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <linux/of.h>
#include "governor.h"

#define MAX(a, b)	(((a) > (b)) ? (a) : (b))
#define MIN(a, b)	(((a) < (b)) ? (a) : (b))

static struct class *devfreq_class;

/*
 * devfreq core provides delayed work based load monitoring helper
 * functions. Governors can use these or can implement their own
 * monitoring mechanism.
 */
static struct workqueue_struct *devfreq_wq;

/* The list of all device-devfreq governors */
static LIST_HEAD(devfreq_governor_list);
/* The list of all device-devfreq */
static LIST_HEAD(devfreq_list);
static DEFINE_MUTEX(devfreq_list_lock);

/**
 * find_device_devfreq() - find devfreq struct using device pointer
 * @dev: device pointer used to lookup device devfreq.
 *
 * Search the list of device devfreqs and return the matched device's
 * devfreq info. devfreq_list_lock should be held by the caller.
 */
static struct devfreq *find_device_devfreq(struct device *dev)
{
	struct devfreq *tmp_devfreq;

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
		if (tmp_devfreq->dev.parent == dev)
			return tmp_devfreq;
	}

	return ERR_PTR(-ENODEV);
}

static unsigned long find_available_min_freq(struct devfreq *devfreq)
{
	struct dev_pm_opp *opp;
	unsigned long min_freq = 0;

	opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &min_freq);
	if (IS_ERR(opp))
		min_freq = 0;
	else
		dev_pm_opp_put(opp);

	return min_freq;
}

static unsigned long find_available_max_freq(struct devfreq *devfreq)
{
	struct dev_pm_opp *opp;
	unsigned long max_freq = ULONG_MAX;

	opp = dev_pm_opp_find_freq_floor(devfreq->dev.parent, &max_freq);
	if (IS_ERR(opp))
		max_freq = 0;
	else
		dev_pm_opp_put(opp);

	return max_freq;
}

/**
 * devfreq_get_freq_level() - Lookup freq_table for the frequency
 * @devfreq: the devfreq instance
 * @freq: the target frequency
 */
static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
{
	int lev;

	for (lev = 0; lev < devfreq->profile->max_state; lev++)
		if (freq == devfreq->profile->freq_table[lev])
			return lev;

	return -EINVAL;
}

static int set_freq_table(struct devfreq *devfreq)
{
	struct devfreq_dev_profile *profile = devfreq->profile;
	struct dev_pm_opp *opp;
	unsigned long freq;
	int i, count;

	/* Initialize the freq_table from OPP table */
	count = dev_pm_opp_get_opp_count(devfreq->dev.parent);
	if (count <= 0)
		return -EINVAL;

	profile->max_state = count;
	profile->freq_table = devm_kcalloc(devfreq->dev.parent,
					   profile->max_state,
					   sizeof(*profile->freq_table),
					   GFP_KERNEL);
	if (!profile->freq_table) {
		profile->max_state = 0;
		return -ENOMEM;
	}

	for (i = 0, freq = 0; i < profile->max_state; i++, freq++) {
		opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq);
		if (IS_ERR(opp)) {
			devm_kfree(devfreq->dev.parent, profile->freq_table);
			profile->max_state = 0;
			return PTR_ERR(opp);
		}
		dev_pm_opp_put(opp);
		profile->freq_table[i] = freq;
	}

	return 0;
}

/**
 * devfreq_update_status() - Update statistics of devfreq behavior
 * @devfreq: the devfreq instance
 * @freq: the update target frequency
 */
int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
{
	int lev, prev_lev, ret = 0;
	unsigned long cur_time;

	cur_time = jiffies;

	/* Immediately exit if previous_freq is not initialized yet. */
	if (!devfreq->previous_freq)
		goto out;

	prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
	if (prev_lev < 0) {
		ret = prev_lev;
		goto out;
	}

	devfreq->time_in_state[prev_lev] +=
			cur_time - devfreq->last_stat_updated;

	lev = devfreq_get_freq_level(devfreq, freq);
	if (lev < 0) {
		ret = lev;
		goto out;
	}

	if (lev != prev_lev) {
		devfreq->trans_table[(prev_lev *
				devfreq->profile->max_state) + lev]++;
		devfreq->total_trans++;
	}

out:
	devfreq->last_stat_updated = cur_time;
	return ret;
}
EXPORT_SYMBOL(devfreq_update_status);

/**
 * find_devfreq_governor() - find devfreq governor from name
 * @name: name of the governor
 *
 * Search the list of devfreq governors and return the matched
 * governor's pointer. devfreq_list_lock should be held by the caller.
 */
static struct devfreq_governor *find_devfreq_governor(const char *name)
{
	struct devfreq_governor *tmp_governor;

	if (IS_ERR_OR_NULL(name)) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
		if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
			return tmp_governor;
	}

	return ERR_PTR(-ENODEV);
}

static int devfreq_notify_transition(struct devfreq *devfreq,
		struct devfreq_freqs *freqs, unsigned int state)
{
	if (!devfreq)
		return -EINVAL;

	switch (state) {
	case DEVFREQ_PRECHANGE:
		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
				DEVFREQ_PRECHANGE, freqs);
		break;

	case DEVFREQ_POSTCHANGE:
		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
				DEVFREQ_POSTCHANGE, freqs);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Load monitoring helper functions for governors use */

/**
 * update_devfreq() - Reevaluate the device and configure frequency.
 * @devfreq: the devfreq instance.
 *
 * Note: Lock devfreq->lock before calling update_devfreq
 * This function is exported for governors.
 */
int update_devfreq(struct devfreq *devfreq)
{
	struct devfreq_freqs freqs;
	unsigned long freq, cur_freq, min_freq, max_freq;
	int err = 0;
	u32 flags = 0;

	if (!mutex_is_locked(&devfreq->lock)) {
		WARN(true, "devfreq->lock must be locked by the caller.\n");
		return -EINVAL;
	}

	if (!devfreq->governor)
		return -EINVAL;

	/* Reevaluate the proper frequency */
	err = devfreq->governor->get_target_freq(devfreq, &freq);
	if (err)
		return err;

	/*
	 * Adjust the frequency with user freq, QoS and available freq.
	 *
	 * List from the highest priority
	 * max_freq
	 * min_freq
	 */
	max_freq = MIN(devfreq->scaling_max_freq, devfreq->max_freq);
	min_freq = MAX(devfreq->scaling_min_freq, devfreq->min_freq);

	if (min_freq && freq < min_freq) {
		freq = min_freq;
		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
	}
	if (max_freq && freq > max_freq) {
		freq = max_freq;
		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
	}

	if (devfreq->profile->get_cur_freq)
		devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
	else
		cur_freq = devfreq->previous_freq;

	freqs.old = cur_freq;
	freqs.new = freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);

	err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
	if (err) {
		freqs.new = cur_freq;
		devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
		return err;
	}

	freqs.new = freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);

	if (devfreq_update_status(devfreq, freq))
		dev_err(&devfreq->dev,
			"Couldn't update frequency transition information.\n");

	devfreq->previous_freq = freq;
	return err;
}
EXPORT_SYMBOL(update_devfreq);

/**
 * devfreq_monitor() - Periodically poll devfreq objects.
 * @work: the work struct used to run devfreq_monitor periodically.
 */
static void devfreq_monitor(struct work_struct *work)
{
	int err;
	struct devfreq *devfreq = container_of(work,
					struct devfreq, work.work);

	mutex_lock(&devfreq->lock);
	err = update_devfreq(devfreq);
	if (err)
		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);

	queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
	mutex_unlock(&devfreq->lock);
}

/**
 * devfreq_monitor_start() - Start load monitoring of devfreq instance
 * @devfreq: the devfreq instance.
 *
 * Helper function for starting devfreq device load monitoring. By
 * default delayed work based monitoring is supported. Function
 * to be called from governor in response to DEVFREQ_GOV_START
 * event when device is added to devfreq framework.
 */
void devfreq_monitor_start(struct devfreq *devfreq)
{
	INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
	if (devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
}
EXPORT_SYMBOL(devfreq_monitor_start);

/**
 * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
 * @devfreq: the devfreq instance.
 *
 * Helper function to stop devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_STOP
 * event when device is removed from devfreq framework.
 */
void devfreq_monitor_stop(struct devfreq *devfreq)
{
	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_stop);

/**
 * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
 * @devfreq: the devfreq instance.
 *
 * Helper function to suspend devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_SUSPEND
 * event or when polling interval is set to zero.
 *
 * Note: Though this function is the same as devfreq_monitor_stop(),
 * intentionally kept separate to provide hooks for collecting
 * transition statistics.
 */
void devfreq_monitor_suspend(struct devfreq *devfreq)
{
	mutex_lock(&devfreq->lock);
	if (devfreq->stop_polling) {
		mutex_unlock(&devfreq->lock);
		return;
	}

	devfreq_update_status(devfreq, devfreq->previous_freq);
	devfreq->stop_polling = true;
	mutex_unlock(&devfreq->lock);
	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_suspend);

/**
 * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
 * @devfreq: the devfreq instance.
 *
 * Helper function to resume devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_RESUME
 * event or when polling interval is set to non-zero.
 */
void devfreq_monitor_resume(struct devfreq *devfreq)
{
	unsigned long freq;

	mutex_lock(&devfreq->lock);
	if (!devfreq->stop_polling)
		goto out;

	if (!delayed_work_pending(&devfreq->work) &&
			devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));

	devfreq->last_stat_updated = jiffies;
	devfreq->stop_polling = false;

	if (devfreq->profile->get_cur_freq &&
		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
		devfreq->previous_freq = freq;

out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_monitor_resume);

/**
 * devfreq_interval_update() - Update device devfreq monitoring interval
 * @devfreq: the devfreq instance.
 * @delay: new polling interval to be set.
 *
 * Helper function to set new load monitoring polling interval. Function
 * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
 */
void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
{
	unsigned int cur_delay = devfreq->profile->polling_ms;
	unsigned int new_delay = *delay;

	mutex_lock(&devfreq->lock);
	devfreq->profile->polling_ms = new_delay;

	if (devfreq->stop_polling)
		goto out;

	/* if new delay is zero, stop polling */
	if (!new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		return;
	}

	/* if current delay is zero, start polling with new delay */
	if (!cur_delay) {
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
		goto out;
	}

	/* if current delay is greater than new delay, restart polling */
	if (cur_delay > new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		mutex_lock(&devfreq->lock);
		if (!devfreq->stop_polling)
			queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	}
out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_interval_update);

/**
 * devfreq_notifier_call() - Notify that the device frequency requirements
 *			     have been changed out of devfreq framework.
 * @nb: the notifier_block (supposed to be devfreq->nb)
 * @type: not used
 * @devp: not used
 *
 * Called by a notifier that uses devfreq->nb.
 */
static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
				 void *devp)
{
	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
	int ret;

	mutex_lock(&devfreq->lock);

	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
	if (!devfreq->scaling_min_freq) {
		mutex_unlock(&devfreq->lock);
		return -EINVAL;
	}

	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
	if (!devfreq->scaling_max_freq) {
		mutex_unlock(&devfreq->lock);
		return -EINVAL;
	}

	ret = update_devfreq(devfreq);
	mutex_unlock(&devfreq->lock);

	return ret;
}

/**
 * devfreq_dev_release() - Callback for struct device to release the device.
 * @dev: the devfreq device
 *
 * Remove devfreq from the list and release its resources.
 */
static void devfreq_dev_release(struct device *dev)
{
	struct devfreq *devfreq = to_devfreq(dev);

	mutex_lock(&devfreq_list_lock);
	if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
		mutex_unlock(&devfreq_list_lock);
		dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
		return;
	}
	list_del(&devfreq->node);
	mutex_unlock(&devfreq_list_lock);

	if (devfreq->governor)
		devfreq->governor->event_handler(devfreq,
						 DEVFREQ_GOV_STOP, NULL);

	if (devfreq->profile->exit)
		devfreq->profile->exit(devfreq->dev.parent);

	mutex_destroy(&devfreq->lock);
	kfree(devfreq);
}

/**
 * devfreq_add_device() - Add devfreq feature to the device
 * @dev: the device to add devfreq feature.
 * @profile: device-specific profile to run devfreq.
 * @governor_name: name of the policy to choose frequency.
 * @data: private data for the governor. The devfreq framework does not
 *	  touch this value.
 */
struct devfreq *devfreq_add_device(struct device *dev,
				   struct devfreq_dev_profile *profile,
				   const char *governor_name,
				   void *data)
{
	struct devfreq *devfreq;
	struct devfreq_governor *governor;
	static atomic_t devfreq_no = ATOMIC_INIT(-1);
	int err = 0;

	if (!dev || !profile || !governor_name) {
		dev_err(dev, "%s: Invalid parameters.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&devfreq_list_lock);
	devfreq = find_device_devfreq(dev);
	mutex_unlock(&devfreq_list_lock);
	if (!IS_ERR(devfreq)) {
		dev_err(dev, "%s: Unable to create devfreq for the device.\n",
			__func__);
		err = -EINVAL;
		goto err_out;
	}

	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
	if (!devfreq) {
		err = -ENOMEM;
		goto err_out;
	}

	mutex_init(&devfreq->lock);
	mutex_lock(&devfreq->lock);
	devfreq->dev.parent = dev;
	devfreq->dev.class = devfreq_class;
	devfreq->dev.release = devfreq_dev_release;
	devfreq->profile = profile;
	strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
	devfreq->previous_freq = profile->initial_freq;
	devfreq->last_status.current_frequency = profile->initial_freq;
	devfreq->data = data;
	devfreq->nb.notifier_call = devfreq_notifier_call;

	if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
		mutex_unlock(&devfreq->lock);
		err = set_freq_table(devfreq);
		if (err < 0)
			goto err_out;
		mutex_lock(&devfreq->lock);
	}

	devfreq->min_freq = find_available_min_freq(devfreq);
	if (!devfreq->min_freq) {
		mutex_unlock(&devfreq->lock);
		err = -EINVAL;
		goto err_dev;
	}
	devfreq->scaling_min_freq = devfreq->min_freq;

	devfreq->max_freq = find_available_max_freq(devfreq);
	if (!devfreq->max_freq) {
		mutex_unlock(&devfreq->lock);
		err = -EINVAL;
		goto err_dev;
	}
	devfreq->scaling_max_freq = devfreq->max_freq;

	dev_set_name(&devfreq->dev, "devfreq%d",
				atomic_inc_return(&devfreq_no));
	err = device_register(&devfreq->dev);
	if (err) {
		mutex_unlock(&devfreq->lock);
		goto err_dev;
	}

	devfreq->trans_table =
		devm_kzalloc(&devfreq->dev,
			     array3_size(sizeof(unsigned int),
					 devfreq->profile->max_state,
					 devfreq->profile->max_state),
			     GFP_KERNEL);
	devfreq->time_in_state = devm_kcalloc(&devfreq->dev,
					      devfreq->profile->max_state,
					      sizeof(unsigned long),
					      GFP_KERNEL);
	devfreq->last_stat_updated = jiffies;

	srcu_init_notifier_head(&devfreq->transition_notifier_list);

	mutex_unlock(&devfreq->lock);

	mutex_lock(&devfreq_list_lock);
	list_add(&devfreq->node, &devfreq_list);

	governor = find_devfreq_governor(devfreq->governor_name);
	if (IS_ERR(governor)) {
		dev_err(dev, "%s: Unable to find governor for the device\n",
			__func__);
		err = PTR_ERR(governor);
		goto err_init;
	}

	devfreq->governor = governor;
	err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
						NULL);
	if (err) {
		dev_err(dev, "%s: Unable to start governor for the device\n",
			__func__);
		goto err_init;
	}
	mutex_unlock(&devfreq_list_lock);

	return devfreq;

err_init:
	list_del(&devfreq->node);
	mutex_unlock(&devfreq_list_lock);

	device_unregister(&devfreq->dev);
err_dev:
	if (devfreq)
		kfree(devfreq);
err_out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(devfreq_add_device);
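
/*
 * Illustrative sketch only (not part of this driver): how a device driver
 * might hook itself into devfreq using devm_devfreq_add_device() declared in
 * <linux/devfreq.h>.  All foo_* names, the constants and the choice of the
 * "simple_ondemand" governor are assumptions made for the example.
 */
static int foo_target(struct device *dev, unsigned long *freq, u32 flags)
{
	/*
	 * A real driver would snap *freq to a supported OPP (see
	 * devfreq_recommended_opp() below) and program its clock/regulator.
	 */
	return 0;
}

static int foo_get_dev_status(struct device *dev,
			      struct devfreq_dev_status *stat)
{
	/* A real driver reads performance counters; constants keep it short. */
	stat->busy_time = 30;
	stat->total_time = 100;
	stat->current_frequency = 400000000;	/* report the actual rate here */
	return 0;
}

static struct devfreq_dev_profile foo_profile = {
	.initial_freq	= 400000000,
	.polling_ms	= 100,
	.target		= foo_target,
	.get_dev_status	= foo_get_dev_status,
};

static int foo_probe(struct platform_device *pdev)
{
	struct devfreq *df;

	/* Resource-managed: the devfreq instance is removed with the device. */
	df = devm_devfreq_add_device(&pdev->dev, &foo_profile,
				     "simple_ondemand", NULL);
	return PTR_ERR_OR_ZERO(df);
}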

/**
 * devfreq_remove_device() - Remove devfreq feature from a device.
 * @devfreq: the devfreq instance to be removed
 *
 * The opposite of devfreq_add_device().
 */
int devfreq_remove_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	device_unregister(&devfreq->dev);

	return 0;
}
EXPORT_SYMBOL(devfreq_remove_device);

static int devm_devfreq_dev_match(struct device *dev, void *res, void *data)
{
	struct devfreq **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

static void devm_devfreq_dev_release(struct device *dev, void *res)
{
	devfreq_remove_device(*(struct devfreq **)res);
}

/**
 * devm_devfreq_add_device() - Resource-managed devfreq_add_device()
 * @dev: the device to add devfreq feature.
 * @profile: device-specific profile to run devfreq.
 * @governor_name: name of the policy to choose frequency.
 * @data: private data for the governor. The devfreq framework does not
 *	  touch this value.
 *
 * This function manages the memory of the devfreq device automatically
 * using device resource management, and thus simplifies freeing the
 * devfreq device.
 */
struct devfreq *devm_devfreq_add_device(struct device *dev,
					struct devfreq_dev_profile *profile,
					const char *governor_name,
					void *data)
{
	struct devfreq **ptr, *devfreq;

	ptr = devres_alloc(devm_devfreq_dev_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	devfreq = devfreq_add_device(dev, profile, governor_name, data);
	if (IS_ERR(devfreq)) {
		devres_free(ptr);
		return devfreq;
	}

	*ptr = devfreq;
	devres_add(dev, ptr);

	return devfreq;
}
EXPORT_SYMBOL(devm_devfreq_add_device);

#ifdef CONFIG_OF
/*
 * devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
 * @dev - instance to the given device
 * @index - index into list of devfreq
 *
 * return the instance of devfreq device
 */
struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
{
	struct device_node *node;
	struct devfreq *devfreq;

	if (!dev)
		return ERR_PTR(-EINVAL);

	if (!dev->of_node)
		return ERR_PTR(-EINVAL);

	node = of_parse_phandle(dev->of_node, "devfreq", index);
	if (!node)
		return ERR_PTR(-ENODEV);

	mutex_lock(&devfreq_list_lock);
	list_for_each_entry(devfreq, &devfreq_list, node) {
		if (devfreq->dev.parent
			&& devfreq->dev.parent->of_node == node) {
			mutex_unlock(&devfreq_list_lock);
			of_node_put(node);
			return devfreq;
		}
	}
	mutex_unlock(&devfreq_list_lock);
	of_node_put(node);

	return ERR_PTR(-EPROBE_DEFER);
}
#else
struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
{
	return ERR_PTR(-ENODEV);
}
#endif /* CONFIG_OF */
EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle);

/**
 * devm_devfreq_remove_device() - Resource-managed devfreq_remove_device()
 * @dev: the device from which to remove the devfreq feature.
 * @devfreq: the devfreq instance to be removed
 */
void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq)
{
	WARN_ON(devres_release(dev, devm_devfreq_dev_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_remove_device);

/**
 * devfreq_suspend_device() - Suspend devfreq of a device.
 * @devfreq: the devfreq instance to be suspended
 *
 * This function is intended to be called by the pm callbacks
 * (e.g., runtime_suspend, suspend) of the device driver that
 * holds the devfreq.
 */
int devfreq_suspend_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	if (!devfreq->governor)
		return 0;

	return devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_SUSPEND, NULL);
}
EXPORT_SYMBOL(devfreq_suspend_device);

/**
 * devfreq_resume_device() - Resume devfreq of a device.
 * @devfreq: the devfreq instance to be resumed
 *
 * This function is intended to be called by the pm callbacks
 * (e.g., runtime_resume, resume) of the device driver that
 * holds the devfreq.
 */
int devfreq_resume_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	if (!devfreq->governor)
		return 0;

	return devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_RESUME, NULL);
}
EXPORT_SYMBOL(devfreq_resume_device);

/**
 * devfreq_add_governor() - Add devfreq governor
 * @governor: the devfreq governor to be added
 */
int devfreq_add_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (!IS_ERR(g)) {
		pr_err("%s: governor %s already registered\n", __func__,
		       g->name);
		err = -EINVAL;
		goto err_out;
	}

	list_add(&governor->node, &devfreq_governor_list);

	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret = 0;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* The following should never occur */
			if (devfreq->governor) {
				dev_warn(dev,
					 "%s: Governor %s already present\n",
					 __func__, devfreq->governor->name);
				ret = devfreq->governor->event_handler(devfreq,
							DEVFREQ_GOV_STOP, NULL);
				if (ret) {
					dev_warn(dev,
						 "%s: Governor %s stop = %d\n",
						 __func__,
						 devfreq->governor->name, ret);
				}
				/* Fall through */
			}
			devfreq->governor = governor;
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_START, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s start=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
		}
	}

err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_add_governor);
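
/*
 * Illustrative sketch only (not part of this driver): the overall shape of a
 * trivial governor that always requests the maximum frequency and uses the
 * load-monitoring helpers above so that constraint changes are re-evaluated
 * periodically.  The "example_max" name is hypothetical; a real governor
 * registers itself with devfreq_add_governor() from its module init and
 * removes itself with devfreq_remove_governor() on exit.
 */
static int example_max_get_target_freq(struct devfreq *df, unsigned long *freq)
{
	/* update_devfreq() clamps the result against min/max constraints. */
	*freq = df->max_freq ? df->max_freq : ULONG_MAX;
	return 0;
}

static int example_max_event_handler(struct devfreq *devfreq,
				     unsigned int event, void *data)
{
	switch (event) {
	case DEVFREQ_GOV_START:
		devfreq_monitor_start(devfreq);
		break;
	case DEVFREQ_GOV_STOP:
		devfreq_monitor_stop(devfreq);
		break;
	case DEVFREQ_GOV_INTERVAL:
		devfreq_interval_update(devfreq, (unsigned int *)data);
		break;
	case DEVFREQ_GOV_SUSPEND:
		devfreq_monitor_suspend(devfreq);
		break;
	case DEVFREQ_GOV_RESUME:
		devfreq_monitor_resume(devfreq);
		break;
	default:
		break;
	}

	return 0;
}

static struct devfreq_governor example_max_governor = {
	.name			= "example_max",
	.get_target_freq	= example_max_get_target_freq,
	.event_handler		= example_max_event_handler,
};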

/**
 * devfreq_remove_governor() - Remove a devfreq governor
 * @governor: the devfreq governor to be removed
 */
int devfreq_remove_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (IS_ERR(g)) {
		pr_err("%s: governor %s not registered\n", __func__,
		       governor->name);
		err = PTR_ERR(g);
		goto err_out;
	}
	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* we should have a devfreq governor! */
			if (!devfreq->governor) {
				dev_warn(dev, "%s: Governor %s NOT present\n",
					 __func__, governor->name);
				continue;
				/* Fall through */
			}
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_STOP, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s stop=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
			devfreq->governor = NULL;
		}
	}

	list_del(&governor->node);
err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_remove_governor);

static ssize_t governor_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	if (!to_devfreq(dev)->governor)
		return -EINVAL;

	return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
}

static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	int ret;
	char str_governor[DEVFREQ_NAME_LEN + 1];
	struct devfreq_governor *governor;

	ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&devfreq_list_lock);
	governor = find_devfreq_governor(str_governor);
	if (IS_ERR(governor)) {
		ret = PTR_ERR(governor);
		goto out;
	}
	if (df->governor == governor) {
		ret = 0;
		goto out;
	} else if ((df->governor && df->governor->immutable) ||
					governor->immutable) {
		ret = -EINVAL;
		goto out;
	}

	if (df->governor) {
		ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
		if (ret) {
			dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
				 __func__, df->governor->name, ret);
			goto out;
		}
	}
	df->governor = governor;
	strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
	if (ret)
		dev_warn(dev, "%s: Governor %s not started(%d)\n",
			 __func__, df->governor->name, ret);
out:
	mutex_unlock(&devfreq_list_lock);

	if (!ret)
		ret = count;
	return ret;
}
static DEVICE_ATTR_RW(governor);

static ssize_t available_governors_show(struct device *d,
					struct device_attribute *attr,
					char *buf)
{
	struct devfreq *df = to_devfreq(d);
	ssize_t count = 0;

	mutex_lock(&devfreq_list_lock);

	/*
	 * A devfreq device with an immutable governor (e.g., passive)
	 * shows only its own governor.
	 */
	if (df->governor->immutable) {
		count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
				  "%s ", df->governor_name);
	/*
	 * Otherwise, the devfreq device shows all registered governors
	 * except for immutable ones such as the passive governor.
	 */
	} else {
		struct devfreq_governor *governor;

		list_for_each_entry(governor, &devfreq_governor_list, node) {
			if (governor->immutable)
				continue;
			count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
					   "%s ", governor->name);
		}
	}

	mutex_unlock(&devfreq_list_lock);

	/* Truncate the trailing space */
	if (count)
		count--;

	count += sprintf(&buf[count], "\n");

	return count;
}
static DEVICE_ATTR_RO(available_governors);

static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	unsigned long freq;
	struct devfreq *devfreq = to_devfreq(dev);

	if (devfreq->profile->get_cur_freq &&
		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
		return sprintf(buf, "%lu\n", freq);

	return sprintf(buf, "%lu\n", devfreq->previous_freq);
}
static DEVICE_ATTR_RO(cur_freq);

static ssize_t target_freq_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
}
static DEVICE_ATTR_RO(target_freq);

static ssize_t polling_interval_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
}

static ssize_t polling_interval_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned int value;
	int ret;

	if (!df->governor)
		return -EINVAL;

	ret = sscanf(buf, "%u", &value);
	if (ret != 1)
		return -EINVAL;

	df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
	ret = count;

	return ret;
}
static DEVICE_ATTR_RW(polling_interval);

static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;
	unsigned long max;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&df->lock);
	max = df->max_freq;
	if (value && max && value > max) {
		ret = -EINVAL;
		goto unlock;
	}

	df->min_freq = value;
	update_devfreq(df);
	ret = count;
unlock:
	mutex_unlock(&df->lock);
	return ret;
}

static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct devfreq *df = to_devfreq(dev);

	return sprintf(buf, "%lu\n", MAX(df->scaling_min_freq, df->min_freq));
}

static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;
	unsigned long min;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&df->lock);
	min = df->min_freq;
	if (value && min && value < min) {
		ret = -EINVAL;
		goto unlock;
	}

	df->max_freq = value;
	update_devfreq(df);
	ret = count;
unlock:
	mutex_unlock(&df->lock);
	return ret;
}
static DEVICE_ATTR_RW(min_freq);

static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct devfreq *df = to_devfreq(dev);

	return sprintf(buf, "%lu\n", MIN(df->scaling_max_freq, df->max_freq));
}
static DEVICE_ATTR_RW(max_freq);

static ssize_t available_frequencies_show(struct device *d,
					  struct device_attribute *attr,
					  char *buf)
{
	struct devfreq *df = to_devfreq(d);
	ssize_t count = 0;
	int i;

	mutex_lock(&df->lock);

	for (i = 0; i < df->profile->max_state; i++)
		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
				   "%lu ", df->profile->freq_table[i]);

	mutex_unlock(&df->lock);
	/* Truncate the trailing space */
	if (count)
		count--;

	count += sprintf(&buf[count], "\n");

	return count;
}
static DEVICE_ATTR_RO(available_frequencies);

static ssize_t trans_stat_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct devfreq *devfreq = to_devfreq(dev);
	ssize_t len;
	int i, j;
	unsigned int max_state = devfreq->profile->max_state;

	if (!devfreq->stop_polling &&
			devfreq_update_status(devfreq, devfreq->previous_freq))
		return 0;
	if (max_state == 0)
		return sprintf(buf, "Not Supported.\n");

	len = sprintf(buf, "     From  :   To\n");
	len += sprintf(buf + len, "           :");
	for (i = 0; i < max_state; i++)
		len += sprintf(buf + len, "%10lu",
				devfreq->profile->freq_table[i]);

	len += sprintf(buf + len, "   time(ms)\n");

	for (i = 0; i < max_state; i++) {
		if (devfreq->profile->freq_table[i]
					== devfreq->previous_freq) {
			len += sprintf(buf + len, "*");
		} else {
			len += sprintf(buf + len, " ");
		}
		len += sprintf(buf + len, "%10lu:",
				devfreq->profile->freq_table[i]);
		for (j = 0; j < max_state; j++)
			len += sprintf(buf + len, "%10u",
				devfreq->trans_table[(i * max_state) + j]);
		len += sprintf(buf + len, "%10u\n",
			jiffies_to_msecs(devfreq->time_in_state[i]));
	}

	len += sprintf(buf + len, "Total transition : %u\n",
					devfreq->total_trans);
	return len;
}
static DEVICE_ATTR_RO(trans_stat);

static struct attribute *devfreq_attrs[] = {
	&dev_attr_governor.attr,
	&dev_attr_available_governors.attr,
	&dev_attr_cur_freq.attr,
	&dev_attr_available_frequencies.attr,
	&dev_attr_target_freq.attr,
	&dev_attr_polling_interval.attr,
	&dev_attr_min_freq.attr,
	&dev_attr_max_freq.attr,
	&dev_attr_trans_stat.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devfreq);

static int __init devfreq_init(void)
{
	devfreq_class = class_create(THIS_MODULE, "devfreq");
	if (IS_ERR(devfreq_class)) {
		pr_err("%s: couldn't create class\n", __FILE__);
		return PTR_ERR(devfreq_class);
	}

	devfreq_wq = create_freezable_workqueue("devfreq_wq");
	if (!devfreq_wq) {
		class_destroy(devfreq_class);
		pr_err("%s: couldn't create workqueue\n", __FILE__);
		return -ENOMEM;
	}
	devfreq_class->dev_groups = devfreq_groups;

	return 0;
}
subsys_initcall(devfreq_init);

/*
 * The following are helper functions for devfreq user device drivers with
 * OPP framework.
 */

/**
 * devfreq_recommended_opp() - Helper function to get proper OPP for the
 *			       freq value given to target callback.
 * @dev: The devfreq user device. (parent of devfreq)
 * @freq: The frequency given to target function
 * @flags: Flags handed from devfreq framework.
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
					   unsigned long *freq,
					   u32 flags)
{
	struct dev_pm_opp *opp;

	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
		/* The freq is an upper bound. opp should be lower */
		opp = dev_pm_opp_find_freq_floor(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ERANGE))
			opp = dev_pm_opp_find_freq_ceil(dev, freq);
	} else {
		/* The freq is a lower bound. opp should be higher */
		opp = dev_pm_opp_find_freq_ceil(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ERANGE))
			opp = dev_pm_opp_find_freq_floor(dev, freq);
	}

	return opp;
}
EXPORT_SYMBOL(devfreq_recommended_opp);
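
/*
 * Illustrative sketch only (not part of this driver): a typical way for a
 * driver's target() callback to use devfreq_recommended_opp().  The bar_*
 * name and the clock/regulator handling are assumptions for the example.
 */
static int bar_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct dev_pm_opp *opp;
	unsigned long volt;

	/* Snap the requested rate onto a valid OPP, honouring the LUB/GLB flag. */
	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	volt = dev_pm_opp_get_voltage(opp);
	dev_pm_opp_put(opp);		/* callers must drop the reference */

	dev_dbg(dev, "switching to %lu Hz (%lu uV)\n", *freq, volt);

	/*
	 * A real driver would now order regulator_set_voltage() and
	 * clk_set_rate() according to whether it is scaling up or down.
	 */
	return 0;
}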

/**
 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
 *				     of any changes in the OPP availability
 * @dev: The devfreq user device. (parent of devfreq)
 * @devfreq: The devfreq object.
 */
int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	return dev_pm_opp_register_notifier(dev, &devfreq->nb);
}
EXPORT_SYMBOL(devfreq_register_opp_notifier);

/**
 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
 *				       notified of any changes in the OPP
 *				       availability
 * @dev: The devfreq user device. (parent of devfreq)
 * @devfreq: The devfreq object.
 *
 * At exit() callback of devfreq_dev_profile, this must be included if
 * devfreq_recommended_opp is used.
 */
int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	return dev_pm_opp_unregister_notifier(dev, &devfreq->nb);
}
EXPORT_SYMBOL(devfreq_unregister_opp_notifier);

static void devm_devfreq_opp_release(struct device *dev, void *res)
{
	devfreq_unregister_opp_notifier(dev, *(struct devfreq **)res);
}

/**
 * devm_devfreq_register_opp_notifier()
 *	- Resource-managed devfreq_register_opp_notifier()
 * @dev: The devfreq user device. (parent of devfreq)
 * @devfreq: The devfreq object.
 */
int devm_devfreq_register_opp_notifier(struct device *dev,
				       struct devfreq *devfreq)
{
	struct devfreq **ptr;
	int ret;

	ptr = devres_alloc(devm_devfreq_opp_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = devfreq_register_opp_notifier(dev, devfreq);
	if (ret) {
		devres_free(ptr);
		return ret;
	}

	*ptr = devfreq;
	devres_add(dev, ptr);

	return 0;
}
EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);

/**
 * devm_devfreq_unregister_opp_notifier()
 *	- Resource-managed devfreq_unregister_opp_notifier()
 * @dev: The devfreq user device. (parent of devfreq)
 * @devfreq: The devfreq object.
 */
void devm_devfreq_unregister_opp_notifier(struct device *dev,
					  struct devfreq *devfreq)
{
	WARN_ON(devres_release(dev, devm_devfreq_opp_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);

/**
 * devfreq_register_notifier() - Register a driver with devfreq
 * @devfreq: The devfreq object.
 * @nb: The notifier block to register.
 * @list: DEVFREQ_TRANSITION_NOTIFIER.
 */
int devfreq_register_notifier(struct devfreq *devfreq,
			      struct notifier_block *nb,
			      unsigned int list)
{
	int ret = 0;

	if (!devfreq)
		return -EINVAL;

	switch (list) {
	case DEVFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&devfreq->transition_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(devfreq_register_notifier);

/*
 * devfreq_unregister_notifier() - Unregister a driver with devfreq
 * @devfreq: The devfreq object.
 * @nb: The notifier block to be unregistered.
 * @list: DEVFREQ_TRANSITION_NOTIFIER.
 */
int devfreq_unregister_notifier(struct devfreq *devfreq,
				struct notifier_block *nb,
				unsigned int list)
{
	int ret = 0;

	if (!devfreq)
		return -EINVAL;

	switch (list) {
	case DEVFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&devfreq->transition_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(devfreq_unregister_notifier);

struct devfreq_notifier_devres {
	struct devfreq *devfreq;
	struct notifier_block *nb;
	unsigned int list;
};

static void devm_devfreq_notifier_release(struct device *dev, void *res)
{
	struct devfreq_notifier_devres *this = res;

	devfreq_unregister_notifier(this->devfreq, this->nb, this->list);
}

/**
 * devm_devfreq_register_notifier()
 *	- Resource-managed devfreq_register_notifier()
 * @dev: The devfreq user device. (parent of devfreq)
 * @devfreq: The devfreq object.
 * @nb: The notifier block to be registered.
 * @list: DEVFREQ_TRANSITION_NOTIFIER.
 */
int devm_devfreq_register_notifier(struct device *dev,
				   struct devfreq *devfreq,
				   struct notifier_block *nb,
				   unsigned int list)
{
	struct devfreq_notifier_devres *ptr;
	int ret;

	ptr = devres_alloc(devm_devfreq_notifier_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = devfreq_register_notifier(devfreq, nb, list);
	if (ret) {
		devres_free(ptr);
		return ret;
	}

	ptr->devfreq = devfreq;
	ptr->nb = nb;
	ptr->list = list;
	devres_add(dev, ptr);

	return 0;
}
EXPORT_SYMBOL(devm_devfreq_register_notifier);
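
/*
 * Illustrative sketch only (not part of this driver): a consumer registering
 * for DEVFREQ_TRANSITION_NOTIFIER events through the resource-managed helper
 * above.  The baz_* names are hypothetical.
 */
static int baz_devfreq_event(struct notifier_block *nb, unsigned long event,
			     void *data)
{
	struct devfreq_freqs *freqs = data;

	if (event == DEVFREQ_POSTCHANGE)
		pr_debug("devfreq switched from %lu Hz to %lu Hz\n",
			 freqs->old, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block baz_nb = {
	.notifier_call = baz_devfreq_event,
};

/* Called from probe once the devfreq instance of interest is known. */
static int baz_track_transitions(struct device *dev, struct devfreq *df)
{
	return devm_devfreq_register_notifier(dev, df, &baz_nb,
					      DEVFREQ_TRANSITION_NOTIFIER);
}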

/**
 * devm_devfreq_unregister_notifier()
 *	- Resource-managed devfreq_unregister_notifier()
 * @dev: The devfreq user device. (parent of devfreq)
 * @devfreq: The devfreq object.
 * @nb: The notifier block to be unregistered.
 * @list: DEVFREQ_TRANSITION_NOTIFIER.
 */
void devm_devfreq_unregister_notifier(struct device *dev,
				      struct devfreq *devfreq,
				      struct notifier_block *nb,
				      unsigned int list)
{
	WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_unregister_notifier);