/*
 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
 *	    for Non-CPU Devices.
 *
 * Copyright (C) 2011 Samsung Electronics
 *	MyungJoo Ham <myungjoo.ham@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/pm_opp.h>
#include <linux/devfreq.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <linux/of.h>
#include "governor.h"

#define MAX(a, b)	((a) > (b) ? (a) : (b))
#define MIN(a, b)	((a) < (b) ? (a) : (b))

static struct class *devfreq_class;

/*
 * devfreq core provides delayed work based load monitoring helper
 * functions. Governors can use these or can implement their own
 * monitoring mechanism.
 */
static struct workqueue_struct *devfreq_wq;

/* The list of all device-devfreq governors */
static LIST_HEAD(devfreq_governor_list);
/* The list of all device-devfreq */
static LIST_HEAD(devfreq_list);
static DEFINE_MUTEX(devfreq_list_lock);

/**
 * find_device_devfreq() - find devfreq struct using device pointer
 * @dev:	device pointer used to lookup device devfreq.
 *
 * Search the list of device devfreqs and return the matched device's
 * devfreq info. devfreq_list_lock should be held by the caller.
 */
static struct devfreq *find_device_devfreq(struct device *dev)
{
	struct devfreq *tmp_devfreq;

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
		if (tmp_devfreq->dev.parent == dev)
			return tmp_devfreq;
	}

	return ERR_PTR(-ENODEV);
}

static unsigned long find_available_min_freq(struct devfreq *devfreq)
{
	struct dev_pm_opp *opp;
	unsigned long min_freq = 0;

	opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &min_freq);
	if (IS_ERR(opp))
		min_freq = 0;
	else
		dev_pm_opp_put(opp);

	return min_freq;
}

static unsigned long find_available_max_freq(struct devfreq *devfreq)
{
	struct dev_pm_opp *opp;
	unsigned long max_freq = ULONG_MAX;

	opp = dev_pm_opp_find_freq_floor(devfreq->dev.parent, &max_freq);
	if (IS_ERR(opp))
		max_freq = 0;
	else
		dev_pm_opp_put(opp);

	return max_freq;
}

/**
 * devfreq_get_freq_level() - Lookup freq_table for the frequency
 * @devfreq:	the devfreq instance
 * @freq:	the target frequency
 */
static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
{
	int lev;

	for (lev = 0; lev < devfreq->profile->max_state; lev++)
		if (freq == devfreq->profile->freq_table[lev])
			return lev;

	return -EINVAL;
}

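/*
 * Illustrative sketch (not part of this file): the OPP-based helpers above
 * and set_freq_table() below assume the parent device already has an OPP
 * table, typically populated from devicetree "operating-points" properties
 * or registered by the driver itself. A hypothetical driver might do, in
 * its probe():
 *
 *	dev_pm_opp_add(dev, 100000000, 900000);		(100 MHz, 0.90 V)
 *	dev_pm_opp_add(dev, 200000000, 1000000);	(200 MHz, 1.00 V)
 *	dev_pm_opp_add(dev, 400000000, 1100000);	(400 MHz, 1.10 V)
 */
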
static int set_freq_table(struct devfreq *devfreq)
{
	struct devfreq_dev_profile *profile = devfreq->profile;
	struct dev_pm_opp *opp;
	unsigned long freq;
	int i, count;

	/* Initialize the freq_table from OPP table */
	count = dev_pm_opp_get_opp_count(devfreq->dev.parent);
	if (count <= 0)
		return -EINVAL;

	profile->max_state = count;
	profile->freq_table = devm_kcalloc(devfreq->dev.parent,
					profile->max_state,
					sizeof(*profile->freq_table),
					GFP_KERNEL);
	if (!profile->freq_table) {
		profile->max_state = 0;
		return -ENOMEM;
	}

	for (i = 0, freq = 0; i < profile->max_state; i++, freq++) {
		opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq);
		if (IS_ERR(opp)) {
			devm_kfree(devfreq->dev.parent, profile->freq_table);
			profile->max_state = 0;
			return PTR_ERR(opp);
		}
		dev_pm_opp_put(opp);
		profile->freq_table[i] = freq;
	}

	return 0;
}

/**
 * devfreq_update_status() - Update statistics of devfreq behavior
 * @devfreq:	the devfreq instance
 * @freq:	the update target frequency
 */
int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
{
	int lev, prev_lev, ret = 0;
	unsigned long cur_time;

	cur_time = jiffies;

	/* Immediately exit if previous_freq is not initialized yet. */
	if (!devfreq->previous_freq)
		goto out;

	prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
	if (prev_lev < 0) {
		ret = prev_lev;
		goto out;
	}

	devfreq->time_in_state[prev_lev] +=
			 cur_time - devfreq->last_stat_updated;

	lev = devfreq_get_freq_level(devfreq, freq);
	if (lev < 0) {
		ret = lev;
		goto out;
	}

	if (lev != prev_lev) {
		devfreq->trans_table[(prev_lev *
				devfreq->profile->max_state) + lev]++;
		devfreq->total_trans++;
	}

out:
	devfreq->last_stat_updated = cur_time;
	return ret;
}
EXPORT_SYMBOL(devfreq_update_status);

/**
 * find_devfreq_governor() - find devfreq governor from name
 * @name:	name of the governor
 *
 * Search the list of devfreq governors and return the matched
 * governor's pointer. devfreq_list_lock should be held by the caller.
 */
static struct devfreq_governor *find_devfreq_governor(const char *name)
{
	struct devfreq_governor *tmp_governor;

	if (IS_ERR_OR_NULL(name)) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
		if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
			return tmp_governor;
	}

	return ERR_PTR(-ENODEV);
}

static int devfreq_notify_transition(struct devfreq *devfreq,
		struct devfreq_freqs *freqs, unsigned int state)
{
	if (!devfreq)
		return -EINVAL;

	switch (state) {
	case DEVFREQ_PRECHANGE:
		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
				DEVFREQ_PRECHANGE, freqs);
		break;

	case DEVFREQ_POSTCHANGE:
		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
				DEVFREQ_POSTCHANGE, freqs);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Load monitoring helper functions for governors use */

/**
 * update_devfreq() - Reevaluate the device and configure frequency.
 * @devfreq:	the devfreq instance.
 *
 * Note: Lock devfreq->lock before calling update_devfreq
 *	 This function is exported for governors.
 */
int update_devfreq(struct devfreq *devfreq)
{
	struct devfreq_freqs freqs;
	unsigned long freq, cur_freq, min_freq, max_freq;
	int err = 0;
	u32 flags = 0;

	if (!mutex_is_locked(&devfreq->lock)) {
		WARN(true, "devfreq->lock must be locked by the caller.\n");
		return -EINVAL;
	}

	if (!devfreq->governor)
		return -EINVAL;

	/* Reevaluate the proper frequency */
	err = devfreq->governor->get_target_freq(devfreq, &freq);
	if (err)
		return err;

	/*
	 * Adjust the frequency with user freq, QoS and available freq.
	 *
	 * List from the highest priority
	 * max_freq
	 * min_freq
	 */
	max_freq = MIN(devfreq->scaling_max_freq, devfreq->max_freq);
	min_freq = MAX(devfreq->scaling_min_freq, devfreq->min_freq);

	if (min_freq && freq < min_freq) {
		freq = min_freq;
		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
	}
	if (max_freq && freq > max_freq) {
		freq = max_freq;
		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
	}

	if (devfreq->profile->get_cur_freq)
		devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
	else
		cur_freq = devfreq->previous_freq;

	freqs.old = cur_freq;
	freqs.new = freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);

	err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
	if (err) {
		freqs.new = cur_freq;
		devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
		return err;
	}

	freqs.new = freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);

	if (devfreq_update_status(devfreq, freq))
		dev_err(&devfreq->dev,
			"Couldn't update frequency transition information.\n");

	devfreq->previous_freq = freq;
	return err;
}
EXPORT_SYMBOL(update_devfreq);

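/*
 * Illustrative sketch (not part of this file): a governor re-evaluates the
 * device by taking devfreq->lock and calling update_devfreq(), which in turn
 * calls the governor's get_target_freq(). The "foo" names below are
 * hypothetical.
 *
 *	static int foo_get_target_freq(struct devfreq *df, unsigned long *freq)
 *	{
 *		*freq = ULONG_MAX;	(ask for the highest frequency;
 *					 update_devfreq() clamps it to max_freq)
 *		return 0;
 *	}
 *
 *	Triggering a re-evaluation from a governor-private event:
 *
 *	mutex_lock(&df->lock);
 *	err = update_devfreq(df);
 *	mutex_unlock(&df->lock);
 */
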
/**
 * devfreq_monitor() - Periodically poll devfreq objects.
 * @work:	the work struct used to run devfreq_monitor periodically.
 *
 */
static void devfreq_monitor(struct work_struct *work)
{
	int err;
	struct devfreq *devfreq = container_of(work,
					struct devfreq, work.work);

	mutex_lock(&devfreq->lock);
	err = update_devfreq(devfreq);
	if (err)
		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);

	queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	mutex_unlock(&devfreq->lock);
}

/**
 * devfreq_monitor_start() - Start load monitoring of devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function for starting devfreq device load monitoring. By
 * default delayed work based monitoring is supported. Function
 * to be called from governor in response to DEVFREQ_GOV_START
 * event when device is added to devfreq framework.
 */
void devfreq_monitor_start(struct devfreq *devfreq)
{
	INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
	if (devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
}
EXPORT_SYMBOL(devfreq_monitor_start);

/**
 * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to stop devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_STOP
 * event when device is removed from devfreq framework.
 */
void devfreq_monitor_stop(struct devfreq *devfreq)
{
	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_stop);

/**
 * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to suspend devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_SUSPEND
 * event or when polling interval is set to zero.
 *
 * Note: Though this function is same as devfreq_monitor_stop(),
 * intentionally kept separate to provide hooks for collecting
 * transition statistics.
 */
void devfreq_monitor_suspend(struct devfreq *devfreq)
{
	mutex_lock(&devfreq->lock);
	if (devfreq->stop_polling) {
		mutex_unlock(&devfreq->lock);
		return;
	}

	devfreq_update_status(devfreq, devfreq->previous_freq);
	devfreq->stop_polling = true;
	mutex_unlock(&devfreq->lock);
	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_suspend);

/**
 * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to resume devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_RESUME
 * event or when polling interval is set to non-zero.
 */
void devfreq_monitor_resume(struct devfreq *devfreq)
{
	unsigned long freq;

	mutex_lock(&devfreq->lock);
	if (!devfreq->stop_polling)
		goto out;

	if (!delayed_work_pending(&devfreq->work) &&
			devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));

	devfreq->last_stat_updated = jiffies;
	devfreq->stop_polling = false;

	if (devfreq->profile->get_cur_freq &&
		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
		devfreq->previous_freq = freq;

out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_monitor_resume);

/**
 * devfreq_interval_update() - Update device devfreq monitoring interval
 * @devfreq:	the devfreq instance.
 * @delay:	new polling interval to be set.
 *
 * Helper function to set new load monitoring polling interval. Function
 * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
 */
void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
{
	unsigned int cur_delay = devfreq->profile->polling_ms;
	unsigned int new_delay = *delay;

	mutex_lock(&devfreq->lock);
	devfreq->profile->polling_ms = new_delay;

	if (devfreq->stop_polling)
		goto out;

	/* if new delay is zero, stop polling */
	if (!new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		return;
	}

	/* if current delay is zero, start polling with new delay */
	if (!cur_delay) {
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
		goto out;
	}

	/* if current delay is greater than new delay, restart polling */
	if (cur_delay > new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		mutex_lock(&devfreq->lock);
		if (!devfreq->stop_polling)
			queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	}
out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_interval_update);

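/*
 * Illustrative sketch (not part of this file): a simple governor typically
 * forwards the core events to the monitoring helpers above. The "foo" names
 * are hypothetical; compare with the in-tree simple_ondemand governor.
 *
 *	static int foo_event_handler(struct devfreq *devfreq,
 *				     unsigned int event, void *data)
 *	{
 *		switch (event) {
 *		case DEVFREQ_GOV_START:
 *			devfreq_monitor_start(devfreq);
 *			break;
 *		case DEVFREQ_GOV_STOP:
 *			devfreq_monitor_stop(devfreq);
 *			break;
 *		case DEVFREQ_GOV_INTERVAL:
 *			devfreq_interval_update(devfreq, (unsigned int *)data);
 *			break;
 *		case DEVFREQ_GOV_SUSPEND:
 *			devfreq_monitor_suspend(devfreq);
 *			break;
 *		case DEVFREQ_GOV_RESUME:
 *			devfreq_monitor_resume(devfreq);
 *			break;
 *		}
 *		return 0;
 *	}
 */
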
/**
 * devfreq_notifier_call() - Notify that the device frequency requirements
 *			     have been changed out of devfreq framework.
 * @nb:		the notifier_block (supposed to be devfreq->nb)
 * @type:	not used
 * @devp:	not used
 *
 * Called by a notifier that uses devfreq->nb.
 */
static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
				 void *devp)
{
	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
	int ret;

	mutex_lock(&devfreq->lock);

	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
	if (!devfreq->scaling_min_freq) {
		mutex_unlock(&devfreq->lock);
		return -EINVAL;
	}

	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
	if (!devfreq->scaling_max_freq) {
		mutex_unlock(&devfreq->lock);
		return -EINVAL;
	}

	ret = update_devfreq(devfreq);
	mutex_unlock(&devfreq->lock);

	return ret;
}

/**
 * devfreq_dev_release() - Callback for struct device to release the device.
 * @dev:	the devfreq device
 *
 * Remove devfreq from the list and release its resources.
 */
static void devfreq_dev_release(struct device *dev)
{
	struct devfreq *devfreq = to_devfreq(dev);

	mutex_lock(&devfreq_list_lock);
	if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
		mutex_unlock(&devfreq_list_lock);
		dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
		return;
	}
	list_del(&devfreq->node);
	mutex_unlock(&devfreq_list_lock);

	if (devfreq->governor)
		devfreq->governor->event_handler(devfreq,
						 DEVFREQ_GOV_STOP, NULL);

	if (devfreq->profile->exit)
		devfreq->profile->exit(devfreq->dev.parent);

	mutex_destroy(&devfreq->lock);
	kfree(devfreq);
}

/**
 * devfreq_add_device() - Add devfreq feature to the device
 * @dev:	the device to add devfreq feature.
 * @profile:	device-specific profile to run devfreq.
 * @governor_name:	name of the policy to choose frequency.
 * @data:	private data for the governor. The devfreq framework does not
 *		touch this value.
 */
struct devfreq *devfreq_add_device(struct device *dev,
				   struct devfreq_dev_profile *profile,
				   const char *governor_name,
				   void *data)
{
	struct devfreq *devfreq;
	struct devfreq_governor *governor;
	static atomic_t devfreq_no = ATOMIC_INIT(-1);
	int err = 0;

	if (!dev || !profile || !governor_name) {
		dev_err(dev, "%s: Invalid parameters.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&devfreq_list_lock);
	devfreq = find_device_devfreq(dev);
	mutex_unlock(&devfreq_list_lock);
	if (!IS_ERR(devfreq)) {
		dev_err(dev, "%s: Unable to create devfreq for the device.\n",
			__func__);
		err = -EINVAL;
		goto err_out;
	}

	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
	if (!devfreq) {
		err = -ENOMEM;
		goto err_out;
	}

	mutex_init(&devfreq->lock);
	mutex_lock(&devfreq->lock);
	devfreq->dev.parent = dev;
	devfreq->dev.class = devfreq_class;
	devfreq->dev.release = devfreq_dev_release;
	devfreq->profile = profile;
	strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
	devfreq->previous_freq = profile->initial_freq;
	devfreq->last_status.current_frequency = profile->initial_freq;
	devfreq->data = data;
	devfreq->nb.notifier_call = devfreq_notifier_call;

	if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
		mutex_unlock(&devfreq->lock);
		err = set_freq_table(devfreq);
		if (err < 0)
			goto err_out;
		mutex_lock(&devfreq->lock);
	}

	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
	if (!devfreq->scaling_min_freq) {
		mutex_unlock(&devfreq->lock);
		err = -EINVAL;
		goto err_dev;
	}
	devfreq->min_freq = devfreq->scaling_min_freq;

	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
	if (!devfreq->scaling_max_freq) {
		mutex_unlock(&devfreq->lock);
		err = -EINVAL;
		goto err_dev;
	}
	devfreq->max_freq = devfreq->scaling_max_freq;

	dev_set_name(&devfreq->dev, "devfreq%d",
				atomic_inc_return(&devfreq_no));
	err = device_register(&devfreq->dev);
	if (err) {
		mutex_unlock(&devfreq->lock);
		put_device(&devfreq->dev);
		goto err_out;
	}

	devfreq->trans_table =
		devm_kzalloc(&devfreq->dev,
			     array3_size(sizeof(unsigned int),
					 devfreq->profile->max_state,
					 devfreq->profile->max_state),
			     GFP_KERNEL);
	devfreq->time_in_state = devm_kcalloc(&devfreq->dev,
					      devfreq->profile->max_state,
					      sizeof(unsigned long),
					      GFP_KERNEL);
	devfreq->last_stat_updated = jiffies;

	srcu_init_notifier_head(&devfreq->transition_notifier_list);

	mutex_unlock(&devfreq->lock);

	mutex_lock(&devfreq_list_lock);
	list_add(&devfreq->node, &devfreq_list);

	governor = find_devfreq_governor(devfreq->governor_name);
	if (IS_ERR(governor)) {
		dev_err(dev, "%s: Unable to find governor for the device\n",
			__func__);
		err = PTR_ERR(governor);
		goto err_init;
	}

	devfreq->governor = governor;
	err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
						NULL);
	if (err) {
		dev_err(dev, "%s: Unable to start governor for the device\n",
			__func__);
		goto err_init;
	}
	mutex_unlock(&devfreq_list_lock);

	return devfreq;

err_init:
	list_del(&devfreq->node);
	mutex_unlock(&devfreq_list_lock);

	device_unregister(&devfreq->dev);
	devfreq = NULL;
err_dev:
	if (devfreq)
		kfree(devfreq);
err_out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(devfreq_add_device);

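/*
 * Illustrative sketch (not part of this file): a device driver enables DVFS
 * by filling a devfreq_dev_profile and registering it from its probe(). The
 * "foo" callbacks and frequencies below are hypothetical.
 *
 *	static struct devfreq_dev_profile foo_profile = {
 *		.initial_freq	= 200000000,
 *		.polling_ms	= 100,
 *		.target		= foo_target,		(see devfreq_recommended_opp() below)
 *		.get_dev_status	= foo_get_dev_status,	(reports busy/total time)
 *	};
 *
 *	foo->devfreq = devfreq_add_device(dev, &foo_profile,
 *					  "simple_ondemand", NULL);
 *	if (IS_ERR(foo->devfreq))
 *		return PTR_ERR(foo->devfreq);
 *
 * The resource-managed devm_devfreq_add_device() below is usually preferred.
 */
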
/**
 * devfreq_remove_device() - Remove devfreq feature from a device.
 * @devfreq:	the devfreq instance to be removed
 *
 * The opposite of devfreq_add_device().
 */
int devfreq_remove_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	device_unregister(&devfreq->dev);

	return 0;
}
EXPORT_SYMBOL(devfreq_remove_device);

static int devm_devfreq_dev_match(struct device *dev, void *res, void *data)
{
	struct devfreq **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

static void devm_devfreq_dev_release(struct device *dev, void *res)
{
	devfreq_remove_device(*(struct devfreq **)res);
}

/**
 * devm_devfreq_add_device() - Resource-managed devfreq_add_device()
 * @dev:	the device to add devfreq feature.
 * @profile:	device-specific profile to run devfreq.
 * @governor_name:	name of the policy to choose frequency.
 * @data:	private data for the governor. The devfreq framework does not
 *		touch this value.
 *
 * This function manages the memory of the devfreq device automatically using
 * device resource management, which simplifies freeing the devfreq device.
 */
struct devfreq *devm_devfreq_add_device(struct device *dev,
					struct devfreq_dev_profile *profile,
					const char *governor_name,
					void *data)
{
	struct devfreq **ptr, *devfreq;

	ptr = devres_alloc(devm_devfreq_dev_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	devfreq = devfreq_add_device(dev, profile, governor_name, data);
	if (IS_ERR(devfreq)) {
		devres_free(ptr);
		return devfreq;
	}

	*ptr = devfreq;
	devres_add(dev, ptr);

	return devfreq;
}
EXPORT_SYMBOL(devm_devfreq_add_device);

#ifdef CONFIG_OF
/*
 * devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
 * @dev - instance to the given device
 * @index - index into list of devfreq
 *
 * return the instance of devfreq device
 */
struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
{
	struct device_node *node;
	struct devfreq *devfreq;

	if (!dev)
		return ERR_PTR(-EINVAL);

	if (!dev->of_node)
		return ERR_PTR(-EINVAL);

	node = of_parse_phandle(dev->of_node, "devfreq", index);
	if (!node)
		return ERR_PTR(-ENODEV);

	mutex_lock(&devfreq_list_lock);
	list_for_each_entry(devfreq, &devfreq_list, node) {
		if (devfreq->dev.parent
			&& devfreq->dev.parent->of_node == node) {
			mutex_unlock(&devfreq_list_lock);
			of_node_put(node);
			return devfreq;
		}
	}
	mutex_unlock(&devfreq_list_lock);
	of_node_put(node);

	return ERR_PTR(-EPROBE_DEFER);
}
#else
struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
{
	return ERR_PTR(-ENODEV);
}
#endif /* CONFIG_OF */
EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle);

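/*
 * Illustrative sketch (not part of this file): a consumer device points at
 * its devfreq provider with a "devfreq" phandle in devicetree and looks it
 * up at probe time. The node names below are hypothetical.
 *
 *	devicetree fragment:
 *		bus@12000000 {
 *			...
 *			devfreq = <&memory_bus>;
 *		};
 *
 *	struct devfreq *parent = devfreq_get_devfreq_by_phandle(dev, 0);
 *	if (IS_ERR(parent))
 *		return PTR_ERR(parent);		(may be -EPROBE_DEFER)
 */
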
/**
 * devm_devfreq_remove_device() - Resource-managed devfreq_remove_device()
 * @dev:	the device from which to remove devfreq feature.
 * @devfreq:	the devfreq instance to be removed
 */
void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq)
{
	WARN_ON(devres_release(dev, devm_devfreq_dev_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_remove_device);

/**
 * devfreq_suspend_device() - Suspend devfreq of a device.
 * @devfreq:	the devfreq instance to be suspended
 *
 * This function is intended to be called by the pm callbacks
 * (e.g., runtime_suspend, suspend) of the device driver that
 * holds the devfreq.
 */
int devfreq_suspend_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	if (!devfreq->governor)
		return 0;

	return devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_SUSPEND, NULL);
}
EXPORT_SYMBOL(devfreq_suspend_device);

/**
 * devfreq_resume_device() - Resume devfreq of a device.
 * @devfreq:	the devfreq instance to be resumed
 *
 * This function is intended to be called by the pm callbacks
 * (e.g., runtime_resume, resume) of the device driver that
 * holds the devfreq.
 */
int devfreq_resume_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	if (!devfreq->governor)
		return 0;

	return devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_RESUME, NULL);
}
EXPORT_SYMBOL(devfreq_resume_device);

/**
 * devfreq_add_governor() - Add devfreq governor
 * @governor:	the devfreq governor to be added
 */
int devfreq_add_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (!IS_ERR(g)) {
		pr_err("%s: governor %s already registered\n", __func__,
		       g->name);
		err = -EINVAL;
		goto err_out;
	}

	list_add(&governor->node, &devfreq_governor_list);

	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret = 0;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* The following should never occur */
			if (devfreq->governor) {
				dev_warn(dev,
					 "%s: Governor %s already present\n",
					 __func__, devfreq->governor->name);
				ret = devfreq->governor->event_handler(devfreq,
							DEVFREQ_GOV_STOP, NULL);
				if (ret) {
					dev_warn(dev,
						 "%s: Governor %s stop = %d\n",
						 __func__,
						 devfreq->governor->name, ret);
				}
				/* Fall through */
			}
			devfreq->governor = governor;
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_START, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s start=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
		}
	}

err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_add_governor);

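/*
 * Illustrative sketch (not part of this file): a governor module registers
 * itself with the framework via devfreq_add_governor() and removes itself on
 * exit with devfreq_remove_governor() below. The "foo" governor is
 * hypothetical; see the in-tree governors for complete examples.
 *
 *	static struct devfreq_governor devfreq_foo = {
 *		.name		 = "foo",
 *		.get_target_freq = foo_get_target_freq,
 *		.event_handler	 = foo_event_handler,
 *	};
 *
 *	static int __init devfreq_foo_init(void)
 *	{
 *		return devfreq_add_governor(&devfreq_foo);
 *	}
 *	subsys_initcall(devfreq_foo_init);
 *
 *	static void __exit devfreq_foo_exit(void)
 *	{
 *		int ret = devfreq_remove_governor(&devfreq_foo);
 *		if (ret)
 *			pr_err("%s: failed to remove governor: %d\n",
 *			       __func__, ret);
 *	}
 *	module_exit(devfreq_foo_exit);
 */
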
/**
 * devfreq_remove_governor() - Remove devfreq governor
 * @governor:	the devfreq governor to be removed
 */
int devfreq_remove_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (IS_ERR(g)) {
		pr_err("%s: governor %s not registered\n", __func__,
		       governor->name);
		err = PTR_ERR(g);
		goto err_out;
	}
	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* we should have a devfreq governor! */
			if (!devfreq->governor) {
				dev_warn(dev, "%s: Governor %s NOT present\n",
					 __func__, governor->name);
				continue;
				/* Fall through */
			}
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_STOP, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s stop=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
			devfreq->governor = NULL;
		}
	}

	list_del(&governor->node);
err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_remove_governor);

static ssize_t governor_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	if (!to_devfreq(dev)->governor)
		return -EINVAL;

	return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
}

static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	int ret;
	char str_governor[DEVFREQ_NAME_LEN + 1];
	struct devfreq_governor *governor;

	ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&devfreq_list_lock);
	governor = find_devfreq_governor(str_governor);
	if (IS_ERR(governor)) {
		ret = PTR_ERR(governor);
		goto out;
	}
	if (df->governor == governor) {
		ret = 0;
		goto out;
	} else if ((df->governor && df->governor->immutable) ||
					governor->immutable) {
		ret = -EINVAL;
		goto out;
	}

	if (df->governor) {
		ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
		if (ret) {
			dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
				 __func__, df->governor->name, ret);
			goto out;
		}
	}
	df->governor = governor;
	strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
	if (ret)
		dev_warn(dev, "%s: Governor %s not started(%d)\n",
			 __func__, df->governor->name, ret);
out:
	mutex_unlock(&devfreq_list_lock);

	if (!ret)
		ret = count;
	return ret;
}
static DEVICE_ATTR_RW(governor);

static ssize_t available_governors_show(struct device *d,
					struct device_attribute *attr,
					char *buf)
{
	struct devfreq *df = to_devfreq(d);
	ssize_t count = 0;

	mutex_lock(&devfreq_list_lock);

	/*
	 * The devfreq with immutable governor (e.g., passive) shows
	 * only its own governor.
	 */
	if (df->governor->immutable) {
		count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
				  "%s ", df->governor_name);
	/*
	 * The devfreq device shows the registered governors except for
	 * immutable governors such as the passive governor.
	 */
	} else {
		struct devfreq_governor *governor;

		list_for_each_entry(governor, &devfreq_governor_list, node) {
			if (governor->immutable)
				continue;
			count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
					   "%s ", governor->name);
		}
	}

	mutex_unlock(&devfreq_list_lock);

	/* Truncate the trailing space */
	if (count)
		count--;

	count += sprintf(&buf[count], "\n");

	return count;
}
static DEVICE_ATTR_RO(available_governors);

static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	unsigned long freq;
	struct devfreq *devfreq = to_devfreq(dev);

	if (devfreq->profile->get_cur_freq &&
		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
		return sprintf(buf, "%lu\n", freq);

	return sprintf(buf, "%lu\n", devfreq->previous_freq);
}
static DEVICE_ATTR_RO(cur_freq);

static ssize_t target_freq_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
}
static DEVICE_ATTR_RO(target_freq);

static ssize_t polling_interval_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
}

static ssize_t polling_interval_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned int value;
	int ret;

	if (!df->governor)
		return -EINVAL;

	ret = sscanf(buf, "%u", &value);
	if (ret != 1)
		return -EINVAL;

	df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
	ret = count;

	return ret;
}
static DEVICE_ATTR_RW(polling_interval);

static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;
	unsigned long max;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&df->lock);
	max = df->max_freq;
	if (value && max && value > max) {
		ret = -EINVAL;
		goto unlock;
	}

	df->min_freq = value;
	update_devfreq(df);
	ret = count;
unlock:
	mutex_unlock(&df->lock);
	return ret;
}

static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct devfreq *df = to_devfreq(dev);

	return sprintf(buf, "%lu\n", MAX(df->scaling_min_freq, df->min_freq));
}

static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;
	unsigned long min;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&df->lock);
	min = df->min_freq;
	if (value && min && value < min) {
		ret = -EINVAL;
		goto unlock;
	}

	df->max_freq = value;
	update_devfreq(df);
	ret = count;
unlock:
	mutex_unlock(&df->lock);
	return ret;
}
static DEVICE_ATTR_RW(min_freq);

static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct devfreq *df = to_devfreq(dev);

	return sprintf(buf, "%lu\n", MIN(df->scaling_max_freq, df->max_freq));
}
static DEVICE_ATTR_RW(max_freq);

static ssize_t available_frequencies_show(struct device *d,
					  struct device_attribute *attr,
					  char *buf)
{
	struct devfreq *df = to_devfreq(d);
	ssize_t count = 0;
	int i;

	mutex_lock(&df->lock);

	for (i = 0; i < df->profile->max_state; i++)
		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
				"%lu ", df->profile->freq_table[i]);

	mutex_unlock(&df->lock);
	/* Truncate the trailing space */
	if (count)
		count--;

	count += sprintf(&buf[count], "\n");

	return count;
}
static DEVICE_ATTR_RO(available_frequencies);

static ssize_t trans_stat_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct devfreq *devfreq = to_devfreq(dev);
	ssize_t len;
	int i, j;
	unsigned int max_state = devfreq->profile->max_state;

	if (!devfreq->stop_polling &&
			devfreq_update_status(devfreq, devfreq->previous_freq))
		return 0;
	if (max_state == 0)
		return sprintf(buf, "Not Supported.\n");

	len = sprintf(buf, "     From  :   To\n");
	len += sprintf(buf + len, "           :");
	for (i = 0; i < max_state; i++)
		len += sprintf(buf + len, "%10lu",
				devfreq->profile->freq_table[i]);

	len += sprintf(buf + len, "   time(ms)\n");

	for (i = 0; i < max_state; i++) {
		if (devfreq->profile->freq_table[i]
					== devfreq->previous_freq) {
			len += sprintf(buf + len, "*");
		} else {
			len += sprintf(buf + len, " ");
		}
		len += sprintf(buf + len, "%10lu:",
				devfreq->profile->freq_table[i]);
		for (j = 0; j < max_state; j++)
			len += sprintf(buf + len, "%10u",
				devfreq->trans_table[(i * max_state) + j]);
		len += sprintf(buf + len, "%10u\n",
			jiffies_to_msecs(devfreq->time_in_state[i]));
	}

	len += sprintf(buf + len, "Total transition : %u\n",
					devfreq->total_trans);
	return len;
}
static DEVICE_ATTR_RO(trans_stat);

static struct attribute *devfreq_attrs[] = {
	&dev_attr_governor.attr,
	&dev_attr_available_governors.attr,
	&dev_attr_cur_freq.attr,
	&dev_attr_available_frequencies.attr,
	&dev_attr_target_freq.attr,
	&dev_attr_polling_interval.attr,
	&dev_attr_min_freq.attr,
	&dev_attr_max_freq.attr,
	&dev_attr_trans_stat.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devfreq);

static int __init devfreq_init(void)
{
	devfreq_class = class_create(THIS_MODULE, "devfreq");
	if (IS_ERR(devfreq_class)) {
		pr_err("%s: couldn't create class\n", __FILE__);
		return PTR_ERR(devfreq_class);
	}

	devfreq_wq = create_freezable_workqueue("devfreq_wq");
	if (!devfreq_wq) {
		class_destroy(devfreq_class);
		pr_err("%s: couldn't create workqueue\n", __FILE__);
		return -ENOMEM;
	}
	devfreq_class->dev_groups = devfreq_groups;

	return 0;
}
subsys_initcall(devfreq_init);

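/*
 * Illustrative sketch (not part of this file): the attributes registered
 * above appear under /sys/class/devfreq/<devfreq-name>/ and can be exercised
 * from user space, for example (the "devfreq0" name is hypothetical):
 *
 *	cat /sys/class/devfreq/devfreq0/available_governors
 *	echo simple_ondemand > /sys/class/devfreq/devfreq0/governor
 *	echo 50 > /sys/class/devfreq/devfreq0/polling_interval
 *	cat /sys/class/devfreq/devfreq0/trans_stat
 */
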
/*
 * The following are helper functions for devfreq user device drivers with
 * OPP framework.
 */

/**
 * devfreq_recommended_opp() - Helper function to get proper OPP for the
 *			       freq value given to target callback.
 * @dev:	The devfreq user device. (parent of devfreq)
 * @freq:	The frequency given to target function
 * @flags:	Flags handed from devfreq framework.
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
					   unsigned long *freq,
					   u32 flags)
{
	struct dev_pm_opp *opp;

	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
		/* The freq is an upper bound. opp should be lower */
		opp = dev_pm_opp_find_freq_floor(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ERANGE))
			opp = dev_pm_opp_find_freq_ceil(dev, freq);
	} else {
		/* The freq is a lower bound. opp should be higher */
		opp = dev_pm_opp_find_freq_ceil(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ERANGE))
			opp = dev_pm_opp_find_freq_floor(dev, freq);
	}

	return opp;
}
EXPORT_SYMBOL(devfreq_recommended_opp);

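/*
 * Illustrative sketch (not part of this file): a driver's target() callback
 * typically maps the requested frequency onto a real OPP with
 * devfreq_recommended_opp() before programming clocks/regulators. The "foo"
 * helpers below are hypothetical.
 *
 *	static int foo_target(struct device *dev, unsigned long *freq, u32 flags)
 *	{
 *		struct dev_pm_opp *opp;
 *
 *		opp = devfreq_recommended_opp(dev, freq, flags);
 *		if (IS_ERR(opp))
 *			return PTR_ERR(opp);
 *		dev_pm_opp_put(opp);
 *
 *		return foo_set_rate(dev, *freq);	(program clock/voltage)
 *	}
 */
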
/**
 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
 *				     for any changes in the OPP availability
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	return dev_pm_opp_register_notifier(dev, &devfreq->nb);
}
EXPORT_SYMBOL(devfreq_register_opp_notifier);

/**
 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
 *				       notified for any changes in the OPP
 *				       availability
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 *
 * At exit() callback of devfreq_dev_profile, this must be included if
 * devfreq_recommended_opp is used.
 */
int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	return dev_pm_opp_unregister_notifier(dev, &devfreq->nb);
}
EXPORT_SYMBOL(devfreq_unregister_opp_notifier);

static void devm_devfreq_opp_release(struct device *dev, void *res)
{
	devfreq_unregister_opp_notifier(dev, *(struct devfreq **)res);
}

/**
 * devm_devfreq_register_opp_notifier() - Resource-managed
 *					   devfreq_register_opp_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
int devm_devfreq_register_opp_notifier(struct device *dev,
					struct devfreq *devfreq)
{
	struct devfreq **ptr;
	int ret;

	ptr = devres_alloc(devm_devfreq_opp_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = devfreq_register_opp_notifier(dev, devfreq);
	if (ret) {
		devres_free(ptr);
		return ret;
	}

	*ptr = devfreq;
	devres_add(dev, ptr);

	return 0;
}
EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);

/**
 * devm_devfreq_unregister_opp_notifier() - Resource-managed
 *					     devfreq_unregister_opp_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
void devm_devfreq_unregister_opp_notifier(struct device *dev,
					  struct devfreq *devfreq)
{
	WARN_ON(devres_release(dev, devm_devfreq_opp_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);

/**
 * devfreq_register_notifier() - Register a driver with devfreq
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to register.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 */
int devfreq_register_notifier(struct devfreq *devfreq,
			      struct notifier_block *nb,
			      unsigned int list)
{
	int ret = 0;

	if (!devfreq)
		return -EINVAL;

	switch (list) {
	case DEVFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&devfreq->transition_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(devfreq_register_notifier);

/**
 * devfreq_unregister_notifier() - Unregister a driver with devfreq
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to be unregistered.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 */
int devfreq_unregister_notifier(struct devfreq *devfreq,
				struct notifier_block *nb,
				unsigned int list)
{
	int ret = 0;

	if (!devfreq)
		return -EINVAL;

	switch (list) {
	case DEVFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&devfreq->transition_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(devfreq_unregister_notifier);

struct devfreq_notifier_devres {
	struct devfreq *devfreq;
	struct notifier_block *nb;
	unsigned int list;
};

static void devm_devfreq_notifier_release(struct device *dev, void *res)
{
	struct devfreq_notifier_devres *this = res;

	devfreq_unregister_notifier(this->devfreq, this->nb, this->list);
}

/**
 * devm_devfreq_register_notifier() - Resource-managed
 *				      devfreq_register_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to register.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 */
int devm_devfreq_register_notifier(struct device *dev,
				   struct devfreq *devfreq,
				   struct notifier_block *nb,
				   unsigned int list)
{
	struct devfreq_notifier_devres *ptr;
	int ret;

	ptr = devres_alloc(devm_devfreq_notifier_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = devfreq_register_notifier(devfreq, nb, list);
	if (ret) {
		devres_free(ptr);
		return ret;
	}

	ptr->devfreq = devfreq;
	ptr->nb = nb;
	ptr->list = list;
	devres_add(dev, ptr);

	return 0;
}
EXPORT_SYMBOL(devm_devfreq_register_notifier);

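/*
 * Illustrative sketch (not part of this file): another driver can watch
 * frequency transitions of a devfreq device by registering a
 * DEVFREQ_TRANSITION_NOTIFIER; the callback receives a struct devfreq_freqs
 * with the old and new frequencies. The "foo" names are hypothetical.
 *
 *	static int foo_devfreq_transition(struct notifier_block *nb,
 *					  unsigned long event, void *data)
 *	{
 *		struct devfreq_freqs *freqs = data;
 *
 *		if (event == DEVFREQ_POSTCHANGE)
 *			pr_debug("devfreq: %lu -> %lu\n",
 *				 freqs->old, freqs->new);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	foo->nb.notifier_call = foo_devfreq_transition;
 *	devm_devfreq_register_notifier(dev, devfreq, &foo->nb,
 *				       DEVFREQ_TRANSITION_NOTIFIER);
 */
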
/**
 * devm_devfreq_unregister_notifier() - Resource-managed
 *					devfreq_unregister_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to be unregistered.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 */
void devm_devfreq_unregister_notifier(struct device *dev,
				      struct devfreq *devfreq,
				      struct notifier_block *nb,
				      unsigned int list)
{
	WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_unregister_notifier);