/*
 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
 *	    for Non-CPU Devices.
 *
 * Copyright (C) 2011 Samsung Electronics
 *	MyungJoo Ham <myungjoo.ham@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/pm_opp.h>
#include <linux/devfreq.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <linux/of.h>
#include "governor.h"

static struct class *devfreq_class;

/*
 * devfreq core provides delayed work based load monitoring helper
 * functions. Governors can use these or can implement their own
 * monitoring mechanism.
 */
static struct workqueue_struct *devfreq_wq;

/* The list of all device-devfreq governors */
static LIST_HEAD(devfreq_governor_list);
/* The list of all device-devfreq */
static LIST_HEAD(devfreq_list);
static DEFINE_MUTEX(devfreq_list_lock);

/**
 * find_device_devfreq() - find devfreq struct using device pointer
 * @dev: device pointer used to lookup device devfreq.
 *
 * Search the list of device devfreqs and return the matched device's
 * devfreq info. devfreq_list_lock should be held by the caller.
 */
static struct devfreq *find_device_devfreq(struct device *dev)
{
	struct devfreq *tmp_devfreq;

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
		if (tmp_devfreq->dev.parent == dev)
			return tmp_devfreq;
	}

	return ERR_PTR(-ENODEV);
}

/**
 * devfreq_get_freq_level() - Lookup freq_table for the frequency
 * @devfreq: the devfreq instance
 * @freq: the target frequency
 */
static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
{
	int lev;

	for (lev = 0; lev < devfreq->profile->max_state; lev++)
		if (freq == devfreq->profile->freq_table[lev])
			return lev;

	return -EINVAL;
}

/**
 * devfreq_set_freq_table() - Initialize freq_table for the frequency
 * @devfreq: the devfreq instance
 */
static void devfreq_set_freq_table(struct devfreq *devfreq)
{
	struct devfreq_dev_profile *profile = devfreq->profile;
	struct dev_pm_opp *opp;
	unsigned long freq;
	int i, count;

	/* Initialize the freq_table from OPP table */
	count = dev_pm_opp_get_opp_count(devfreq->dev.parent);
	if (count <= 0)
		return;

	profile->max_state = count;
	profile->freq_table = devm_kcalloc(devfreq->dev.parent,
					   profile->max_state,
					   sizeof(*profile->freq_table),
					   GFP_KERNEL);
	if (!profile->freq_table) {
		profile->max_state = 0;
		return;
	}

	rcu_read_lock();
	for (i = 0, freq = 0; i < profile->max_state; i++, freq++) {
		opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq);
		if (IS_ERR(opp)) {
			devm_kfree(devfreq->dev.parent, profile->freq_table);
			profile->max_state = 0;
			rcu_read_unlock();
			return;
		}
		profile->freq_table[i] = freq;
	}
	rcu_read_unlock();
}

/**
 * devfreq_update_status() - Update statistics of devfreq behavior
 * @devfreq: the devfreq instance
 * @freq: the update target frequency
 */
static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
{
	int lev, prev_lev, ret = 0;
	unsigned long cur_time;

	cur_time = jiffies;

	prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
	if (prev_lev < 0) {
		ret = prev_lev;
		goto out;
	}

	devfreq->time_in_state[prev_lev] +=
			 cur_time - devfreq->last_stat_updated;

	lev = devfreq_get_freq_level(devfreq, freq);
	if (lev < 0) {
		ret = lev;
		goto out;
	}

	if (lev != prev_lev) {
		devfreq->trans_table[(prev_lev *
				devfreq->profile->max_state) + lev]++;
		devfreq->total_trans++;
	}

out:
	devfreq->last_stat_updated = cur_time;
	return ret;
}

/**
 * find_devfreq_governor() - find devfreq governor from name
 * @name: name of the governor
 *
 * Search the list of devfreq governors and return the matched
 * governor's pointer. devfreq_list_lock should be held by the caller.
 */
static struct devfreq_governor *find_devfreq_governor(const char *name)
{
	struct devfreq_governor *tmp_governor;

	if (IS_ERR_OR_NULL(name)) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
		if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
			return tmp_governor;
	}

	return ERR_PTR(-ENODEV);
}

static int devfreq_notify_transition(struct devfreq *devfreq,
		struct devfreq_freqs *freqs, unsigned int state)
{
	if (!devfreq)
		return -EINVAL;

	switch (state) {
	case DEVFREQ_PRECHANGE:
		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
				DEVFREQ_PRECHANGE, freqs);
		break;

	case DEVFREQ_POSTCHANGE:
		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
				DEVFREQ_POSTCHANGE, freqs);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Load monitoring helper functions for governors to use */

/**
 * update_devfreq() - Reevaluate the device and configure frequency.
 * @devfreq: the devfreq instance.
 *
 * Note: Lock devfreq->lock before calling update_devfreq
 *	 This function is exported for governors.
 */
int update_devfreq(struct devfreq *devfreq)
{
	struct devfreq_freqs freqs;
	unsigned long freq, cur_freq;
	int err = 0;
	u32 flags = 0;

	if (!mutex_is_locked(&devfreq->lock)) {
		WARN(true, "devfreq->lock must be locked by the caller.\n");
		return -EINVAL;
	}

	if (!devfreq->governor)
		return -EINVAL;

	/* Reevaluate the proper frequency */
	err = devfreq->governor->get_target_freq(devfreq, &freq);
	if (err)
		return err;

	/*
	 * Adjust the frequency with user freq and QoS.
	 *
	 * List from the highest priority
	 * max_freq
	 * min_freq
	 */

	if (devfreq->min_freq && freq < devfreq->min_freq) {
		freq = devfreq->min_freq;
		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
	}
	if (devfreq->max_freq && freq > devfreq->max_freq) {
		freq = devfreq->max_freq;
		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
	}

	if (devfreq->profile->get_cur_freq)
		devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
	else
		cur_freq = devfreq->previous_freq;

	freqs.old = cur_freq;
	freqs.new = freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);

	err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
	if (err)
		return err;

	freqs.new = freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);

	if (devfreq->profile->freq_table)
		if (devfreq_update_status(devfreq, freq))
			dev_err(&devfreq->dev,
				"Couldn't update frequency transition information.\n");

	devfreq->previous_freq = freq;
	return err;
}
EXPORT_SYMBOL(update_devfreq);

/**
 * devfreq_monitor() - Periodically poll devfreq objects.
 * @work: the work struct used to run devfreq_monitor periodically.
 *
 */
static void devfreq_monitor(struct work_struct *work)
{
	int err;
	struct devfreq *devfreq = container_of(work,
					struct devfreq, work.work);

	mutex_lock(&devfreq->lock);
	err = update_devfreq(devfreq);
	if (err)
		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);

	queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	mutex_unlock(&devfreq->lock);
}

/**
 * devfreq_monitor_start() - Start load monitoring of devfreq instance
 * @devfreq: the devfreq instance.
 *
 * Helper function for starting devfreq device load monitoring. By
 * default delayed work based monitoring is supported. Function
 * to be called from governor in response to DEVFREQ_GOV_START
 * event when device is added to devfreq framework.
 */
void devfreq_monitor_start(struct devfreq *devfreq)
{
	INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
	if (devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
}
EXPORT_SYMBOL(devfreq_monitor_start);

/**
 * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
 * @devfreq: the devfreq instance.
 *
 * Helper function to stop devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_STOP
 * event when device is removed from devfreq framework.
 */
void devfreq_monitor_stop(struct devfreq *devfreq)
{
	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_stop);

/**
 * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
 * @devfreq: the devfreq instance.
 *
 * Helper function to suspend devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_SUSPEND
 * event or when polling interval is set to zero.
 *
 * Note: Though this function is the same as devfreq_monitor_stop(),
 * it is intentionally kept separate to provide hooks for collecting
 * transition statistics.
 */
void devfreq_monitor_suspend(struct devfreq *devfreq)
{
	mutex_lock(&devfreq->lock);
	if (devfreq->stop_polling) {
		mutex_unlock(&devfreq->lock);
		return;
	}

	devfreq_update_status(devfreq, devfreq->previous_freq);
	devfreq->stop_polling = true;
	mutex_unlock(&devfreq->lock);
	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_suspend);

/**
 * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
 * @devfreq: the devfreq instance.
 *
 * Helper function to resume devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_RESUME
 * event or when polling interval is set to non-zero.
 */
void devfreq_monitor_resume(struct devfreq *devfreq)
{
	unsigned long freq;

	mutex_lock(&devfreq->lock);
	if (!devfreq->stop_polling)
		goto out;

	if (!delayed_work_pending(&devfreq->work) &&
			devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));

	devfreq->last_stat_updated = jiffies;
	devfreq->stop_polling = false;

	if (devfreq->profile->get_cur_freq &&
		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
		devfreq->previous_freq = freq;

out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_monitor_resume);

/**
 * devfreq_interval_update() - Update device devfreq monitoring interval
 * @devfreq: the devfreq instance.
 * @delay: new polling interval to be set.
 *
 * Helper function to set new load monitoring polling interval. Function
 * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
 */
void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
{
	unsigned int cur_delay = devfreq->profile->polling_ms;
	unsigned int new_delay = *delay;

	mutex_lock(&devfreq->lock);
	devfreq->profile->polling_ms = new_delay;

	if (devfreq->stop_polling)
		goto out;

	/* if new delay is zero, stop polling */
	if (!new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		return;
	}

	/* if current delay is zero, start polling with new delay */
	if (!cur_delay) {
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
		goto out;
	}

	/* if current delay is greater than new delay, restart polling */
	if (cur_delay > new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		mutex_lock(&devfreq->lock);
		if (!devfreq->stop_polling)
			queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	}
out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_interval_update);
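
/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * governor can be built on top of the helpers above.  The sample_gov_*
 * names are made up for illustration; the pattern loosely follows the
 * in-tree governors:
 *
 *	static int sample_gov_get_target_freq(struct devfreq *df,
 *					      unsigned long *freq)
 *	{
 *		*freq = df->max_freq ? df->max_freq : UINT_MAX;
 *		return 0;
 *	}
 *
 *	static int sample_gov_event_handler(struct devfreq *devfreq,
 *					    unsigned int event, void *data)
 *	{
 *		switch (event) {
 *		case DEVFREQ_GOV_START:
 *			devfreq_monitor_start(devfreq);
 *			break;
 *		case DEVFREQ_GOV_STOP:
 *			devfreq_monitor_stop(devfreq);
 *			break;
 *		case DEVFREQ_GOV_INTERVAL:
 *			devfreq_interval_update(devfreq, (unsigned int *)data);
 *			break;
 *		case DEVFREQ_GOV_SUSPEND:
 *			devfreq_monitor_suspend(devfreq);
 *			break;
 *		case DEVFREQ_GOV_RESUME:
 *			devfreq_monitor_resume(devfreq);
 *			break;
 *		}
 *		return 0;
 *	}
 */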

/**
 * devfreq_notifier_call() - Notify that the device frequency requirements
 *			     have been changed outside of the devfreq framework.
 * @nb: the notifier_block (supposed to be devfreq->nb)
 * @type: not used
 * @devp: not used
 *
 * Called by a notifier that uses devfreq->nb.
 */
static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
				 void *devp)
{
	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
	int ret;

	mutex_lock(&devfreq->lock);
	ret = update_devfreq(devfreq);
	mutex_unlock(&devfreq->lock);

	return ret;
}

/**
 * _remove_devfreq() - Remove devfreq from the list and release its resources.
 * @devfreq: the devfreq struct
 */
static void _remove_devfreq(struct devfreq *devfreq)
{
	mutex_lock(&devfreq_list_lock);
	if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
		mutex_unlock(&devfreq_list_lock);
		dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
		return;
	}
	list_del(&devfreq->node);
	mutex_unlock(&devfreq_list_lock);

	if (devfreq->governor)
		devfreq->governor->event_handler(devfreq,
						 DEVFREQ_GOV_STOP, NULL);

	if (devfreq->profile->exit)
		devfreq->profile->exit(devfreq->dev.parent);

	mutex_destroy(&devfreq->lock);
	kfree(devfreq);
}

/**
 * devfreq_dev_release() - Callback for struct device to release the device.
 * @dev: the devfreq device
 *
 * This calls _remove_devfreq() if _remove_devfreq() has not been called yet.
 */
static void devfreq_dev_release(struct device *dev)
{
	struct devfreq *devfreq = to_devfreq(dev);

	_remove_devfreq(devfreq);
}

/**
 * devfreq_add_device() - Add devfreq feature to the device
 * @dev: the device to add devfreq feature.
 * @profile: device-specific profile to run devfreq.
 * @governor_name: name of the policy to choose frequency.
 * @data: private data for the governor. The devfreq framework does not
 *	  touch this value.
 */
struct devfreq *devfreq_add_device(struct device *dev,
				   struct devfreq_dev_profile *profile,
				   const char *governor_name,
				   void *data)
{
	struct devfreq *devfreq;
	struct devfreq_governor *governor;
	int err = 0;

	if (!dev || !profile || !governor_name) {
		dev_err(dev, "%s: Invalid parameters.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&devfreq_list_lock);
	devfreq = find_device_devfreq(dev);
	mutex_unlock(&devfreq_list_lock);
	if (!IS_ERR(devfreq)) {
		dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n",
			__func__);
		err = -EINVAL;
		goto err_out;
	}

	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
	if (!devfreq) {
		dev_err(dev, "%s: Unable to create devfreq for the device\n",
			__func__);
		err = -ENOMEM;
		goto err_out;
	}

	mutex_init(&devfreq->lock);
	mutex_lock(&devfreq->lock);
	devfreq->dev.parent = dev;
	devfreq->dev.class = devfreq_class;
	devfreq->dev.release = devfreq_dev_release;
	devfreq->profile = profile;
	strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
	devfreq->previous_freq = profile->initial_freq;
	devfreq->data = data;
	devfreq->nb.notifier_call = devfreq_notifier_call;

	if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
		mutex_unlock(&devfreq->lock);
		devfreq_set_freq_table(devfreq);
		mutex_lock(&devfreq->lock);
	}

	devfreq->trans_table = devm_kzalloc(dev, sizeof(unsigned int) *
						devfreq->profile->max_state *
						devfreq->profile->max_state,
						GFP_KERNEL);
	devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned long) *
						devfreq->profile->max_state,
						GFP_KERNEL);
	devfreq->last_stat_updated = jiffies;

	dev_set_name(&devfreq->dev, "%s", dev_name(dev));
	err = device_register(&devfreq->dev);
	if (err) {
		put_device(&devfreq->dev);
		mutex_unlock(&devfreq->lock);
		goto err_out;
	}

	srcu_init_notifier_head(&devfreq->transition_notifier_list);

	mutex_unlock(&devfreq->lock);

	mutex_lock(&devfreq_list_lock);
	list_add(&devfreq->node, &devfreq_list);

	governor = find_devfreq_governor(devfreq->governor_name);
	if (!IS_ERR(governor))
		devfreq->governor = governor;
	if (devfreq->governor)
		err = devfreq->governor->event_handler(devfreq,
					DEVFREQ_GOV_START, NULL);
	mutex_unlock(&devfreq_list_lock);
	if (err) {
		dev_err(dev, "%s: Unable to start governor for the device\n",
			__func__);
		goto err_init;
	}

	return devfreq;

err_init:
	list_del(&devfreq->node);
	device_unregister(&devfreq->dev);
	kfree(devfreq);
err_out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(devfreq_add_device);

/**
 * devfreq_remove_device() - Remove devfreq feature from a device.
 * @devfreq: the devfreq instance to be removed
 *
 * The opposite of devfreq_add_device().
 */
int devfreq_remove_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	device_unregister(&devfreq->dev);
	put_device(&devfreq->dev);

	return 0;
}
EXPORT_SYMBOL(devfreq_remove_device);

static int devm_devfreq_dev_match(struct device *dev, void *res, void *data)
{
	struct devfreq **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

static void devm_devfreq_dev_release(struct device *dev, void *res)
{
	devfreq_remove_device(*(struct devfreq **)res);
}

/**
 * devm_devfreq_add_device() - Resource-managed devfreq_add_device()
 * @dev: the device to add devfreq feature.
 * @profile: device-specific profile to run devfreq.
 * @governor_name: name of the policy to choose frequency.
 * @data: private data for the governor. The devfreq framework does not
 *	  touch this value.
 *
 * This function manages the devfreq instance with device resource
 * management, so the driver does not have to free the devfreq device
 * explicitly; it is released automatically when @dev is detached.
 */
struct devfreq *devm_devfreq_add_device(struct device *dev,
					struct devfreq_dev_profile *profile,
					const char *governor_name,
					void *data)
{
	struct devfreq **ptr, *devfreq;

	ptr = devres_alloc(devm_devfreq_dev_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	devfreq = devfreq_add_device(dev, profile, governor_name, data);
	if (IS_ERR(devfreq)) {
		devres_free(ptr);
		return ERR_CAST(devfreq);	/* propagate the real error */
	}

	*ptr = devfreq;
	devres_add(dev, ptr);

	return devfreq;
}
EXPORT_SYMBOL(devm_devfreq_add_device);
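
/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * driver can hook itself up to devfreq from probe() roughly like this.
 * foo_target() and foo_get_dev_status() stand in for the driver's own
 * callbacks and are not defined here:
 *
 *	static struct devfreq_dev_profile foo_profile = {
 *		.initial_freq	= 200000000,
 *		.polling_ms	= 100,
 *		.target		= foo_target,
 *		.get_dev_status	= foo_get_dev_status,
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct devfreq *df;
 *
 *		df = devm_devfreq_add_device(&pdev->dev, &foo_profile,
 *					     "simple_ondemand", NULL);
 *		if (IS_ERR(df))
 *			return PTR_ERR(df);
 *
 *		return 0;
 *	}
 */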

#ifdef CONFIG_OF
/**
 * devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
 * @dev: instance to the given device
 * @index: index into list of devfreq
 *
 * return the instance of devfreq device
 */
struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
{
	struct device_node *node;
	struct devfreq *devfreq;

	if (!dev)
		return ERR_PTR(-EINVAL);

	if (!dev->of_node)
		return ERR_PTR(-EINVAL);

	node = of_parse_phandle(dev->of_node, "devfreq", index);
	if (!node)
		return ERR_PTR(-ENODEV);

	mutex_lock(&devfreq_list_lock);
	list_for_each_entry(devfreq, &devfreq_list, node) {
		if (devfreq->dev.parent
			&& devfreq->dev.parent->of_node == node) {
			mutex_unlock(&devfreq_list_lock);
			return devfreq;
		}
	}
	mutex_unlock(&devfreq_list_lock);

	return ERR_PTR(-EPROBE_DEFER);
}
#else
struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
{
	return ERR_PTR(-ENODEV);
}
#endif /* CONFIG_OF */
EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle);
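
/*
 * Example (illustrative sketch): with a devicetree fragment such as
 *
 *	bus_x: bus@11800000 { ... };
 *
 *	client@12000000 {
 *		...
 *		devfreq = <&bus_x>;
 *	};
 *
 * the client driver can obtain the bus's devfreq instance with
 * devfreq_get_devfreq_by_phandle(dev, 0), e.g. to register a transition
 * notifier on it.  -EPROBE_DEFER is returned while the referenced
 * devfreq device has not been registered yet.
 */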

/**
 * devm_devfreq_remove_device() - Resource-managed devfreq_remove_device()
 * @dev: the device from which to remove the devfreq feature.
 * @devfreq: the devfreq instance to be removed
 */
void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq)
{
	WARN_ON(devres_release(dev, devm_devfreq_dev_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_remove_device);

/**
 * devfreq_suspend_device() - Suspend devfreq of a device.
 * @devfreq: the devfreq instance to be suspended
 *
 * This function is intended to be called by the pm callbacks
 * (e.g., runtime_suspend, suspend) of the device driver that
 * holds the devfreq.
 */
int devfreq_suspend_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	if (!devfreq->governor)
		return 0;

	return devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_SUSPEND, NULL);
}
EXPORT_SYMBOL(devfreq_suspend_device);

/**
 * devfreq_resume_device() - Resume devfreq of a device.
 * @devfreq: the devfreq instance to be resumed
 *
 * This function is intended to be called by the pm callbacks
 * (e.g., runtime_resume, resume) of the device driver that
 * holds the devfreq.
 */
int devfreq_resume_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	if (!devfreq->governor)
		return 0;

	return devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_RESUME, NULL);
}
EXPORT_SYMBOL(devfreq_resume_device);
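
/*
 * Example (illustrative sketch): drivers usually wire the two calls above
 * into their PM callbacks; foo_runtime_suspend() and foo->devfreq are
 * hypothetical:
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		return devfreq_suspend_device(foo->devfreq);
 *	}
 *
 * with the matching runtime_resume callback calling
 * devfreq_resume_device(foo->devfreq).
 */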

/**
 * devfreq_add_governor() - Add devfreq governor
 * @governor: the devfreq governor to be added
 */
int devfreq_add_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (!IS_ERR(g)) {
		pr_err("%s: governor %s already registered\n", __func__,
		       g->name);
		err = -EINVAL;
		goto err_out;
	}

	list_add(&governor->node, &devfreq_governor_list);

	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret = 0;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* The following should never occur */
			if (devfreq->governor) {
				dev_warn(dev,
					 "%s: Governor %s already present\n",
					 __func__, devfreq->governor->name);
				ret = devfreq->governor->event_handler(devfreq,
							DEVFREQ_GOV_STOP, NULL);
				if (ret) {
					dev_warn(dev,
						 "%s: Governor %s stop = %d\n",
						 __func__,
						 devfreq->governor->name, ret);
				}
				/* Fall through */
			}
			devfreq->governor = governor;
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_START, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s start=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
		}
	}

err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_add_governor);

/**
 * devfreq_remove_governor() - Remove devfreq governor
 * @governor: the devfreq governor to be removed
 */
int devfreq_remove_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (IS_ERR(g)) {
		pr_err("%s: governor %s not registered\n", __func__,
		       governor->name);
		err = PTR_ERR(g);
		goto err_out;
	}
	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* we should have a devfreq governor! */
			if (!devfreq->governor) {
				dev_warn(dev, "%s: Governor %s NOT present\n",
					 __func__, governor->name);
				continue;
			}
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_STOP, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s stop=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
			devfreq->governor = NULL;
		}
	}

	list_del(&governor->node);
err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_remove_governor);
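
/*
 * Example (illustrative sketch, not part of this file): a governor
 * registers and unregisters itself with the two functions above from its
 * init/exit code, e.g. for the hypothetical sample_gov sketched earlier:
 *
 *	static struct devfreq_governor sample_gov = {
 *		.name		 = "sample_gov",
 *		.get_target_freq = sample_gov_get_target_freq,
 *		.event_handler	 = sample_gov_event_handler,
 *	};
 *
 *	static int __init sample_gov_init(void)
 *	{
 *		return devfreq_add_governor(&sample_gov);
 *	}
 *	subsys_initcall(sample_gov_init);
 *
 *	static void __exit sample_gov_exit(void)
 *	{
 *		int ret = devfreq_remove_governor(&sample_gov);
 *
 *		if (ret)
 *			pr_err("%s: failed to remove governor: %d\n",
 *			       __func__, ret);
 *	}
 *	module_exit(sample_gov_exit);
 */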

static ssize_t governor_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	if (!to_devfreq(dev)->governor)
		return -EINVAL;

	return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
}

static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	int ret;
	char str_governor[DEVFREQ_NAME_LEN + 1];
	struct devfreq_governor *governor;

	ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&devfreq_list_lock);
	governor = find_devfreq_governor(str_governor);
	if (IS_ERR(governor)) {
		ret = PTR_ERR(governor);
		goto out;
	}
	if (df->governor == governor) {
		ret = 0;
		goto out;
	}

	if (df->governor) {
		ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
		if (ret) {
			dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
				 __func__, df->governor->name, ret);
			goto out;
		}
	}
	df->governor = governor;
	strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
	if (ret)
		dev_warn(dev, "%s: Governor %s not started(%d)\n",
			 __func__, df->governor->name, ret);
out:
	mutex_unlock(&devfreq_list_lock);

	if (!ret)
		ret = count;
	return ret;
}
static DEVICE_ATTR_RW(governor);

static ssize_t available_governors_show(struct device *d,
					struct device_attribute *attr,
					char *buf)
{
	struct devfreq_governor *tmp_governor;
	ssize_t count = 0;

	mutex_lock(&devfreq_list_lock);
	list_for_each_entry(tmp_governor, &devfreq_governor_list, node)
		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
				   "%s ", tmp_governor->name);
	mutex_unlock(&devfreq_list_lock);

	/* Truncate the trailing space */
	if (count)
		count--;

	count += sprintf(&buf[count], "\n");

	return count;
}
static DEVICE_ATTR_RO(available_governors);

static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	unsigned long freq;
	struct devfreq *devfreq = to_devfreq(dev);

	if (devfreq->profile->get_cur_freq &&
		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
		return sprintf(buf, "%lu\n", freq);

	return sprintf(buf, "%lu\n", devfreq->previous_freq);
}
static DEVICE_ATTR_RO(cur_freq);

static ssize_t target_freq_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
}
static DEVICE_ATTR_RO(target_freq);

static ssize_t polling_interval_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
}

static ssize_t polling_interval_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned int value;
	int ret;

	if (!df->governor)
		return -EINVAL;

	ret = sscanf(buf, "%u", &value);
	if (ret != 1)
		return -EINVAL;

	df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
	ret = count;

	return ret;
}
static DEVICE_ATTR_RW(polling_interval);

static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;
	unsigned long max;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&df->lock);
	max = df->max_freq;
	if (value && max && value > max) {
		ret = -EINVAL;
		goto unlock;
	}

	df->min_freq = value;
	update_devfreq(df);
	ret = count;
unlock:
	mutex_unlock(&df->lock);
	return ret;
}

static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;
	unsigned long min;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&df->lock);
	min = df->min_freq;
	if (value && min && value < min) {
		ret = -EINVAL;
		goto unlock;
	}

	df->max_freq = value;
	update_devfreq(df);
	ret = count;
unlock:
	mutex_unlock(&df->lock);
	return ret;
}

#define show_one(name)						\
static ssize_t name##_show					\
(struct device *dev, struct device_attribute *attr, char *buf)	\
{								\
	return sprintf(buf, "%lu\n", to_devfreq(dev)->name);	\
}
show_one(min_freq);
show_one(max_freq);

static DEVICE_ATTR_RW(min_freq);
static DEVICE_ATTR_RW(max_freq);

static ssize_t available_frequencies_show(struct device *d,
					  struct device_attribute *attr,
					  char *buf)
{
	struct devfreq *df = to_devfreq(d);
	struct device *dev = df->dev.parent;
	struct dev_pm_opp *opp;
	ssize_t count = 0;
	unsigned long freq = 0;

	rcu_read_lock();
	do {
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;

		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
				   "%lu ", freq);
		freq++;
	} while (1);
	rcu_read_unlock();

	/* Truncate the trailing space */
	if (count)
		count--;

	count += sprintf(&buf[count], "\n");

	return count;
}
static DEVICE_ATTR_RO(available_frequencies);

static ssize_t trans_stat_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct devfreq *devfreq = to_devfreq(dev);
	ssize_t len;
	int i, j;
	unsigned int max_state = devfreq->profile->max_state;

	if (!devfreq->stop_polling &&
			devfreq_update_status(devfreq, devfreq->previous_freq))
		return 0;
	if (max_state == 0)
		return sprintf(buf, "Not Supported.\n");

	len = sprintf(buf, "     From  :   To\n");
	len += sprintf(buf + len, "           :");
	for (i = 0; i < max_state; i++)
		len += sprintf(buf + len, "%10lu",
				devfreq->profile->freq_table[i]);

	len += sprintf(buf + len, "   time(ms)\n");

	for (i = 0; i < max_state; i++) {
		if (devfreq->profile->freq_table[i]
					== devfreq->previous_freq) {
			len += sprintf(buf + len, "*");
		} else {
			len += sprintf(buf + len, " ");
		}
		len += sprintf(buf + len, "%10lu:",
				devfreq->profile->freq_table[i]);
		for (j = 0; j < max_state; j++)
			len += sprintf(buf + len, "%10u",
				devfreq->trans_table[(i * max_state) + j]);
		len += sprintf(buf + len, "%10u\n",
			jiffies_to_msecs(devfreq->time_in_state[i]));
	}

	len += sprintf(buf + len, "Total transition : %u\n",
					devfreq->total_trans);
	return len;
}
static DEVICE_ATTR_RO(trans_stat);

static struct attribute *devfreq_attrs[] = {
	&dev_attr_governor.attr,
	&dev_attr_available_governors.attr,
	&dev_attr_cur_freq.attr,
	&dev_attr_available_frequencies.attr,
	&dev_attr_target_freq.attr,
	&dev_attr_polling_interval.attr,
	&dev_attr_min_freq.attr,
	&dev_attr_max_freq.attr,
	&dev_attr_trans_stat.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devfreq);
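
/*
 * The attributes above are exposed under /sys/class/devfreq/<device-name>/,
 * e.g. governor, available_governors, cur_freq, min_freq, max_freq,
 * polling_interval and trans_stat.  Writing a governor name to "governor"
 * or a value in milliseconds to "polling_interval" takes effect
 * immediately; "trans_stat" dumps the transition table collected by
 * devfreq_update_status().  The <device-name> component is whatever
 * dev_name() of the parent device returns.
 */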

static int __init devfreq_init(void)
{
	devfreq_class = class_create(THIS_MODULE, "devfreq");
	if (IS_ERR(devfreq_class)) {
		pr_err("%s: couldn't create class\n", __FILE__);
		return PTR_ERR(devfreq_class);
	}

	devfreq_wq = create_freezable_workqueue("devfreq_wq");
	if (!devfreq_wq) {
		class_destroy(devfreq_class);
		pr_err("%s: couldn't create workqueue\n", __FILE__);
		return -ENOMEM;
	}
	devfreq_class->dev_groups = devfreq_groups;

	return 0;
}
subsys_initcall(devfreq_init);

static void __exit devfreq_exit(void)
{
	class_destroy(devfreq_class);
	destroy_workqueue(devfreq_wq);
}
module_exit(devfreq_exit);

/*
 * The following are helper functions for devfreq user device drivers with
 * the OPP framework.
 */

/**
 * devfreq_recommended_opp() - Helper function to get the proper OPP for the
 *			       freq value given to the target callback.
 * @dev: The devfreq user device. (parent of devfreq)
 * @freq: The frequency given to target function
 * @flags: Flags handed from devfreq framework.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
					   unsigned long *freq,
					   u32 flags)
{
	struct dev_pm_opp *opp;

	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
		/* The freq is an upper bound. opp should be lower */
		opp = dev_pm_opp_find_freq_floor(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ERANGE))
			opp = dev_pm_opp_find_freq_ceil(dev, freq);
	} else {
		/* The freq is a lower bound. opp should be higher */
		opp = dev_pm_opp_find_freq_ceil(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ERANGE))
			opp = dev_pm_opp_find_freq_floor(dev, freq);
	}

	return opp;
}
EXPORT_SYMBOL(devfreq_recommended_opp);
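
/*
 * Example (illustrative sketch, not part of this file): a driver's
 * target() callback typically translates the requested value into an OPP
 * with the helper above; foo_set_rate_and_volt() is hypothetical:
 *
 *	static int foo_target(struct device *dev, unsigned long *freq,
 *			      u32 flags)
 *	{
 *		struct dev_pm_opp *opp;
 *		unsigned long volt;
 *
 *		rcu_read_lock();
 *		opp = devfreq_recommended_opp(dev, freq, flags);
 *		if (IS_ERR(opp)) {
 *			rcu_read_unlock();
 *			return PTR_ERR(opp);
 *		}
 *		volt = dev_pm_opp_get_voltage(opp);
 *		rcu_read_unlock();
 *
 *		return foo_set_rate_and_volt(dev, *freq, volt);
 *	}
 */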

/**
 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
 *				     of any changes in the OPP availability
 * @dev: The devfreq user device. (parent of devfreq)
 * @devfreq: The devfreq object.
 */
int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	struct srcu_notifier_head *nh;
	int ret = 0;

	rcu_read_lock();
	nh = dev_pm_opp_get_notifier(dev);
	if (IS_ERR(nh))
		ret = PTR_ERR(nh);
	rcu_read_unlock();
	if (!ret)
		ret = srcu_notifier_chain_register(nh, &devfreq->nb);

	return ret;
}
EXPORT_SYMBOL(devfreq_register_opp_notifier);

/**
 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
 *				       notified of changes in the OPP
 *				       availability
 * @dev: The devfreq user device. (parent of devfreq)
 * @devfreq: The devfreq object.
 *
 * This must be called from the exit() callback of devfreq_dev_profile if
 * devfreq_register_opp_notifier() was used.
 */
int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	struct srcu_notifier_head *nh;
	int ret = 0;

	rcu_read_lock();
	nh = dev_pm_opp_get_notifier(dev);
	if (IS_ERR(nh))
		ret = PTR_ERR(nh);
	rcu_read_unlock();
	if (!ret)
		ret = srcu_notifier_chain_unregister(nh, &devfreq->nb);

	return ret;
}
EXPORT_SYMBOL(devfreq_unregister_opp_notifier);

static void devm_devfreq_opp_release(struct device *dev, void *res)
{
	devfreq_unregister_opp_notifier(dev, *(struct devfreq **)res);
}

/**
 * devm_devfreq_register_opp_notifier()
 *		- Resource-managed devfreq_register_opp_notifier()
 * @dev: The devfreq user device. (parent of devfreq)
 * @devfreq: The devfreq object.
 */
int devm_devfreq_register_opp_notifier(struct device *dev,
				       struct devfreq *devfreq)
{
	struct devfreq **ptr;
	int ret;

	ptr = devres_alloc(devm_devfreq_opp_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = devfreq_register_opp_notifier(dev, devfreq);
	if (ret) {
		devres_free(ptr);
		return ret;
	}

	*ptr = devfreq;
	devres_add(dev, ptr);

	return 0;
}
EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);

/**
 * devm_devfreq_unregister_opp_notifier()
 *		- Resource-managed devfreq_unregister_opp_notifier()
 * @dev: The devfreq user device. (parent of devfreq)
 * @devfreq: The devfreq object.
 */
void devm_devfreq_unregister_opp_notifier(struct device *dev,
					  struct devfreq *devfreq)
{
	WARN_ON(devres_release(dev, devm_devfreq_opp_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);

/**
 * devfreq_register_notifier() - Register a driver with devfreq
 * @devfreq: The devfreq object.
 * @nb: The notifier block to register.
 * @list: DEVFREQ_TRANSITION_NOTIFIER.
 */
int devfreq_register_notifier(struct devfreq *devfreq,
			      struct notifier_block *nb,
			      unsigned int list)
{
	int ret = 0;

	if (!devfreq)
		return -EINVAL;

	switch (list) {
	case DEVFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&devfreq->transition_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(devfreq_register_notifier);

/**
 * devfreq_unregister_notifier() - Unregister a driver with devfreq
 * @devfreq: The devfreq object.
 * @nb: The notifier block to be unregistered.
 * @list: DEVFREQ_TRANSITION_NOTIFIER.
 */
int devfreq_unregister_notifier(struct devfreq *devfreq,
				struct notifier_block *nb,
				unsigned int list)
{
	int ret = 0;

	if (!devfreq)
		return -EINVAL;

	switch (list) {
	case DEVFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&devfreq->transition_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(devfreq_unregister_notifier);
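
/*
 * Example (illustrative sketch, not part of this file): a consumer of
 * DEVFREQ_TRANSITION_NOTIFIER receives a struct devfreq_freqs pointer and
 * can react to both phases of a frequency change; foo_devfreq_notifier()
 * and the foo_*() helpers it calls are hypothetical:
 *
 *	static int foo_devfreq_notifier(struct notifier_block *nb,
 *					unsigned long event, void *data)
 *	{
 *		struct devfreq_freqs *freqs = data;
 *
 *		switch (event) {
 *		case DEVFREQ_PRECHANGE:
 *			foo_prepare_for_change(freqs->old, freqs->new);
 *			break;
 *		case DEVFREQ_POSTCHANGE:
 *			foo_change_done(freqs->new);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 * Such a callback is registered with devfreq_register_notifier(devfreq,
 * &foo_nb, DEVFREQ_TRANSITION_NOTIFIER) or the devm_ variants below.
 */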

struct devfreq_notifier_devres {
	struct devfreq *devfreq;
	struct notifier_block *nb;
	unsigned int list;
};

static void devm_devfreq_notifier_release(struct device *dev, void *res)
{
	struct devfreq_notifier_devres *this = res;

	devfreq_unregister_notifier(this->devfreq, this->nb, this->list);
}

/**
 * devm_devfreq_register_notifier()
 *	- Resource-managed devfreq_register_notifier()
 * @dev: The devfreq user device. (parent of devfreq)
 * @devfreq: The devfreq object.
 * @nb: The notifier block to be registered.
 * @list: DEVFREQ_TRANSITION_NOTIFIER.
 */
int devm_devfreq_register_notifier(struct device *dev,
				   struct devfreq *devfreq,
				   struct notifier_block *nb,
				   unsigned int list)
{
	struct devfreq_notifier_devres *ptr;
	int ret;

	ptr = devres_alloc(devm_devfreq_notifier_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = devfreq_register_notifier(devfreq, nb, list);
	if (ret) {
		devres_free(ptr);
		return ret;
	}

	ptr->devfreq = devfreq;
	ptr->nb = nb;
	ptr->list = list;
	devres_add(dev, ptr);

	return 0;
}
EXPORT_SYMBOL(devm_devfreq_register_notifier);

/**
 * devm_devfreq_unregister_notifier()
 *	- Resource-managed devfreq_unregister_notifier()
 * @dev: The devfreq user device. (parent of devfreq)
 * @devfreq: The devfreq object.
 * @nb: The notifier block to be unregistered.
 * @list: DEVFREQ_TRANSITION_NOTIFIER.
 */
void devm_devfreq_unregister_notifier(struct device *dev,
				      struct devfreq *devfreq,
				      struct notifier_block *nb,
				      unsigned int list)
{
	WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_unregister_notifier);

MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_DESCRIPTION("devfreq class support");
MODULE_LICENSE("GPL");