/*
 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
 *	    for Non-CPU Devices.
 *
 * Copyright (C) 2011 Samsung Electronics
 *	MyungJoo Ham <myungjoo.ham@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/pm_opp.h>
#include <linux/devfreq.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include "governor.h"

static struct class *devfreq_class;

/*
 * devfreq core provides delayed work based load monitoring helper
 * functions. Governors can use these or can implement their own
 * monitoring mechanism.
 */
static struct workqueue_struct *devfreq_wq;

/* The list of all device-devfreq governors */
static LIST_HEAD(devfreq_governor_list);
/* The list of all device-devfreq */
static LIST_HEAD(devfreq_list);
static DEFINE_MUTEX(devfreq_list_lock);

/**
 * find_device_devfreq() - find devfreq struct using device pointer
 * @dev:	device pointer used to lookup device devfreq.
 *
 * Search the list of device devfreqs and return the matched device's
 * devfreq info. devfreq_list_lock should be held by the caller.
 */
static struct devfreq *find_device_devfreq(struct device *dev)
{
	struct devfreq *tmp_devfreq;

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
		if (tmp_devfreq->dev.parent == dev)
			return tmp_devfreq;
	}

	return ERR_PTR(-ENODEV);
}

/**
 * devfreq_get_freq_level() - Lookup freq_table for the frequency
 * @devfreq:	the devfreq instance
 * @freq:	the target frequency
 */
static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
{
	int lev;

	for (lev = 0; lev < devfreq->profile->max_state; lev++)
		if (freq == devfreq->profile->freq_table[lev])
			return lev;

	return -EINVAL;
}

/**
 * devfreq_set_freq_table() - Initialize freq_table for the frequency
 * @devfreq:	the devfreq instance
 */
static void devfreq_set_freq_table(struct devfreq *devfreq)
{
	struct devfreq_dev_profile *profile = devfreq->profile;
	struct dev_pm_opp *opp;
	unsigned long freq;
	int i, count;

	/* Initialize the freq_table from OPP table */
	count = dev_pm_opp_get_opp_count(devfreq->dev.parent);
	if (count <= 0)
		return;

	profile->max_state = count;
	profile->freq_table = devm_kcalloc(devfreq->dev.parent,
					profile->max_state,
					sizeof(*profile->freq_table),
					GFP_KERNEL);
	if (!profile->freq_table) {
		profile->max_state = 0;
		return;
	}

	rcu_read_lock();
	for (i = 0, freq = 0; i < profile->max_state; i++, freq++) {
		opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq);
		if (IS_ERR(opp)) {
			devm_kfree(devfreq->dev.parent, profile->freq_table);
			profile->max_state = 0;
			rcu_read_unlock();
			return;
		}
		profile->freq_table[i] = freq;
	}
	rcu_read_unlock();
}

/**
 * devfreq_update_status() - Update statistics of devfreq behavior
 * @devfreq:	the devfreq instance
 * @freq:	the update target frequency
 */
static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
{
	int lev, prev_lev, ret = 0;
	unsigned long cur_time;

	cur_time = jiffies;

	prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
	if (prev_lev < 0) {
		ret = prev_lev;
		goto out;
	}

	devfreq->time_in_state[prev_lev] +=
			 cur_time - devfreq->last_stat_updated;

	lev = devfreq_get_freq_level(devfreq, freq);
	if (lev < 0) {
		ret = lev;
		goto out;
	}

	if (lev != prev_lev) {
		devfreq->trans_table[(prev_lev *
				devfreq->profile->max_state) + lev]++;
		devfreq->total_trans++;
	}

out:
	devfreq->last_stat_updated = cur_time;
	return ret;
}

/**
 * find_devfreq_governor() - find devfreq governor from name
 * @name:	name of the governor
 *
 * Search the list of devfreq governors and return the matched
 * governor's pointer. devfreq_list_lock should be held by the caller.
 */
static struct devfreq_governor *find_devfreq_governor(const char *name)
{
	struct devfreq_governor *tmp_governor;

	if (IS_ERR_OR_NULL(name)) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
		if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
			return tmp_governor;
	}

	return ERR_PTR(-ENODEV);
}

/* Load monitoring helper functions for governors use */

/**
 * update_devfreq() - Reevaluate the device and configure frequency.
 * @devfreq:	the devfreq instance.
 *
 * Note: Lock devfreq->lock before calling update_devfreq
 *	 This function is exported for governors.
 */
int update_devfreq(struct devfreq *devfreq)
{
	unsigned long freq;
	int err = 0;
	u32 flags = 0;

	if (!mutex_is_locked(&devfreq->lock)) {
		WARN(true, "devfreq->lock must be locked by the caller.\n");
		return -EINVAL;
	}

	if (!devfreq->governor)
		return -EINVAL;

	/* Reevaluate the proper frequency */
	err = devfreq->governor->get_target_freq(devfreq, &freq);
	if (err)
		return err;

	/*
	 * Adjust the frequency with user freq and QoS.
	 *
	 * List from the highest priority
	 * max_freq
	 * min_freq
	 */

	if (devfreq->min_freq && freq < devfreq->min_freq) {
		freq = devfreq->min_freq;
		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
	}
	if (devfreq->max_freq && freq > devfreq->max_freq) {
		freq = devfreq->max_freq;
		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
	}

	err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
	if (err)
		return err;

	if (devfreq->profile->freq_table)
		if (devfreq_update_status(devfreq, freq))
			dev_err(&devfreq->dev,
				"Couldn't update frequency transition information.\n");

	devfreq->previous_freq = freq;
	return err;
}
EXPORT_SYMBOL(update_devfreq);
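
/*
 * Illustrative sketch (not part of the framework): a minimal governor
 * get_target_freq() callback and the way update_devfreq() is expected to
 * be invoked with devfreq->lock held.  The "demo" names are hypothetical;
 * the body roughly mirrors the in-tree performance governor.
 *
 *	static int demo_get_target_freq(struct devfreq *df,
 *					unsigned long *freq)
 *	{
 *		// Ask for the highest frequency; update_devfreq() clamps
 *		// the value to min_freq/max_freq before handing it to
 *		// profile->target().
 *		*freq = df->max_freq ? df->max_freq : UINT_MAX;
 *		return 0;
 *	}
 *
 *	// Typical call site, e.g. from a governor-owned notifier:
 *	mutex_lock(&df->lock);
 *	err = update_devfreq(df);
 *	mutex_unlock(&df->lock);
 */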
/**
 * devfreq_monitor() - Periodically poll devfreq objects.
 * @work:	the work struct used to run devfreq_monitor periodically.
 *
 */
static void devfreq_monitor(struct work_struct *work)
{
	int err;
	struct devfreq *devfreq = container_of(work,
					struct devfreq, work.work);

	mutex_lock(&devfreq->lock);
	err = update_devfreq(devfreq);
	if (err)
		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);

	queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	mutex_unlock(&devfreq->lock);
}

/**
 * devfreq_monitor_start() - Start load monitoring of devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function for starting devfreq device load monitoring. By
 * default delayed work based monitoring is supported. Function
 * to be called from governor in response to DEVFREQ_GOV_START
 * event when device is added to devfreq framework.
 */
void devfreq_monitor_start(struct devfreq *devfreq)
{
	INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
	if (devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
}
EXPORT_SYMBOL(devfreq_monitor_start);

/**
 * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to stop devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_STOP
 * event when device is removed from devfreq framework.
 */
void devfreq_monitor_stop(struct devfreq *devfreq)
{
	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_stop);

/**
 * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to suspend devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_SUSPEND
 * event or when polling interval is set to zero.
 *
 * Note: Though this function is the same as devfreq_monitor_stop(),
 * it is intentionally kept separate to provide hooks for collecting
 * transition statistics.
 */
void devfreq_monitor_suspend(struct devfreq *devfreq)
{
	mutex_lock(&devfreq->lock);
	if (devfreq->stop_polling) {
		mutex_unlock(&devfreq->lock);
		return;
	}

	devfreq_update_status(devfreq, devfreq->previous_freq);
	devfreq->stop_polling = true;
	mutex_unlock(&devfreq->lock);
	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_suspend);

/**
 * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to resume devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_RESUME
 * event or when polling interval is set to non-zero.
 */
void devfreq_monitor_resume(struct devfreq *devfreq)
{
	unsigned long freq;

	mutex_lock(&devfreq->lock);
	if (!devfreq->stop_polling)
		goto out;

	if (!delayed_work_pending(&devfreq->work) &&
			devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));

	devfreq->last_stat_updated = jiffies;
	devfreq->stop_polling = false;

	if (devfreq->profile->get_cur_freq &&
		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
		devfreq->previous_freq = freq;

out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_monitor_resume);

/**
 * devfreq_interval_update() - Update device devfreq monitoring interval
 * @devfreq:	the devfreq instance.
 * @delay:	new polling interval to be set.
 *
 * Helper function to set new load monitoring polling interval. Function
 * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
 */
void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
{
	unsigned int cur_delay = devfreq->profile->polling_ms;
	unsigned int new_delay = *delay;

	mutex_lock(&devfreq->lock);
	devfreq->profile->polling_ms = new_delay;

	if (devfreq->stop_polling)
		goto out;

	/* if new delay is zero, stop polling */
	if (!new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		return;
	}

	/* if current delay is zero, start polling with new delay */
	if (!cur_delay) {
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
		goto out;
	}

	/* if current delay is greater than new delay, restart polling */
	if (cur_delay > new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		mutex_lock(&devfreq->lock);
		if (!devfreq->stop_polling)
			queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	}
out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_interval_update);

/**
 * devfreq_notifier_call() - Notify that the device frequency requirements
 *			     have been changed out of devfreq framework.
 * @nb:		the notifier_block (supposed to be devfreq->nb)
 * @type:	not used
 * @devp:	not used
 *
 * Called by a notifier that uses devfreq->nb.
 */
static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
				 void *devp)
{
	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
	int ret;

	mutex_lock(&devfreq->lock);
	ret = update_devfreq(devfreq);
	mutex_unlock(&devfreq->lock);

	return ret;
}
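
/*
 * Illustrative sketch (hypothetical "demo" governor, not part of this
 * file): how a governor's event_handler is expected to dispatch the
 * DEVFREQ_GOV_* events to the monitoring helpers above, in the style of
 * the in-tree simple_ondemand governor.
 *
 *	static int demo_event_handler(struct devfreq *devfreq,
 *				      unsigned int event, void *data)
 *	{
 *		switch (event) {
 *		case DEVFREQ_GOV_START:
 *			devfreq_monitor_start(devfreq);
 *			break;
 *		case DEVFREQ_GOV_STOP:
 *			devfreq_monitor_stop(devfreq);
 *			break;
 *		case DEVFREQ_GOV_INTERVAL:
 *			devfreq_interval_update(devfreq, (unsigned int *)data);
 *			break;
 *		case DEVFREQ_GOV_SUSPEND:
 *			devfreq_monitor_suspend(devfreq);
 *			break;
 *		case DEVFREQ_GOV_RESUME:
 *			devfreq_monitor_resume(devfreq);
 *			break;
 *		}
 *		return 0;
 *	}
 */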
/**
 * _remove_devfreq() - Remove devfreq from the list and release its resources.
 * @devfreq:	the devfreq struct
 */
static void _remove_devfreq(struct devfreq *devfreq)
{
	mutex_lock(&devfreq_list_lock);
	if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
		mutex_unlock(&devfreq_list_lock);
		dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
		return;
	}
	list_del(&devfreq->node);
	mutex_unlock(&devfreq_list_lock);

	if (devfreq->governor)
		devfreq->governor->event_handler(devfreq,
						 DEVFREQ_GOV_STOP, NULL);

	if (devfreq->profile->exit)
		devfreq->profile->exit(devfreq->dev.parent);

	mutex_destroy(&devfreq->lock);
	kfree(devfreq);
}

/**
 * devfreq_dev_release() - Callback for struct device to release the device.
 * @dev:	the devfreq device
 *
 * This calls _remove_devfreq() if it has not been called yet.
 */
static void devfreq_dev_release(struct device *dev)
{
	struct devfreq *devfreq = to_devfreq(dev);

	_remove_devfreq(devfreq);
}

/**
 * devfreq_add_device() - Add devfreq feature to the device
 * @dev:	the device to add devfreq feature.
 * @profile:	device-specific profile to run devfreq.
 * @governor_name:	name of the policy to choose frequency.
 * @data:	private data for the governor. The devfreq framework does not
 *		touch this value.
 */
struct devfreq *devfreq_add_device(struct device *dev,
				   struct devfreq_dev_profile *profile,
				   const char *governor_name,
				   void *data)
{
	struct devfreq *devfreq;
	struct devfreq_governor *governor;
	int err = 0;

	if (!dev || !profile || !governor_name) {
		dev_err(dev, "%s: Invalid parameters.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&devfreq_list_lock);
	devfreq = find_device_devfreq(dev);
	mutex_unlock(&devfreq_list_lock);
	if (!IS_ERR(devfreq)) {
		dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n",
			__func__);
		err = -EINVAL;
		goto err_out;
	}

	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
	if (!devfreq) {
		dev_err(dev, "%s: Unable to create devfreq for the device\n",
			__func__);
		err = -ENOMEM;
		goto err_out;
	}

	mutex_init(&devfreq->lock);
	mutex_lock(&devfreq->lock);
	devfreq->dev.parent = dev;
	devfreq->dev.class = devfreq_class;
	devfreq->dev.release = devfreq_dev_release;
	devfreq->profile = profile;
	strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
	devfreq->previous_freq = profile->initial_freq;
	devfreq->data = data;
	devfreq->nb.notifier_call = devfreq_notifier_call;

	if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
		mutex_unlock(&devfreq->lock);
		devfreq_set_freq_table(devfreq);
		mutex_lock(&devfreq->lock);
	}

	devfreq->trans_table = devm_kzalloc(dev, sizeof(unsigned int) *
						devfreq->profile->max_state *
						devfreq->profile->max_state,
						GFP_KERNEL);
	devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned long) *
						devfreq->profile->max_state,
						GFP_KERNEL);
	devfreq->last_stat_updated = jiffies;

	dev_set_name(&devfreq->dev, "%s", dev_name(dev));
	err = device_register(&devfreq->dev);
	if (err) {
		put_device(&devfreq->dev);
		mutex_unlock(&devfreq->lock);
		goto err_out;
	}

	mutex_unlock(&devfreq->lock);

	mutex_lock(&devfreq_list_lock);
	list_add(&devfreq->node, &devfreq_list);

	governor = find_devfreq_governor(devfreq->governor_name);
	if (!IS_ERR(governor))
		devfreq->governor = governor;
	if (devfreq->governor)
		err = devfreq->governor->event_handler(devfreq,
					DEVFREQ_GOV_START, NULL);
	mutex_unlock(&devfreq_list_lock);
	if (err) {
		dev_err(dev, "%s: Unable to start governor for the device\n",
			__func__);
		goto err_init;
	}

	return devfreq;

err_init:
	list_del(&devfreq->node);
	device_unregister(&devfreq->dev);
	kfree(devfreq);
err_out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(devfreq_add_device);

/**
 * devfreq_remove_device() - Remove devfreq feature from a device.
 * @devfreq:	the devfreq instance to be removed
 *
 * The opposite of devfreq_add_device().
 */
int devfreq_remove_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	device_unregister(&devfreq->dev);
	put_device(&devfreq->dev);

	return 0;
}
EXPORT_SYMBOL(devfreq_remove_device);
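
/*
 * Illustrative sketch (not part of this file): how a device driver is
 * typically expected to hook into devfreq from its probe routine.  The
 * "foo_*" names are hypothetical; only the devfreq calls are real.
 *
 *	static struct devfreq_dev_profile foo_profile = {
 *		.initial_freq	= 200000000,
 *		.polling_ms	= 100,
 *		.target		= foo_target,		// required
 *		.get_dev_status	= foo_get_dev_status,	// used by ondemand
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct devfreq *df;
 *
 *		df = devfreq_add_device(&pdev->dev, &foo_profile,
 *					"simple_ondemand", NULL);
 *		if (IS_ERR(df))
 *			return PTR_ERR(df);
 *		...
 *	}
 *
 * The matching remove path calls devfreq_remove_device(df), or the driver
 * can use devm_devfreq_add_device() below and skip manual removal.
 */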
static int devm_devfreq_dev_match(struct device *dev, void *res, void *data)
{
	struct devfreq **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

static void devm_devfreq_dev_release(struct device *dev, void *res)
{
	devfreq_remove_device(*(struct devfreq **)res);
}

/**
 * devm_devfreq_add_device() - Resource-managed devfreq_add_device()
 * @dev:	the device to add devfreq feature.
 * @profile:	device-specific profile to run devfreq.
 * @governor_name:	name of the policy to choose frequency.
 * @data:	private data for the governor. The devfreq framework does not
 *		touch this value.
 *
 * This function automatically manages the memory of the devfreq device
 * using device resource management, which simplifies freeing the devfreq
 * device.
 */
struct devfreq *devm_devfreq_add_device(struct device *dev,
					struct devfreq_dev_profile *profile,
					const char *governor_name,
					void *data)
{
	struct devfreq **ptr, *devfreq;

	ptr = devres_alloc(devm_devfreq_dev_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	devfreq = devfreq_add_device(dev, profile, governor_name, data);
	if (IS_ERR(devfreq)) {
		devres_free(ptr);
		return ERR_PTR(-ENOMEM);
	}

	*ptr = devfreq;
	devres_add(dev, ptr);

	return devfreq;
}
EXPORT_SYMBOL(devm_devfreq_add_device);

/**
 * devm_devfreq_remove_device() - Resource-managed devfreq_remove_device()
 * @dev:	the device from which to remove the devfreq feature.
 * @devfreq:	the devfreq instance to be removed
 */
void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq)
{
	WARN_ON(devres_release(dev, devm_devfreq_dev_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_remove_device);

/**
 * devfreq_suspend_device() - Suspend devfreq of a device.
 * @devfreq:	the devfreq instance to be suspended
 *
 * This function is intended to be called by the pm callbacks
 * (e.g., runtime_suspend, suspend) of the device driver that
 * holds the devfreq.
 */
int devfreq_suspend_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	if (!devfreq->governor)
		return 0;

	return devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_SUSPEND, NULL);
}
EXPORT_SYMBOL(devfreq_suspend_device);

/**
 * devfreq_resume_device() - Resume devfreq of a device.
 * @devfreq:	the devfreq instance to be resumed
 *
 * This function is intended to be called by the pm callbacks
 * (e.g., runtime_resume, resume) of the device driver that
 * holds the devfreq.
 */
int devfreq_resume_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	if (!devfreq->governor)
		return 0;

	return devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_RESUME, NULL);
}
EXPORT_SYMBOL(devfreq_resume_device);
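
/*
 * Illustrative sketch (hypothetical "foo" driver): devfreq_suspend_device()
 * and devfreq_resume_device() are meant to be called from the driver's own
 * PM callbacks so that load monitoring stops while the device is idle.
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		devfreq_suspend_device(foo->devfreq);
 *		// ... put the hardware into a low-power state ...
 *		return 0;
 *	}
 *
 *	static int foo_runtime_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		// ... bring the hardware back up ...
 *		devfreq_resume_device(foo->devfreq);
 *		return 0;
 *	}
 */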
/**
 * devfreq_add_governor() - Add devfreq governor
 * @governor:	the devfreq governor to be added
 */
int devfreq_add_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (!IS_ERR(g)) {
		pr_err("%s: governor %s already registered\n", __func__,
		       g->name);
		err = -EINVAL;
		goto err_out;
	}

	list_add(&governor->node, &devfreq_governor_list);

	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret = 0;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* The following should never occur */
			if (devfreq->governor) {
				dev_warn(dev,
					 "%s: Governor %s already present\n",
					 __func__, devfreq->governor->name);
				ret = devfreq->governor->event_handler(devfreq,
							DEVFREQ_GOV_STOP, NULL);
				if (ret) {
					dev_warn(dev,
						 "%s: Governor %s stop = %d\n",
						 __func__,
						 devfreq->governor->name, ret);
				}
				/* Fall through */
			}
			devfreq->governor = governor;
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_START, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s start=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
		}
	}

err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_add_governor);
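
/*
 * Illustrative sketch (not part of this file): a governor module built on
 * devfreq_add_governor() above and devfreq_remove_governor() below.  The
 * "demo_*" names refer to the hypothetical callbacks sketched earlier.
 *
 *	static struct devfreq_governor demo_governor = {
 *		.name		 = "demo",
 *		.get_target_freq = demo_get_target_freq,
 *		.event_handler	 = demo_event_handler,
 *	};
 *
 *	static int __init demo_governor_init(void)
 *	{
 *		return devfreq_add_governor(&demo_governor);
 *	}
 *	module_init(demo_governor_init);
 *
 *	static void __exit demo_governor_exit(void)
 *	{
 *		devfreq_remove_governor(&demo_governor);
 *	}
 *	module_exit(demo_governor_exit);
 */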
/**
 * devfreq_remove_governor() - Remove devfreq governor
 * @governor:	the devfreq governor to be removed
 */
int devfreq_remove_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (IS_ERR(g)) {
		pr_err("%s: governor %s not registered\n", __func__,
		       governor->name);
		err = PTR_ERR(g);
		goto err_out;
	}
	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* we should have a devfreq governor! */
			if (!devfreq->governor) {
				dev_warn(dev, "%s: Governor %s NOT present\n",
					 __func__, governor->name);
				continue;
			}
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_STOP, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s stop=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
			devfreq->governor = NULL;
		}
	}

	list_del(&governor->node);
err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_remove_governor);

static ssize_t governor_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	if (!to_devfreq(dev)->governor)
		return -EINVAL;

	return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
}

static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	int ret;
	char str_governor[DEVFREQ_NAME_LEN + 1];
	struct devfreq_governor *governor;

	ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&devfreq_list_lock);
	governor = find_devfreq_governor(str_governor);
	if (IS_ERR(governor)) {
		ret = PTR_ERR(governor);
		goto out;
	}
	if (df->governor == governor) {
		ret = 0;
		goto out;
	}

	if (df->governor) {
		ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
		if (ret) {
			dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
				 __func__, df->governor->name, ret);
			goto out;
		}
	}
	df->governor = governor;
	strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
	if (ret)
		dev_warn(dev, "%s: Governor %s not started(%d)\n",
			 __func__, df->governor->name, ret);
out:
	mutex_unlock(&devfreq_list_lock);

	if (!ret)
		ret = count;
	return ret;
}
static DEVICE_ATTR_RW(governor);

static ssize_t available_governors_show(struct device *d,
					struct device_attribute *attr,
					char *buf)
{
	struct devfreq_governor *tmp_governor;
	ssize_t count = 0;

	mutex_lock(&devfreq_list_lock);
	list_for_each_entry(tmp_governor, &devfreq_governor_list, node)
		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
				   "%s ", tmp_governor->name);
	mutex_unlock(&devfreq_list_lock);

	/* Truncate the trailing space */
	if (count)
		count--;

	count += sprintf(&buf[count], "\n");

	return count;
}
static DEVICE_ATTR_RO(available_governors);

static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	unsigned long freq;
	struct devfreq *devfreq = to_devfreq(dev);

	if (devfreq->profile->get_cur_freq &&
		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
		return sprintf(buf, "%lu\n", freq);

	return sprintf(buf, "%lu\n", devfreq->previous_freq);
}
static DEVICE_ATTR_RO(cur_freq);

static ssize_t target_freq_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
}
static DEVICE_ATTR_RO(target_freq);

static ssize_t polling_interval_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
}
static ssize_t polling_interval_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned int value;
	int ret;

	if (!df->governor)
		return -EINVAL;

	ret = sscanf(buf, "%u", &value);
	if (ret != 1)
		return -EINVAL;

	df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
	ret = count;

	return ret;
}
static DEVICE_ATTR_RW(polling_interval);

static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;
	unsigned long max;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&df->lock);
	max = df->max_freq;
	if (value && max && value > max) {
		ret = -EINVAL;
		goto unlock;
	}

	df->min_freq = value;
	update_devfreq(df);
	ret = count;
unlock:
	mutex_unlock(&df->lock);
	return ret;
}

static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;
	unsigned long min;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&df->lock);
	min = df->min_freq;
	if (value && min && value < min) {
		ret = -EINVAL;
		goto unlock;
	}

	df->max_freq = value;
	update_devfreq(df);
	ret = count;
unlock:
	mutex_unlock(&df->lock);
	return ret;
}

#define show_one(name)						\
static ssize_t name##_show					\
(struct device *dev, struct device_attribute *attr, char *buf)	\
{								\
	return sprintf(buf, "%lu\n", to_devfreq(dev)->name);	\
}
show_one(min_freq);
show_one(max_freq);

static DEVICE_ATTR_RW(min_freq);
static DEVICE_ATTR_RW(max_freq);

static ssize_t available_frequencies_show(struct device *d,
					  struct device_attribute *attr,
					  char *buf)
{
	struct devfreq *df = to_devfreq(d);
	struct device *dev = df->dev.parent;
	struct dev_pm_opp *opp;
	ssize_t count = 0;
	unsigned long freq = 0;

	rcu_read_lock();
	do {
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;

		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
				   "%lu ", freq);
		freq++;
	} while (1);
	rcu_read_unlock();

	/* Truncate the trailing space */
	if (count)
		count--;

	count += sprintf(&buf[count], "\n");

	return count;
}
static DEVICE_ATTR_RO(available_frequencies);
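
/*
 * The attributes above form the per-device sysfs interface under
 * /sys/class/devfreq/<device>/.  A typical interactive session might look
 * like this (illustrative only; the device name is hypothetical and the
 * governor list depends on the kernel configuration):
 *
 *	# cat /sys/class/devfreq/soc:memory-bus/available_governors
 *	simple_ondemand userspace powersave performance
 *	# echo performance > /sys/class/devfreq/soc:memory-bus/governor
 *	# echo 50 > /sys/class/devfreq/soc:memory-bus/polling_interval
 *	# cat /sys/class/devfreq/soc:memory-bus/cur_freq
 */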
static ssize_t trans_stat_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct devfreq *devfreq = to_devfreq(dev);
	ssize_t len;
	int i, j;
	unsigned int max_state = devfreq->profile->max_state;

	if (!devfreq->stop_polling &&
			devfreq_update_status(devfreq, devfreq->previous_freq))
		return 0;
	if (max_state == 0)
		return sprintf(buf, "Not Supported.\n");

	len = sprintf(buf, "     From  :   To\n");
	len += sprintf(buf + len, "           :");
	for (i = 0; i < max_state; i++)
		len += sprintf(buf + len, "%10lu",
				devfreq->profile->freq_table[i]);

	len += sprintf(buf + len, "   time(ms)\n");

	for (i = 0; i < max_state; i++) {
		if (devfreq->profile->freq_table[i]
					== devfreq->previous_freq) {
			len += sprintf(buf + len, "*");
		} else {
			len += sprintf(buf + len, " ");
		}
		len += sprintf(buf + len, "%10lu:",
				devfreq->profile->freq_table[i]);
		for (j = 0; j < max_state; j++)
			len += sprintf(buf + len, "%10u",
				devfreq->trans_table[(i * max_state) + j]);
		len += sprintf(buf + len, "%10u\n",
			jiffies_to_msecs(devfreq->time_in_state[i]));
	}

	len += sprintf(buf + len, "Total transition : %u\n",
			devfreq->total_trans);
	return len;
}
static DEVICE_ATTR_RO(trans_stat);

static struct attribute *devfreq_attrs[] = {
	&dev_attr_governor.attr,
	&dev_attr_available_governors.attr,
	&dev_attr_cur_freq.attr,
	&dev_attr_available_frequencies.attr,
	&dev_attr_target_freq.attr,
	&dev_attr_polling_interval.attr,
	&dev_attr_min_freq.attr,
	&dev_attr_max_freq.attr,
	&dev_attr_trans_stat.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devfreq);

static int __init devfreq_init(void)
{
	devfreq_class = class_create(THIS_MODULE, "devfreq");
	if (IS_ERR(devfreq_class)) {
		pr_err("%s: couldn't create class\n", __FILE__);
		return PTR_ERR(devfreq_class);
	}

	devfreq_wq = create_freezable_workqueue("devfreq_wq");
	if (!devfreq_wq) {
		class_destroy(devfreq_class);
		pr_err("%s: couldn't create workqueue\n", __FILE__);
		return -ENOMEM;
	}
	devfreq_class->dev_groups = devfreq_groups;

	return 0;
}
subsys_initcall(devfreq_init);

static void __exit devfreq_exit(void)
{
	class_destroy(devfreq_class);
	destroy_workqueue(devfreq_wq);
}
module_exit(devfreq_exit);

/*
 * The following are helper functions for devfreq user device drivers with
 * OPP framework.
 */

/**
 * devfreq_recommended_opp() - Helper function to get the proper OPP for the
 *			       freq value given to the target callback.
 * @dev:	The devfreq user device. (parent of devfreq)
 * @freq:	The frequency given to target function
 * @flags:	Flags handed from devfreq framework.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
					   unsigned long *freq,
					   u32 flags)
{
	struct dev_pm_opp *opp;

	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
		/* The freq is an upper bound. opp should be lower */
		opp = dev_pm_opp_find_freq_floor(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ERANGE))
			opp = dev_pm_opp_find_freq_ceil(dev, freq);
	} else {
		/* The freq is a lower bound. opp should be higher */
		opp = dev_pm_opp_find_freq_ceil(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ERANGE))
			opp = dev_pm_opp_find_freq_floor(dev, freq);
	}

	return opp;
}
EXPORT_SYMBOL(devfreq_recommended_opp);
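
/*
 * Illustrative sketch (hypothetical "foo" driver): a profile->target()
 * callback that honours the locking rule documented above by resolving the
 * recommended OPP under rcu_read_lock() before programming the hardware.
 *
 *	static int foo_target(struct device *dev, unsigned long *freq,
 *			      u32 flags)
 *	{
 *		struct dev_pm_opp *opp;
 *		unsigned long volt;
 *
 *		rcu_read_lock();
 *		opp = devfreq_recommended_opp(dev, freq, flags);
 *		if (IS_ERR(opp)) {
 *			rcu_read_unlock();
 *			return PTR_ERR(opp);
 *		}
 *		volt = dev_pm_opp_get_voltage(opp);
 *		rcu_read_unlock();
 *
 *		// ... apply *freq and volt to the clocks/regulators ...
 *		return 0;
 *	}
 */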
/**
 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
 *				     of any changes in the OPP availability
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	struct srcu_notifier_head *nh;
	int ret = 0;

	rcu_read_lock();
	nh = dev_pm_opp_get_notifier(dev);
	if (IS_ERR(nh))
		ret = PTR_ERR(nh);
	rcu_read_unlock();
	if (!ret)
		ret = srcu_notifier_chain_register(nh, &devfreq->nb);

	return ret;
}
EXPORT_SYMBOL(devfreq_register_opp_notifier);

/**
 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
 *				       notified of any changes in the OPP
 *				       availability
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 *
 * This must be called from the exit() callback of devfreq_dev_profile if
 * devfreq_recommended_opp() is used.
 */
int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	struct srcu_notifier_head *nh;
	int ret = 0;

	rcu_read_lock();
	nh = dev_pm_opp_get_notifier(dev);
	if (IS_ERR(nh))
		ret = PTR_ERR(nh);
	rcu_read_unlock();
	if (!ret)
		ret = srcu_notifier_chain_unregister(nh, &devfreq->nb);

	return ret;
}
EXPORT_SYMBOL(devfreq_unregister_opp_notifier);

static void devm_devfreq_opp_release(struct device *dev, void *res)
{
	devfreq_unregister_opp_notifier(dev, *(struct devfreq **)res);
}

/**
 * devm_devfreq_register_opp_notifier() - Resource-managed
 *					   devfreq_register_opp_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
int devm_devfreq_register_opp_notifier(struct device *dev,
				       struct devfreq *devfreq)
{
	struct devfreq **ptr;
	int ret;

	ptr = devres_alloc(devm_devfreq_opp_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = devfreq_register_opp_notifier(dev, devfreq);
	if (ret) {
		devres_free(ptr);
		return ret;
	}

	*ptr = devfreq;
	devres_add(dev, ptr);

	return 0;
}
EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);

/**
 * devm_devfreq_unregister_opp_notifier() - Resource-managed
 *					     devfreq_unregister_opp_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
void devm_devfreq_unregister_opp_notifier(struct device *dev,
					  struct devfreq *devfreq)
{
	WARN_ON(devres_release(dev, devm_devfreq_opp_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);

MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_DESCRIPTION("devfreq class support");
MODULE_LICENSE("GPL");