/*
 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
 *	    for Non-CPU Devices.
 *
 * Copyright (C) 2011 Samsung Electronics
 *	MyungJoo Ham <myungjoo.ham@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/pm_opp.h>
#include <linux/devfreq.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include "governor.h"

static struct class *devfreq_class;

/*
 * devfreq core provides delayed work based load monitoring helper
 * functions. Governors can use these or can implement their own
 * monitoring mechanism.
 */
static struct workqueue_struct *devfreq_wq;

/* The list of all device-devfreq governors */
static LIST_HEAD(devfreq_governor_list);
/* The list of all device-devfreq */
static LIST_HEAD(devfreq_list);
static DEFINE_MUTEX(devfreq_list_lock);

/**
 * find_device_devfreq() - find devfreq struct using device pointer
 * @dev:	device pointer used to lookup device devfreq.
 *
 * Search the list of device devfreqs and return the matched device's
 * devfreq info. devfreq_list_lock should be held by the caller.
 */
static struct devfreq *find_device_devfreq(struct device *dev)
{
	struct devfreq *tmp_devfreq;

	if (unlikely(IS_ERR_OR_NULL(dev))) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
		if (tmp_devfreq->dev.parent == dev)
			return tmp_devfreq;
	}

	return ERR_PTR(-ENODEV);
}

/**
 * devfreq_get_freq_level() - Lookup freq_table for the frequency
 * @devfreq:	the devfreq instance
 * @freq:	the target frequency
 */
static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
{
	int lev;

	for (lev = 0; lev < devfreq->profile->max_state; lev++)
		if (freq == devfreq->profile->freq_table[lev])
			return lev;

	return -EINVAL;
}

/**
 * devfreq_update_status() - Update statistics of devfreq behavior
 * @devfreq:	the devfreq instance
 * @freq:	the update target frequency
 */
static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
{
	int lev, prev_lev, ret = 0;
	unsigned long cur_time;

	cur_time = jiffies;

	prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
	if (prev_lev < 0) {
		ret = prev_lev;
		goto out;
	}

	devfreq->time_in_state[prev_lev] +=
			cur_time - devfreq->last_stat_updated;

	lev = devfreq_get_freq_level(devfreq, freq);
	if (lev < 0) {
		ret = lev;
		goto out;
	}

	if (lev != prev_lev) {
		devfreq->trans_table[(prev_lev *
				devfreq->profile->max_state) + lev]++;
		devfreq->total_trans++;
	}

out:
	devfreq->last_stat_updated = cur_time;
	return ret;
}

/**
 * find_devfreq_governor() - find devfreq governor from name
 * @name:	name of the governor
 *
 * Search the list of devfreq governors and return the matched
 * governor's pointer. devfreq_list_lock should be held by the caller.
 */
static struct devfreq_governor *find_devfreq_governor(const char *name)
{
	struct devfreq_governor *tmp_governor;

	if (unlikely(IS_ERR_OR_NULL(name))) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
		if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
			return tmp_governor;
	}

	return ERR_PTR(-ENODEV);
}

/* Load monitoring helper functions for governors use */

/**
 * update_devfreq() - Reevaluate the device and configure frequency.
 * @devfreq:	the devfreq instance.
 *
 * Note: Lock devfreq->lock before calling update_devfreq
 *	 This function is exported for governors.
 */
int update_devfreq(struct devfreq *devfreq)
{
	unsigned long freq;
	int err = 0;
	u32 flags = 0;

	if (!mutex_is_locked(&devfreq->lock)) {
		WARN(true, "devfreq->lock must be locked by the caller.\n");
		return -EINVAL;
	}

	if (!devfreq->governor)
		return -EINVAL;

	/* Reevaluate the proper frequency */
	err = devfreq->governor->get_target_freq(devfreq, &freq);
	if (err)
		return err;

	/*
	 * Adjust the frequency with user freq and QoS.
	 *
	 * List from the highest priority
	 * max_freq (probably called by thermal when it's too hot)
	 * min_freq
	 */

	if (devfreq->min_freq && freq < devfreq->min_freq) {
		freq = devfreq->min_freq;
		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
	}
	if (devfreq->max_freq && freq > devfreq->max_freq) {
		freq = devfreq->max_freq;
		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
	}

	err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
	if (err)
		return err;

	if (devfreq->profile->freq_table)
		if (devfreq_update_status(devfreq, freq))
			dev_err(&devfreq->dev,
				"Couldn't update frequency transition information.\n");

	devfreq->previous_freq = freq;
	return err;
}
EXPORT_SYMBOL(update_devfreq);
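
/*
 * Example (illustrative sketch, not part of this file): a governor that
 * wants to re-evaluate the frequency from its own trigger (an interrupt,
 * a notifier, etc.) calls update_devfreq() with devfreq->lock held:
 *
 *	mutex_lock(&devfreq->lock);
 *	err = update_devfreq(devfreq);
 *	mutex_unlock(&devfreq->lock);
 *
 * Only the locking rule is mandated here; how the trigger is obtained and
 * how err is handled is up to the governor.
 */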

/**
 * devfreq_monitor() - Periodically poll devfreq objects.
 * @work:	the work struct used to run devfreq_monitor periodically.
 *
 */
static void devfreq_monitor(struct work_struct *work)
{
	int err;
	struct devfreq *devfreq = container_of(work,
					struct devfreq, work.work);

	mutex_lock(&devfreq->lock);
	err = update_devfreq(devfreq);
	if (err)
		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);

	queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	mutex_unlock(&devfreq->lock);
}

/**
 * devfreq_monitor_start() - Start load monitoring of devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function for starting devfreq device load monitoring. By
 * default delayed work based monitoring is supported. Function
 * to be called from governor in response to DEVFREQ_GOV_START
 * event when device is added to devfreq framework.
 */
void devfreq_monitor_start(struct devfreq *devfreq)
{
	INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
	if (devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
}
EXPORT_SYMBOL(devfreq_monitor_start);

/**
 * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to stop devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_STOP
 * event when device is removed from devfreq framework.
 */
void devfreq_monitor_stop(struct devfreq *devfreq)
{
	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_stop);

/**
 * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to suspend devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_SUSPEND
 * event or when polling interval is set to zero.
 *
 * Note: Though this function is the same as devfreq_monitor_stop(), it is
 * intentionally kept separate to provide hooks for collecting
 * transition statistics.
 */
void devfreq_monitor_suspend(struct devfreq *devfreq)
{
	mutex_lock(&devfreq->lock);
	if (devfreq->stop_polling) {
		mutex_unlock(&devfreq->lock);
		return;
	}

	devfreq_update_status(devfreq, devfreq->previous_freq);
	devfreq->stop_polling = true;
	mutex_unlock(&devfreq->lock);
	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_suspend);

/**
 * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to resume devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_RESUME
 * event or when polling interval is set to non-zero.
 */
void devfreq_monitor_resume(struct devfreq *devfreq)
{
	unsigned long freq;

	mutex_lock(&devfreq->lock);
	if (!devfreq->stop_polling)
		goto out;

	if (!delayed_work_pending(&devfreq->work) &&
			devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));

	devfreq->last_stat_updated = jiffies;
	devfreq->stop_polling = false;

	if (devfreq->profile->get_cur_freq &&
		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
		devfreq->previous_freq = freq;

out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_monitor_resume);
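
/*
 * Example (illustrative sketch, not part of this file): a delayed-work based
 * governor typically wires the monitoring helpers above into its
 * event_handler callback, roughly as below. "foo_event_handler" is a
 * hypothetical name; see the simple_ondemand governor for a real user.
 *
 *	static int foo_event_handler(struct devfreq *devfreq,
 *				     unsigned int event, void *data)
 *	{
 *		switch (event) {
 *		case DEVFREQ_GOV_START:
 *			devfreq_monitor_start(devfreq);
 *			break;
 *		case DEVFREQ_GOV_STOP:
 *			devfreq_monitor_stop(devfreq);
 *			break;
 *		case DEVFREQ_GOV_INTERVAL:
 *			devfreq_interval_update(devfreq, (unsigned int *)data);
 *			break;
 *		case DEVFREQ_GOV_SUSPEND:
 *			devfreq_monitor_suspend(devfreq);
 *			break;
 *		case DEVFREQ_GOV_RESUME:
 *			devfreq_monitor_resume(devfreq);
 *			break;
 *		}
 *		return 0;
 *	}
 */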

/**
 * devfreq_interval_update() - Update device devfreq monitoring interval
 * @devfreq:	the devfreq instance.
 * @delay:	new polling interval to be set.
 *
 * Helper function to set new load monitoring polling interval. Function
 * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
 */
void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
{
	unsigned int cur_delay = devfreq->profile->polling_ms;
	unsigned int new_delay = *delay;

	mutex_lock(&devfreq->lock);
	devfreq->profile->polling_ms = new_delay;

	if (devfreq->stop_polling)
		goto out;

	/* if new delay is zero, stop polling */
	if (!new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		return;
	}

	/* if current delay is zero, start polling with new delay */
	if (!cur_delay) {
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
		goto out;
	}

	/* if current delay is greater than new delay, restart polling */
	if (cur_delay > new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		mutex_lock(&devfreq->lock);
		if (!devfreq->stop_polling)
			queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	}
out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_interval_update);

/**
 * devfreq_notifier_call() - Notify that the device frequency requirements
 *			     have been changed out of devfreq framework.
 * @nb:		the notifier_block (supposed to be devfreq->nb)
 * @type:	not used
 * @devp:	not used
 *
 * Called by a notifier that uses devfreq->nb.
 */
static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
				 void *devp)
{
	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
	int ret;

	mutex_lock(&devfreq->lock);
	ret = update_devfreq(devfreq);
	mutex_unlock(&devfreq->lock);

	return ret;
}

/**
 * _remove_devfreq() - Remove devfreq from the list and release its resources.
 * @devfreq:	the devfreq struct
 * @skip:	skip calling device_unregister().
 */
static void _remove_devfreq(struct devfreq *devfreq, bool skip)
{
	mutex_lock(&devfreq_list_lock);
	if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
		mutex_unlock(&devfreq_list_lock);
		dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
		return;
	}
	list_del(&devfreq->node);
	mutex_unlock(&devfreq_list_lock);

	if (devfreq->governor)
		devfreq->governor->event_handler(devfreq,
						 DEVFREQ_GOV_STOP, NULL);

	if (devfreq->profile->exit)
		devfreq->profile->exit(devfreq->dev.parent);

	if (!skip && get_device(&devfreq->dev)) {
		device_unregister(&devfreq->dev);
		put_device(&devfreq->dev);
	}

	mutex_destroy(&devfreq->lock);
	kfree(devfreq);
}

/**
 * devfreq_dev_release() - Callback for struct device to release the device.
 * @dev:	the devfreq device
 *
 * This calls _remove_devfreq() if _remove_devfreq() has not been called yet.
 * Note that devfreq_dev_release() could be called by _remove_devfreq() as
 * well as by others unregistering the device.
 */
static void devfreq_dev_release(struct device *dev)
{
	struct devfreq *devfreq = to_devfreq(dev);

	_remove_devfreq(devfreq, true);
}

/**
 * devfreq_add_device() - Add devfreq feature to the device
 * @dev:	the device to add devfreq feature.
 * @profile:	device-specific profile to run devfreq.
 * @governor_name:	name of the policy to choose frequency.
 * @data:	private data for the governor. The devfreq framework does not
 *		touch this value.
 */
struct devfreq *devfreq_add_device(struct device *dev,
				   struct devfreq_dev_profile *profile,
				   const char *governor_name,
				   void *data)
{
	struct devfreq *devfreq;
	struct devfreq_governor *governor;
	int err = 0;

	if (!dev || !profile || !governor_name) {
		dev_err(dev, "%s: Invalid parameters.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&devfreq_list_lock);
	devfreq = find_device_devfreq(dev);
	mutex_unlock(&devfreq_list_lock);
	if (!IS_ERR(devfreq)) {
		dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
		err = -EINVAL;
		goto err_out;
	}

	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
	if (!devfreq) {
		dev_err(dev, "%s: Unable to create devfreq for the device\n",
			__func__);
		err = -ENOMEM;
		goto err_out;
	}

	mutex_init(&devfreq->lock);
	mutex_lock(&devfreq->lock);
	devfreq->dev.parent = dev;
	devfreq->dev.class = devfreq_class;
	devfreq->dev.release = devfreq_dev_release;
	devfreq->profile = profile;
	strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
	devfreq->previous_freq = profile->initial_freq;
	devfreq->data = data;
	devfreq->nb.notifier_call = devfreq_notifier_call;

	devfreq->trans_table = devm_kzalloc(dev, sizeof(unsigned int) *
						devfreq->profile->max_state *
						devfreq->profile->max_state,
						GFP_KERNEL);
	devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned long) *
						devfreq->profile->max_state,
						GFP_KERNEL);
	devfreq->last_stat_updated = jiffies;

	dev_set_name(&devfreq->dev, "%s", dev_name(dev));
	err = device_register(&devfreq->dev);
	if (err) {
		put_device(&devfreq->dev);
		mutex_unlock(&devfreq->lock);
		goto err_dev;
	}

	mutex_unlock(&devfreq->lock);

	mutex_lock(&devfreq_list_lock);
	list_add(&devfreq->node, &devfreq_list);

	governor = find_devfreq_governor(devfreq->governor_name);
	if (!IS_ERR(governor))
		devfreq->governor = governor;
	if (devfreq->governor)
		err = devfreq->governor->event_handler(devfreq,
					DEVFREQ_GOV_START, NULL);
	mutex_unlock(&devfreq_list_lock);
	if (err) {
		dev_err(dev, "%s: Unable to start governor for the device\n",
			__func__);
		goto err_init;
	}

	return devfreq;

err_init:
	list_del(&devfreq->node);
	device_unregister(&devfreq->dev);
err_dev:
	kfree(devfreq);
err_out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(devfreq_add_device);

/**
 * devfreq_remove_device() - Remove devfreq feature from a device.
 * @devfreq:	the devfreq instance to be removed
 *
 * The opposite of devfreq_add_device().
 */
int devfreq_remove_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	_remove_devfreq(devfreq, false);

	return 0;
}
EXPORT_SYMBOL(devfreq_remove_device);
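
/*
 * Example (illustrative sketch, not part of this file): a device driver
 * opts into devfreq by filling a devfreq_dev_profile and calling
 * devfreq_add_device(), typically at probe time. The foo_* names below are
 * hypothetical driver callbacks and data:
 *
 *	static struct devfreq_dev_profile foo_profile = {
 *		.initial_freq	= 200000000,
 *		.polling_ms	= 100,
 *		.target		= foo_target,
 *		.get_dev_status	= foo_get_dev_status,
 *	};
 *
 *	foo->devfreq = devfreq_add_device(&pdev->dev, &foo_profile,
 *					  "simple_ondemand", NULL);
 *	if (IS_ERR(foo->devfreq))
 *		return PTR_ERR(foo->devfreq);
 *
 * The matching devfreq_remove_device(foo->devfreq) call belongs in the
 * driver's remove path.
 */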

/**
 * devfreq_suspend_device() - Suspend devfreq of a device.
 * @devfreq:	the devfreq instance to be suspended
 *
 * This function is intended to be called by the pm callbacks
 * (e.g., runtime_suspend, suspend) of the device driver that
 * holds the devfreq.
 */
int devfreq_suspend_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	if (!devfreq->governor)
		return 0;

	return devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_SUSPEND, NULL);
}
EXPORT_SYMBOL(devfreq_suspend_device);

/**
 * devfreq_resume_device() - Resume devfreq of a device.
 * @devfreq:	the devfreq instance to be resumed
 *
 * This function is intended to be called by the pm callbacks
 * (e.g., runtime_resume, resume) of the device driver that
 * holds the devfreq.
 */
int devfreq_resume_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	if (!devfreq->governor)
		return 0;

	return devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_RESUME, NULL);
}
EXPORT_SYMBOL(devfreq_resume_device);
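
/*
 * Example (illustrative sketch, not part of this file): a driver that holds
 * a devfreq instance forwards its PM transitions to devfreq from its own
 * callbacks; foo_runtime_suspend()/foo_runtime_resume() are hypothetical:
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		return devfreq_suspend_device(foo->devfreq);
 *	}
 *
 *	static int foo_runtime_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		return devfreq_resume_device(foo->devfreq);
 *	}
 */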

/**
 * devfreq_add_governor() - Add devfreq governor
 * @governor:	the devfreq governor to be added
 */
int devfreq_add_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (!IS_ERR(g)) {
		pr_err("%s: governor %s already registered\n", __func__,
		       g->name);
		err = -EINVAL;
		goto err_out;
	}

	list_add(&governor->node, &devfreq_governor_list);

	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret = 0;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* The following should never occur */
			if (devfreq->governor) {
				dev_warn(dev,
					 "%s: Governor %s already present\n",
					 __func__, devfreq->governor->name);
				ret = devfreq->governor->event_handler(devfreq,
							DEVFREQ_GOV_STOP, NULL);
				if (ret) {
					dev_warn(dev,
						 "%s: Governor %s stop = %d\n",
						 __func__,
						 devfreq->governor->name, ret);
				}
				/* Fall through */
			}
			devfreq->governor = governor;
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_START, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s start=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
		}
	}

err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_add_governor);

/**
 * devfreq_remove_governor() - Remove devfreq governor
 * @governor:	the devfreq governor to be removed
 */
int devfreq_remove_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (IS_ERR(g)) {
		pr_err("%s: governor %s not registered\n", __func__,
		       governor->name);
		err = PTR_ERR(g);
		goto err_out;
	}
	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* we should have a devfreq governor! */
			if (!devfreq->governor) {
				dev_warn(dev, "%s: Governor %s NOT present\n",
					 __func__, governor->name);
				continue;
			}
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_STOP, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s stop=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
			devfreq->governor = NULL;
		}
	}

	list_del(&governor->node);
err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_remove_governor);
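
/*
 * Example (illustrative sketch, not part of this file): a governor module
 * registers and unregisters itself with the two calls above; the foo_*
 * names are hypothetical:
 *
 *	static struct devfreq_governor devfreq_foo = {
 *		.name		 = "foo",
 *		.get_target_freq = foo_get_target_freq,
 *		.event_handler	 = foo_event_handler,
 *	};
 *
 *	static int __init devfreq_foo_init(void)
 *	{
 *		return devfreq_add_governor(&devfreq_foo);
 *	}
 *	subsys_initcall(devfreq_foo_init);
 *
 *	static void __exit devfreq_foo_exit(void)
 *	{
 *		int ret = devfreq_remove_governor(&devfreq_foo);
 *
 *		if (ret)
 *			pr_err("%s: failed to remove governor: %d\n",
 *			       __func__, ret);
 *	}
 *	module_exit(devfreq_foo_exit);
 */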

static ssize_t governor_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	if (!to_devfreq(dev)->governor)
		return -EINVAL;

	return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
}

static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	int ret;
	char str_governor[DEVFREQ_NAME_LEN + 1];
	struct devfreq_governor *governor;

	ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&devfreq_list_lock);
	governor = find_devfreq_governor(str_governor);
	if (IS_ERR(governor)) {
		ret = PTR_ERR(governor);
		goto out;
	}
	if (df->governor == governor)
		goto out;

	if (df->governor) {
		ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
		if (ret) {
			dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
				 __func__, df->governor->name, ret);
			goto out;
		}
	}
	df->governor = governor;
	strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
	if (ret)
		dev_warn(dev, "%s: Governor %s not started(%d)\n",
			 __func__, df->governor->name, ret);
out:
	mutex_unlock(&devfreq_list_lock);

	if (!ret)
		ret = count;
	return ret;
}
static DEVICE_ATTR_RW(governor);

static ssize_t available_governors_show(struct device *d,
					struct device_attribute *attr,
					char *buf)
{
	struct devfreq_governor *tmp_governor;
	ssize_t count = 0;

	mutex_lock(&devfreq_list_lock);
	list_for_each_entry(tmp_governor, &devfreq_governor_list, node)
		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
				   "%s ", tmp_governor->name);
	mutex_unlock(&devfreq_list_lock);

	/* Truncate the trailing space */
	if (count)
		count--;

	count += sprintf(&buf[count], "\n");

	return count;
}
static DEVICE_ATTR_RO(available_governors);

static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	unsigned long freq;
	struct devfreq *devfreq = to_devfreq(dev);

	if (devfreq->profile->get_cur_freq &&
		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
		return sprintf(buf, "%lu\n", freq);

	return sprintf(buf, "%lu\n", devfreq->previous_freq);
}
static DEVICE_ATTR_RO(cur_freq);

static ssize_t target_freq_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
}
static DEVICE_ATTR_RO(target_freq);

static ssize_t polling_interval_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
}

static ssize_t polling_interval_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned int value;
	int ret;

	if (!df->governor)
		return -EINVAL;

	ret = sscanf(buf, "%u", &value);
	if (ret != 1)
		return -EINVAL;

	df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
	ret = count;

	return ret;
}
static DEVICE_ATTR_RW(polling_interval);

static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;
	unsigned long max;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&df->lock);
	max = df->max_freq;
	if (value && max && value > max) {
		ret = -EINVAL;
		goto unlock;
	}

	df->min_freq = value;
	update_devfreq(df);
	ret = count;
unlock:
	mutex_unlock(&df->lock);
	return ret;
}

static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->min_freq);
}

static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;
	unsigned long min;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&df->lock);
	min = df->min_freq;
	if (value && min && value < min) {
		ret = -EINVAL;
		goto unlock;
	}

	df->max_freq = value;
	update_devfreq(df);
	ret = count;
unlock:
	mutex_unlock(&df->lock);
	return ret;
}
static DEVICE_ATTR_RW(min_freq);

static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq);
}
static DEVICE_ATTR_RW(max_freq);

static ssize_t available_frequencies_show(struct device *d,
					  struct device_attribute *attr,
					  char *buf)
{
	struct devfreq *df = to_devfreq(d);
	struct device *dev = df->dev.parent;
	struct dev_pm_opp *opp;
	ssize_t count = 0;
	unsigned long freq = 0;

	rcu_read_lock();
	do {
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;

		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
				   "%lu ", freq);
		freq++;
	} while (1);
	rcu_read_unlock();

	/* Truncate the trailing space */
	if (count)
		count--;

	count += sprintf(&buf[count], "\n");

	return count;
}
static DEVICE_ATTR_RO(available_frequencies);

static ssize_t trans_stat_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct devfreq *devfreq = to_devfreq(dev);
	ssize_t len;
	int i, j;
	unsigned int max_state = devfreq->profile->max_state;

	if (!devfreq->stop_polling &&
			devfreq_update_status(devfreq, devfreq->previous_freq))
		return 0;

	len = sprintf(buf, "   From  :   To\n");
	len += sprintf(buf + len, "         :");
	for (i = 0; i < max_state; i++)
		len += sprintf(buf + len, "%8u",
				devfreq->profile->freq_table[i]);

	len += sprintf(buf + len, "   time(ms)\n");

	for (i = 0; i < max_state; i++) {
		if (devfreq->profile->freq_table[i]
					== devfreq->previous_freq) {
			len += sprintf(buf + len, "*");
		} else {
			len += sprintf(buf + len, " ");
		}
		len += sprintf(buf + len, "%8u:",
				devfreq->profile->freq_table[i]);
		for (j = 0; j < max_state; j++)
			len += sprintf(buf + len, "%8u",
				devfreq->trans_table[(i * max_state) + j]);
		len += sprintf(buf + len, "%10u\n",
			jiffies_to_msecs(devfreq->time_in_state[i]));
	}

	len += sprintf(buf + len, "Total transition : %u\n",
					devfreq->total_trans);
	return len;
}
static DEVICE_ATTR_RO(trans_stat);

static struct attribute *devfreq_attrs[] = {
	&dev_attr_governor.attr,
	&dev_attr_available_governors.attr,
	&dev_attr_cur_freq.attr,
	&dev_attr_available_frequencies.attr,
	&dev_attr_target_freq.attr,
	&dev_attr_polling_interval.attr,
	&dev_attr_min_freq.attr,
	&dev_attr_max_freq.attr,
	&dev_attr_trans_stat.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devfreq);

static int __init devfreq_init(void)
{
	devfreq_class = class_create(THIS_MODULE, "devfreq");
	if (IS_ERR(devfreq_class)) {
		pr_err("%s: couldn't create class\n", __FILE__);
		return PTR_ERR(devfreq_class);
	}

	devfreq_wq = create_freezable_workqueue("devfreq_wq");
	if (!devfreq_wq) {
		class_destroy(devfreq_class);
		pr_err("%s: couldn't create workqueue\n", __FILE__);
		return -ENOMEM;
	}
	devfreq_class->dev_groups = devfreq_groups;

	return 0;
}
subsys_initcall(devfreq_init);

static void __exit devfreq_exit(void)
{
	class_destroy(devfreq_class);
	destroy_workqueue(devfreq_wq);
}
module_exit(devfreq_exit);

/*
 * The following are helper functions for devfreq user device drivers with
 * OPP framework.
 */

/**
 * devfreq_recommended_opp() - Helper function to get proper OPP for the
 *			       freq value given to target callback.
 * @dev:	The devfreq user device. (parent of devfreq)
 * @freq:	The frequency given to target function
 * @flags:	Flags handed from devfreq framework.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer: the returned opp pointer remains valid for use with
 * opp_get_{voltage, freq} only while inside the locked section, so it must be
 * used before unlocking with rcu_read_unlock() to maintain its integrity.
 */
struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
					   unsigned long *freq,
					   u32 flags)
{
	struct dev_pm_opp *opp;

	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
		/* The freq is an upper bound. opp should be lower */
		opp = dev_pm_opp_find_freq_floor(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ERANGE))
			opp = dev_pm_opp_find_freq_ceil(dev, freq);
	} else {
		/* The freq is a lower bound. opp should be higher */
		opp = dev_pm_opp_find_freq_ceil(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ERANGE))
			opp = dev_pm_opp_find_freq_floor(dev, freq);
	}

	return opp;
}
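
/*
 * Example (illustrative sketch, not part of this file): a driver's target()
 * callback typically maps the requested frequency to an OPP under
 * rcu_read_lock(). foo_target() and foo_set_rate_and_voltage() are
 * hypothetical driver functions:
 *
 *	static int foo_target(struct device *dev, unsigned long *freq,
 *			      u32 flags)
 *	{
 *		struct dev_pm_opp *opp;
 *		unsigned long volt;
 *
 *		rcu_read_lock();
 *		opp = devfreq_recommended_opp(dev, freq, flags);
 *		if (IS_ERR(opp)) {
 *			rcu_read_unlock();
 *			return PTR_ERR(opp);
 *		}
 *		volt = dev_pm_opp_get_voltage(opp);
 *		rcu_read_unlock();
 *
 *		return foo_set_rate_and_voltage(dev, *freq, volt);
 *	}
 */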

/**
 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
 *				     of any changes in the OPP availability
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	struct srcu_notifier_head *nh;
	int ret = 0;

	rcu_read_lock();
	nh = dev_pm_opp_get_notifier(dev);
	if (IS_ERR(nh))
		ret = PTR_ERR(nh);
	rcu_read_unlock();
	if (!ret)
		ret = srcu_notifier_chain_register(nh, &devfreq->nb);

	return ret;
}

/**
 * devfreq_unregister_opp_notifier() - Helper function to stop devfreq from
 *				       being notified of any changes in the
 *				       OPP availability
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 *
 * This must be called from the exit() callback of devfreq_dev_profile if
 * devfreq_recommended_opp is used.
 */
int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	struct srcu_notifier_head *nh;
	int ret = 0;

	rcu_read_lock();
	nh = dev_pm_opp_get_notifier(dev);
	if (IS_ERR(nh))
		ret = PTR_ERR(nh);
	rcu_read_unlock();
	if (!ret)
		ret = srcu_notifier_chain_unregister(nh, &devfreq->nb);

	return ret;
}

MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_DESCRIPTION("devfreq class support");
MODULE_LICENSE("GPL");