/*
 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
 *	    for Non-CPU Devices.
 *
 * Copyright (C) 2011 Samsung Electronics
 *	MyungJoo Ham <myungjoo.ham@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/pm_opp.h>
#include <linux/devfreq.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include "governor.h"

static struct class *devfreq_class;

/*
 * devfreq core provides delayed work based load monitoring helper
 * functions. Governors can use these or can implement their own
 * monitoring mechanism.
 */
static struct workqueue_struct *devfreq_wq;

/* The list of all device-devfreq governors */
static LIST_HEAD(devfreq_governor_list);
/* The list of all device-devfreq */
static LIST_HEAD(devfreq_list);
static DEFINE_MUTEX(devfreq_list_lock);

/**
 * find_device_devfreq() - find devfreq struct using device pointer
 * @dev:	device pointer used to lookup device devfreq.
 *
 * Search the list of device devfreqs and return the matched device's
 * devfreq info. devfreq_list_lock should be held by the caller.
 */
static struct devfreq *find_device_devfreq(struct device *dev)
{
	struct devfreq *tmp_devfreq;

	if (unlikely(IS_ERR_OR_NULL(dev))) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
		if (tmp_devfreq->dev.parent == dev)
			return tmp_devfreq;
	}

	return ERR_PTR(-ENODEV);
}

/**
 * devfreq_get_freq_level() - Lookup freq_table for the frequency
 * @devfreq:	the devfreq instance
 * @freq:	the target frequency
 */
static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
{
	int lev;

	for (lev = 0; lev < devfreq->profile->max_state; lev++)
		if (freq == devfreq->profile->freq_table[lev])
			return lev;

	return -EINVAL;
}

/**
 * devfreq_update_status() - Update statistics of devfreq behavior
 * @devfreq:	the devfreq instance
 * @freq:	the update target frequency
 */
static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
{
	int lev, prev_lev;
	unsigned long cur_time;

	lev = devfreq_get_freq_level(devfreq, freq);
	if (lev < 0)
		return lev;

	cur_time = jiffies;
	devfreq->time_in_state[lev] +=
			 cur_time - devfreq->last_stat_updated;
	if (freq != devfreq->previous_freq) {
		prev_lev = devfreq_get_freq_level(devfreq,
						devfreq->previous_freq);
		devfreq->trans_table[(prev_lev *
				devfreq->profile->max_state) + lev]++;
		devfreq->total_trans++;
	}
	devfreq->last_stat_updated = cur_time;

	return 0;
}
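
/*
 * Illustrative note (not part of the original code): trans_table above is a
 * flattened max_state x max_state matrix indexed as
 * [from_level * max_state + to_level], and time_in_state[] accumulates the
 * jiffies spent at each level between updates.  For example, with
 * max_state == 3, a switch from freq_table[0] to freq_table[2] does:
 *
 *	prev_lev = 0, lev = 2;
 *	devfreq->trans_table[(prev_lev * 3) + 2]++;	// "0 -> 2" counter
 */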

/**
 * find_devfreq_governor() - find devfreq governor from name
 * @name:	name of the governor
 *
 * Search the list of devfreq governors and return the matched
 * governor's pointer. devfreq_list_lock should be held by the caller.
 */
static struct devfreq_governor *find_devfreq_governor(const char *name)
{
	struct devfreq_governor *tmp_governor;

	if (unlikely(IS_ERR_OR_NULL(name))) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
		if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
			return tmp_governor;
	}

	return ERR_PTR(-ENODEV);
}

/* Load monitoring helper functions for governors use */

/**
 * update_devfreq() - Reevaluate the device and configure frequency.
 * @devfreq:	the devfreq instance.
 *
 * Note: Lock devfreq->lock before calling update_devfreq.
 *	 This function is exported for governors.
 */
int update_devfreq(struct devfreq *devfreq)
{
	unsigned long freq;
	int err = 0;
	u32 flags = 0;

	if (!mutex_is_locked(&devfreq->lock)) {
		WARN(true, "devfreq->lock must be locked by the caller.\n");
		return -EINVAL;
	}

	if (!devfreq->governor)
		return -EINVAL;

	/* Reevaluate the proper frequency */
	err = devfreq->governor->get_target_freq(devfreq, &freq);
	if (err)
		return err;

	/*
	 * Adjust the frequency with user freq and QoS.
	 *
	 * List from the highest priority
	 * max_freq (probably called by thermal when it's too hot)
	 * min_freq
	 */

	if (devfreq->min_freq && freq < devfreq->min_freq) {
		freq = devfreq->min_freq;
		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
	}
	if (devfreq->max_freq && freq > devfreq->max_freq) {
		freq = devfreq->max_freq;
		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
	}

	err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
	if (err)
		return err;

	if (devfreq->profile->freq_table)
		if (devfreq_update_status(devfreq, freq))
			dev_err(&devfreq->dev,
				"Couldn't update frequency transition information.\n");

	devfreq->previous_freq = freq;
	return err;
}
EXPORT_SYMBOL(update_devfreq);
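
/*
 * Illustrative sketch (not part of the original code): a governor that wants
 * to force a reevaluation outside of the periodic work must take
 * devfreq->lock around update_devfreq(), e.g. in a hypothetical helper:
 *
 *	static int my_governor_kick(struct devfreq *devfreq)
 *	{
 *		int err;
 *
 *		mutex_lock(&devfreq->lock);
 *		err = update_devfreq(devfreq);
 *		mutex_unlock(&devfreq->lock);
 *
 *		return err;
 *	}
 *
 * This mirrors what devfreq_monitor() and devfreq_notifier_call() do below.
 */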

/**
 * devfreq_monitor() - Periodically poll devfreq objects.
 * @work:	the work struct used to run devfreq_monitor periodically.
 *
 */
static void devfreq_monitor(struct work_struct *work)
{
	int err;
	struct devfreq *devfreq = container_of(work,
					struct devfreq, work.work);

	mutex_lock(&devfreq->lock);
	err = update_devfreq(devfreq);
	if (err)
		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);

	queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	mutex_unlock(&devfreq->lock);
}

/**
 * devfreq_monitor_start() - Start load monitoring of devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function for starting devfreq device load monitoring. By
 * default delayed work based monitoring is supported. Function
 * to be called from governor in response to DEVFREQ_GOV_START
 * event when device is added to devfreq framework.
 */
void devfreq_monitor_start(struct devfreq *devfreq)
{
	INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
	if (devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
}
EXPORT_SYMBOL(devfreq_monitor_start);

/**
 * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to stop devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_STOP
 * event when device is removed from devfreq framework.
 */
void devfreq_monitor_stop(struct devfreq *devfreq)
{
	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_stop);

/**
 * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to suspend devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_SUSPEND
 * event or when polling interval is set to zero.
 *
 * Note: Though this function is the same as devfreq_monitor_stop(),
 * it is intentionally kept separate to provide hooks for collecting
 * transition statistics.
 */
void devfreq_monitor_suspend(struct devfreq *devfreq)
{
	mutex_lock(&devfreq->lock);
	if (devfreq->stop_polling) {
		mutex_unlock(&devfreq->lock);
		return;
	}

	devfreq_update_status(devfreq, devfreq->previous_freq);
	devfreq->stop_polling = true;
	mutex_unlock(&devfreq->lock);
	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_suspend);

/**
 * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to resume devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_RESUME
 * event or when polling interval is set to non-zero.
 */
void devfreq_monitor_resume(struct devfreq *devfreq)
{
	unsigned long freq;

	mutex_lock(&devfreq->lock);
	if (!devfreq->stop_polling)
		goto out;

	if (!delayed_work_pending(&devfreq->work) &&
			devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));

	devfreq->last_stat_updated = jiffies;
	devfreq->stop_polling = false;

	if (devfreq->profile->get_cur_freq &&
		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
		devfreq->previous_freq = freq;

out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_monitor_resume);

/**
 * devfreq_interval_update() - Update device devfreq monitoring interval
 * @devfreq:	the devfreq instance.
 * @delay:	new polling interval to be set.
 *
 * Helper function to set new load monitoring polling interval. Function
 * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
 */
void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
{
	unsigned int cur_delay = devfreq->profile->polling_ms;
	unsigned int new_delay = *delay;

	mutex_lock(&devfreq->lock);
	devfreq->profile->polling_ms = new_delay;

	if (devfreq->stop_polling)
		goto out;

	/* if new delay is zero, stop polling */
	if (!new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		return;
	}

	/* if current delay is zero, start polling with new delay */
	if (!cur_delay) {
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
		goto out;
	}

	/* if current delay is greater than new delay, restart polling */
	if (cur_delay > new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		mutex_lock(&devfreq->lock);
		if (!devfreq->stop_polling)
			queue_delayed_work(devfreq_wq, &devfreq->work,
			      msecs_to_jiffies(devfreq->profile->polling_ms));
	}
out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_interval_update);
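
/*
 * Illustrative sketch (not part of the original code): a minimal governor
 * built on the load monitoring helpers above.  The "my_governor*" names are
 * hypothetical; real governors (e.g. governor_simpleondemand.c) follow the
 * same pattern and register themselves with devfreq_add_governor().
 *
 *	static int my_governor_get_target_freq(struct devfreq *df,
 *					       unsigned long *freq)
 *	{
 *		*freq = df->profile->initial_freq;	// always pick one freq
 *		return 0;
 *	}
 *
 *	static int my_governor_event_handler(struct devfreq *devfreq,
 *					     unsigned int event, void *data)
 *	{
 *		switch (event) {
 *		case DEVFREQ_GOV_START:
 *			devfreq_monitor_start(devfreq);
 *			break;
 *		case DEVFREQ_GOV_STOP:
 *			devfreq_monitor_stop(devfreq);
 *			break;
 *		case DEVFREQ_GOV_INTERVAL:
 *			devfreq_interval_update(devfreq, (unsigned int *)data);
 *			break;
 *		case DEVFREQ_GOV_SUSPEND:
 *			devfreq_monitor_suspend(devfreq);
 *			break;
 *		case DEVFREQ_GOV_RESUME:
 *			devfreq_monitor_resume(devfreq);
 *			break;
 *		}
 *		return 0;
 *	}
 *
 *	static struct devfreq_governor my_governor = {
 *		.name = "my_governor",
 *		.get_target_freq = my_governor_get_target_freq,
 *		.event_handler = my_governor_event_handler,
 *	};
 */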

/**
 * devfreq_notifier_call() - Notify that the device frequency requirements
 *			     have been changed out of devfreq framework.
 * @nb:		the notifier_block (supposed to be devfreq->nb)
 * @type:	not used
 * @devp:	not used
 *
 * Called by a notifier that uses devfreq->nb.
 */
static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
				 void *devp)
{
	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
	int ret;

	mutex_lock(&devfreq->lock);
	ret = update_devfreq(devfreq);
	mutex_unlock(&devfreq->lock);

	return ret;
}

/**
 * _remove_devfreq() - Remove devfreq from the list and release its resources.
 * @devfreq:	the devfreq struct
 * @skip:	skip calling device_unregister().
 */
static void _remove_devfreq(struct devfreq *devfreq, bool skip)
{
	mutex_lock(&devfreq_list_lock);
	if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
		mutex_unlock(&devfreq_list_lock);
		dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
		return;
	}
	list_del(&devfreq->node);
	mutex_unlock(&devfreq_list_lock);

	if (devfreq->governor)
		devfreq->governor->event_handler(devfreq,
						 DEVFREQ_GOV_STOP, NULL);

	if (devfreq->profile->exit)
		devfreq->profile->exit(devfreq->dev.parent);

	if (!skip && get_device(&devfreq->dev)) {
		device_unregister(&devfreq->dev);
		put_device(&devfreq->dev);
	}

	mutex_destroy(&devfreq->lock);
	kfree(devfreq);
}

/**
 * devfreq_dev_release() - Callback for struct device to release the device.
 * @dev:	the devfreq device
 *
 * This calls _remove_devfreq() if _remove_devfreq() is not called.
 * Note that devfreq_dev_release() could be called by _remove_devfreq() as
 * well as by others unregistering the device.
 */
static void devfreq_dev_release(struct device *dev)
{
	struct devfreq *devfreq = to_devfreq(dev);

	_remove_devfreq(devfreq, true);
}

/**
 * devfreq_add_device() - Add devfreq feature to the device
 * @dev:	the device to add devfreq feature.
 * @profile:	device-specific profile to run devfreq.
 * @governor_name:	name of the policy to choose frequency.
 * @data:	private data for the governor. The devfreq framework does not
 *		touch this value.
 */
struct devfreq *devfreq_add_device(struct device *dev,
				   struct devfreq_dev_profile *profile,
				   const char *governor_name,
				   void *data)
{
	struct devfreq *devfreq;
	struct devfreq_governor *governor;
	int err = 0;

	if (!dev || !profile || !governor_name) {
		dev_err(dev, "%s: Invalid parameters.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&devfreq_list_lock);
	devfreq = find_device_devfreq(dev);
	mutex_unlock(&devfreq_list_lock);
	if (!IS_ERR(devfreq)) {
		dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
		err = -EINVAL;
		goto err_out;
	}

	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
	if (!devfreq) {
		dev_err(dev, "%s: Unable to create devfreq for the device\n",
			__func__);
		err = -ENOMEM;
		goto err_out;
	}

	mutex_init(&devfreq->lock);
	mutex_lock(&devfreq->lock);
	devfreq->dev.parent = dev;
	devfreq->dev.class = devfreq_class;
	devfreq->dev.release = devfreq_dev_release;
	devfreq->profile = profile;
	strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
	devfreq->previous_freq = profile->initial_freq;
	devfreq->data = data;
	devfreq->nb.notifier_call = devfreq_notifier_call;

	devfreq->trans_table = devm_kzalloc(dev, sizeof(unsigned int) *
						devfreq->profile->max_state *
						devfreq->profile->max_state,
						GFP_KERNEL);
	devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned long) *
						devfreq->profile->max_state,
						GFP_KERNEL);
	devfreq->last_stat_updated = jiffies;

	dev_set_name(&devfreq->dev, "%s", dev_name(dev));
	err = device_register(&devfreq->dev);
	if (err) {
		put_device(&devfreq->dev);
		mutex_unlock(&devfreq->lock);
		goto err_dev;
	}

	mutex_unlock(&devfreq->lock);

	mutex_lock(&devfreq_list_lock);
	list_add(&devfreq->node, &devfreq_list);

	governor = find_devfreq_governor(devfreq->governor_name);
	if (!IS_ERR(governor))
		devfreq->governor = governor;
	if (devfreq->governor)
		err = devfreq->governor->event_handler(devfreq,
					DEVFREQ_GOV_START, NULL);
	mutex_unlock(&devfreq_list_lock);
	if (err) {
		dev_err(dev, "%s: Unable to start governor for the device\n",
			__func__);
		goto err_init;
	}

	return devfreq;

err_init:
	list_del(&devfreq->node);
	device_unregister(&devfreq->dev);
err_dev:
	kfree(devfreq);
err_out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(devfreq_add_device);

/**
 * devfreq_remove_device() - Remove devfreq feature from a device.
 * @devfreq:	the devfreq instance to be removed
 *
 * The opposite of devfreq_add_device().
 */
int devfreq_remove_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	_remove_devfreq(devfreq, false);

	return 0;
}
EXPORT_SYMBOL(devfreq_remove_device);
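
/*
 * Illustrative sketch (not part of the original code): how a device driver
 * would typically hook itself up to devfreq from its probe()/remove() paths.
 * The "foo_*" names are hypothetical.
 *
 *	static struct devfreq_dev_profile foo_profile = {
 *		.initial_freq	= 200000000,
 *		.polling_ms	= 100,
 *		.target		= foo_target,		// sets the new frequency
 *		.get_dev_status	= foo_get_dev_status,	// reports busy/total time
 *	};
 *
 *	// in foo_probe():
 *	foo->devfreq = devfreq_add_device(dev, &foo_profile,
 *					  "simple_ondemand", NULL);
 *	if (IS_ERR(foo->devfreq))
 *		return PTR_ERR(foo->devfreq);
 *
 *	// in foo_remove():
 *	devfreq_remove_device(foo->devfreq);
 */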

/**
 * devfreq_suspend_device() - Suspend devfreq of a device.
 * @devfreq:	the devfreq instance to be suspended
 *
 * This function is intended to be called by the pm callbacks
 * (e.g., runtime_suspend, suspend) of the device driver that
 * holds the devfreq.
 */
int devfreq_suspend_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	if (!devfreq->governor)
		return 0;

	return devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_SUSPEND, NULL);
}
EXPORT_SYMBOL(devfreq_suspend_device);

/**
 * devfreq_resume_device() - Resume devfreq of a device.
 * @devfreq:	the devfreq instance to be resumed
 *
 * This function is intended to be called by the pm callbacks
 * (e.g., runtime_resume, resume) of the device driver that
 * holds the devfreq.
 */
int devfreq_resume_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	if (!devfreq->governor)
		return 0;

	return devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_RESUME, NULL);
}
EXPORT_SYMBOL(devfreq_resume_device);
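
/*
 * Illustrative sketch (not part of the original code): wiring the suspend/
 * resume helpers above into a driver's runtime PM callbacks.  The "foo_*"
 * names are hypothetical.
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		return devfreq_suspend_device(foo->devfreq);
 *	}
 *
 *	static int foo_runtime_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		return devfreq_resume_device(foo->devfreq);
 *	}
 */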

/**
 * devfreq_add_governor() - Add devfreq governor
 * @governor:	the devfreq governor to be added
 */
int devfreq_add_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (!IS_ERR(g)) {
		pr_err("%s: governor %s already registered\n", __func__,
		       g->name);
		err = -EINVAL;
		goto err_out;
	}

	list_add(&governor->node, &devfreq_governor_list);

	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret = 0;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* The following should never occur */
			if (devfreq->governor) {
				dev_warn(dev,
					 "%s: Governor %s already present\n",
					 __func__, devfreq->governor->name);
				ret = devfreq->governor->event_handler(devfreq,
							DEVFREQ_GOV_STOP, NULL);
				if (ret) {
					dev_warn(dev,
						 "%s: Governor %s stop = %d\n",
						 __func__,
						 devfreq->governor->name, ret);
				}
				/* Fall through */
			}
			devfreq->governor = governor;
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_START, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s start=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
		}
	}

err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_add_governor);

/**
 * devfreq_remove_governor() - Remove devfreq governor
 * @governor:	the devfreq governor to be removed
 */
int devfreq_remove_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (IS_ERR(g)) {
		pr_err("%s: governor %s not registered\n", __func__,
		       governor->name);
		err = PTR_ERR(g);
		goto err_out;
	}
	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* we should have a devfreq governor! */
			if (!devfreq->governor) {
				dev_warn(dev, "%s: Governor %s NOT present\n",
					 __func__, governor->name);
				continue;
			}
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_STOP, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s stop=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
			devfreq->governor = NULL;
		}
	}

	list_del(&governor->node);
err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_remove_governor);

static ssize_t governor_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	if (!to_devfreq(dev)->governor)
		return -EINVAL;

	return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
}

static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	int ret;
	char str_governor[DEVFREQ_NAME_LEN + 1];
	struct devfreq_governor *governor;

	ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&devfreq_list_lock);
	governor = find_devfreq_governor(str_governor);
	if (IS_ERR(governor)) {
		ret = PTR_ERR(governor);
		goto out;
	}
	if (df->governor == governor)
		goto out;

	if (df->governor) {
		ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
		if (ret) {
			dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
				 __func__, df->governor->name, ret);
			goto out;
		}
	}
	df->governor = governor;
	strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
	if (ret)
		dev_warn(dev, "%s: Governor %s not started(%d)\n",
			 __func__, df->governor->name, ret);
out:
	mutex_unlock(&devfreq_list_lock);

	if (!ret)
		ret = count;
	return ret;
}
static DEVICE_ATTR_RW(governor);

static ssize_t available_governors_show(struct device *d,
					struct device_attribute *attr,
					char *buf)
{
	struct devfreq_governor *tmp_governor;
	ssize_t count = 0;

	mutex_lock(&devfreq_list_lock);
	list_for_each_entry(tmp_governor, &devfreq_governor_list, node)
		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
				   "%s ", tmp_governor->name);
	mutex_unlock(&devfreq_list_lock);

	/* Truncate the trailing space */
	if (count)
		count--;

	count += sprintf(&buf[count], "\n");

	return count;
}
static DEVICE_ATTR_RO(available_governors);

static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	unsigned long freq;
	struct devfreq *devfreq = to_devfreq(dev);

	if (devfreq->profile->get_cur_freq &&
		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
		return sprintf(buf, "%lu\n", freq);

	return sprintf(buf, "%lu\n", devfreq->previous_freq);
}
static DEVICE_ATTR_RO(cur_freq);

static ssize_t target_freq_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
}
static DEVICE_ATTR_RO(target_freq);

static ssize_t polling_interval_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
}

static ssize_t polling_interval_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned int value;
	int ret;

	if (!df->governor)
		return -EINVAL;

	ret = sscanf(buf, "%u", &value);
	if (ret != 1)
		return -EINVAL;

	df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
	ret = count;

	return ret;
}
static DEVICE_ATTR_RW(polling_interval);

static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;
	unsigned long max;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&df->lock);
	max = df->max_freq;
	if (value && max && value > max) {
		ret = -EINVAL;
		goto unlock;
	}

	df->min_freq = value;
	update_devfreq(df);
	ret = count;
unlock:
	mutex_unlock(&df->lock);
	return ret;
}

static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->min_freq);
}

static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;
	unsigned long min;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&df->lock);
	min = df->min_freq;
	if (value && min && value < min) {
		ret = -EINVAL;
		goto unlock;
	}

	df->max_freq = value;
	update_devfreq(df);
	ret = count;
unlock:
	mutex_unlock(&df->lock);
	return ret;
}
static DEVICE_ATTR_RW(min_freq);

static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq);
}
static DEVICE_ATTR_RW(max_freq);

static ssize_t available_frequencies_show(struct device *d,
					  struct device_attribute *attr,
					  char *buf)
{
	struct devfreq *df = to_devfreq(d);
	struct device *dev = df->dev.parent;
	struct dev_pm_opp *opp;
	ssize_t count = 0;
	unsigned long freq = 0;

	rcu_read_lock();
	do {
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;

		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
				   "%lu ", freq);
		freq++;
	} while (1);
	rcu_read_unlock();

	/* Truncate the trailing space */
	if (count)
		count--;

	count += sprintf(&buf[count], "\n");

	return count;
}
static DEVICE_ATTR_RO(available_frequencies);

static ssize_t trans_stat_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct devfreq *devfreq = to_devfreq(dev);
	ssize_t len;
	int i, j;
	unsigned int max_state = devfreq->profile->max_state;

	if (!devfreq->stop_polling &&
			devfreq_update_status(devfreq, devfreq->previous_freq))
		return 0;

	len = sprintf(buf, " From : To\n");
	len += sprintf(buf + len, " :");
	for (i = 0; i < max_state; i++)
		len += sprintf(buf + len, "%8u",
			       devfreq->profile->freq_table[i]);

	len += sprintf(buf + len, " time(ms)\n");

	for (i = 0; i < max_state; i++) {
		if (devfreq->profile->freq_table[i]
					== devfreq->previous_freq) {
			len += sprintf(buf + len, "*");
		} else {
			len += sprintf(buf + len, " ");
		}
		len += sprintf(buf + len, "%8u:",
			       devfreq->profile->freq_table[i]);
		for (j = 0; j < max_state; j++)
			len += sprintf(buf + len, "%8u",
				devfreq->trans_table[(i * max_state) + j]);
		len += sprintf(buf + len, "%10u\n",
			jiffies_to_msecs(devfreq->time_in_state[i]));
	}

	len += sprintf(buf + len, "Total transition : %u\n",
		       devfreq->total_trans);
	return len;
}
static DEVICE_ATTR_RO(trans_stat);

static struct attribute *devfreq_attrs[] = {
	&dev_attr_governor.attr,
	&dev_attr_available_governors.attr,
	&dev_attr_cur_freq.attr,
	&dev_attr_available_frequencies.attr,
	&dev_attr_target_freq.attr,
	&dev_attr_polling_interval.attr,
	&dev_attr_min_freq.attr,
	&dev_attr_max_freq.attr,
	&dev_attr_trans_stat.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devfreq);

static int __init devfreq_init(void)
{
	devfreq_class = class_create(THIS_MODULE, "devfreq");
	if (IS_ERR(devfreq_class)) {
		pr_err("%s: couldn't create class\n", __FILE__);
		return PTR_ERR(devfreq_class);
	}

	devfreq_wq = create_freezable_workqueue("devfreq_wq");
	if (!devfreq_wq) {
		class_destroy(devfreq_class);
		pr_err("%s: couldn't create workqueue\n", __FILE__);
		return -ENOMEM;
	}
	devfreq_class->dev_groups = devfreq_groups;

	return 0;
}
subsys_initcall(devfreq_init);

static void __exit devfreq_exit(void)
{
	class_destroy(devfreq_class);
	destroy_workqueue(devfreq_wq);
}
module_exit(devfreq_exit);

/*
 * The following are helper functions for devfreq user device drivers with
 * OPP framework.
 */

/**
 * devfreq_recommended_opp() - Helper function to get proper OPP for the
 *			       freq value given to target callback.
 * @dev:	The devfreq user device. (parent of devfreq)
 * @freq:	The frequency given to target function
 * @flags:	Flags handed from devfreq framework.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
					   unsigned long *freq,
					   u32 flags)
{
	struct dev_pm_opp *opp;

	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
		/* The freq is an upper bound. opp should be lower */
		opp = dev_pm_opp_find_freq_floor(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ERANGE))
			opp = dev_pm_opp_find_freq_ceil(dev, freq);
	} else {
		/* The freq is a lower bound. opp should be higher */
		opp = dev_pm_opp_find_freq_ceil(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ERANGE))
			opp = dev_pm_opp_find_freq_floor(dev, freq);
	}

	return opp;
}
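
/*
 * Illustrative sketch (not part of the original code): a driver's
 * devfreq_dev_profile->target() callback using devfreq_recommended_opp()
 * under rcu_read_lock(), as required by the locking note above.  The
 * "foo_target" name is hypothetical.
 *
 *	static int foo_target(struct device *dev, unsigned long *freq,
 *			      u32 flags)
 *	{
 *		struct dev_pm_opp *opp;
 *		unsigned long volt;
 *
 *		rcu_read_lock();
 *		opp = devfreq_recommended_opp(dev, freq, flags);
 *		if (IS_ERR(opp)) {
 *			rcu_read_unlock();
 *			return PTR_ERR(opp);
 *		}
 *		volt = dev_pm_opp_get_voltage(opp);
 *		rcu_read_unlock();
 *
 *		// program the clock/regulator for *freq and volt here
 *		return 0;
 *	}
 */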

/**
 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
 *				     of any changes in the OPP availability
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	struct srcu_notifier_head *nh;
	int ret = 0;

	rcu_read_lock();
	nh = dev_pm_opp_get_notifier(dev);
	if (IS_ERR(nh))
		ret = PTR_ERR(nh);
	rcu_read_unlock();
	if (!ret)
		ret = srcu_notifier_chain_register(nh, &devfreq->nb);

	return ret;
}

/**
 * devfreq_unregister_opp_notifier() - Helper function to stop notifying
 *				       devfreq of any changes in the OPP
 *				       availability
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 *
 * At exit() callback of devfreq_dev_profile, this must be included if
 * devfreq_recommended_opp is used.
 */
int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	struct srcu_notifier_head *nh;
	int ret = 0;

	rcu_read_lock();
	nh = dev_pm_opp_get_notifier(dev);
	if (IS_ERR(nh))
		ret = PTR_ERR(nh);
	rcu_read_unlock();
	if (!ret)
		ret = srcu_notifier_chain_unregister(nh, &devfreq->nb);

	return ret;
}

MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_DESCRIPTION("devfreq class support");
MODULE_LICENSE("GPL");
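
/*
 * Illustrative sketch (not part of the original code): pairing the OPP
 * notifier helpers above with a driver's devfreq setup.  The "foo_*" names
 * are hypothetical.
 *
 *	// in foo_probe(), after devfreq_add_device() succeeded:
 *	ret = devfreq_register_opp_notifier(dev, foo->devfreq);
 *
 *	// in the profile's exit() callback:
 *	static void foo_exit(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		devfreq_unregister_opp_notifier(dev, foo->devfreq);
 *	}
 */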