1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Generic OPP Interface 4 * 5 * Copyright (C) 2009-2010 Texas Instruments Incorporated. 6 * Nishanth Menon 7 * Romit Dasgupta 8 * Kevin Hilman 9 */ 10 11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 12 13 #include <linux/clk.h> 14 #include <linux/errno.h> 15 #include <linux/err.h> 16 #include <linux/slab.h> 17 #include <linux/device.h> 18 #include <linux/export.h> 19 #include <linux/pm_domain.h> 20 #include <linux/regulator/consumer.h> 21 22 #include "opp.h" 23 24 /* 25 * The root of the list of all opp-tables. All opp_table structures branch off 26 * from here, with each opp_table containing the list of opps it supports in 27 * various states of availability. 28 */ 29 LIST_HEAD(opp_tables); 30 31 /* OPP tables with uninitialized required OPPs */ 32 LIST_HEAD(lazy_opp_tables); 33 34 /* Lock to allow exclusive modification to the device and opp lists */ 35 DEFINE_MUTEX(opp_table_lock); 36 /* Flag indicating that opp_tables list is being updated at the moment */ 37 static bool opp_tables_busy; 38 39 static bool _find_opp_dev(const struct device *dev, struct opp_table *opp_table) 40 { 41 struct opp_device *opp_dev; 42 bool found = false; 43 44 mutex_lock(&opp_table->lock); 45 list_for_each_entry(opp_dev, &opp_table->dev_list, node) 46 if (opp_dev->dev == dev) { 47 found = true; 48 break; 49 } 50 51 mutex_unlock(&opp_table->lock); 52 return found; 53 } 54 55 static struct opp_table *_find_opp_table_unlocked(struct device *dev) 56 { 57 struct opp_table *opp_table; 58 59 list_for_each_entry(opp_table, &opp_tables, node) { 60 if (_find_opp_dev(dev, opp_table)) { 61 _get_opp_table_kref(opp_table); 62 return opp_table; 63 } 64 } 65 66 return ERR_PTR(-ENODEV); 67 } 68 69 /** 70 * _find_opp_table() - find opp_table struct using device pointer 71 * @dev: device pointer used to lookup OPP table 72 * 73 * Search OPP table for one containing matching device. 74 * 75 * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or 76 * -EINVAL based on type of error. 77 * 78 * The callers must call dev_pm_opp_put_opp_table() after the table is used. 79 */ 80 struct opp_table *_find_opp_table(struct device *dev) 81 { 82 struct opp_table *opp_table; 83 84 if (IS_ERR_OR_NULL(dev)) { 85 pr_err("%s: Invalid parameters\n", __func__); 86 return ERR_PTR(-EINVAL); 87 } 88 89 mutex_lock(&opp_table_lock); 90 opp_table = _find_opp_table_unlocked(dev); 91 mutex_unlock(&opp_table_lock); 92 93 return opp_table; 94 } 95 96 /** 97 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp 98 * @opp: opp for which voltage has to be returned for 99 * 100 * Return: voltage in micro volt corresponding to the opp, else 101 * return 0 102 * 103 * This is useful only for devices with single power supply. 
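 *
 * Example (illustrative sketch of a hypothetical caller, not taken from an
 * in-tree user): look up an OPP, read its voltage and drop the reference:
 *
 *	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
 *	if (!IS_ERR(opp)) {
 *		u_volt = dev_pm_opp_get_voltage(opp);
 *		dev_pm_opp_put(opp);
 *	}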
104 */ 105 unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) 106 { 107 if (IS_ERR_OR_NULL(opp)) { 108 pr_err("%s: Invalid parameters\n", __func__); 109 return 0; 110 } 111 112 return opp->supplies[0].u_volt; 113 } 114 EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage); 115 116 /** 117 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp 118 * @opp: opp for which frequency has to be returned for 119 * 120 * Return: frequency in hertz corresponding to the opp, else 121 * return 0 122 */ 123 unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) 124 { 125 if (IS_ERR_OR_NULL(opp)) { 126 pr_err("%s: Invalid parameters\n", __func__); 127 return 0; 128 } 129 130 return opp->rate; 131 } 132 EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq); 133 134 /** 135 * dev_pm_opp_get_level() - Gets the level corresponding to an available opp 136 * @opp: opp for which level value has to be returned for 137 * 138 * Return: level read from device tree corresponding to the opp, else 139 * return 0. 140 */ 141 unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp) 142 { 143 if (IS_ERR_OR_NULL(opp) || !opp->available) { 144 pr_err("%s: Invalid parameters\n", __func__); 145 return 0; 146 } 147 148 return opp->level; 149 } 150 EXPORT_SYMBOL_GPL(dev_pm_opp_get_level); 151 152 /** 153 * dev_pm_opp_get_required_pstate() - Gets the required performance state 154 * corresponding to an available opp 155 * @opp: opp for which performance state has to be returned for 156 * @index: index of the required opp 157 * 158 * Return: performance state read from device tree corresponding to the 159 * required opp, else return 0. 160 */ 161 unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp, 162 unsigned int index) 163 { 164 if (IS_ERR_OR_NULL(opp) || !opp->available || 165 index >= opp->opp_table->required_opp_count) { 166 pr_err("%s: Invalid parameters\n", __func__); 167 return 0; 168 } 169 170 /* required-opps not fully initialized yet */ 171 if (lazy_linking_pending(opp->opp_table)) 172 return 0; 173 174 return opp->required_opps[index]->pstate; 175 } 176 EXPORT_SYMBOL_GPL(dev_pm_opp_get_required_pstate); 177 178 /** 179 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not 180 * @opp: opp for which turbo mode is being verified 181 * 182 * Turbo OPPs are not for normal use, and can be enabled (under certain 183 * conditions) for short duration of times to finish high throughput work 184 * quickly. Running on them for longer times may overheat the chip. 185 * 186 * Return: true if opp is turbo opp, else false. 187 */ 188 bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp) 189 { 190 if (IS_ERR_OR_NULL(opp) || !opp->available) { 191 pr_err("%s: Invalid parameters\n", __func__); 192 return false; 193 } 194 195 return opp->turbo; 196 } 197 EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo); 198 199 /** 200 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds 201 * @dev: device for which we do this operation 202 * 203 * Return: This function returns the max clock latency in nanoseconds. 
204 */ 205 unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev) 206 { 207 struct opp_table *opp_table; 208 unsigned long clock_latency_ns; 209 210 opp_table = _find_opp_table(dev); 211 if (IS_ERR(opp_table)) 212 return 0; 213 214 clock_latency_ns = opp_table->clock_latency_ns_max; 215 216 dev_pm_opp_put_opp_table(opp_table); 217 218 return clock_latency_ns; 219 } 220 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency); 221 222 /** 223 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds 224 * @dev: device for which we do this operation 225 * 226 * Return: This function returns the max voltage latency in nanoseconds. 227 */ 228 unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) 229 { 230 struct opp_table *opp_table; 231 struct dev_pm_opp *opp; 232 struct regulator *reg; 233 unsigned long latency_ns = 0; 234 int ret, i, count; 235 struct { 236 unsigned long min; 237 unsigned long max; 238 } *uV; 239 240 opp_table = _find_opp_table(dev); 241 if (IS_ERR(opp_table)) 242 return 0; 243 244 /* Regulator may not be required for the device */ 245 if (!opp_table->regulators) 246 goto put_opp_table; 247 248 count = opp_table->regulator_count; 249 250 uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL); 251 if (!uV) 252 goto put_opp_table; 253 254 mutex_lock(&opp_table->lock); 255 256 for (i = 0; i < count; i++) { 257 uV[i].min = ~0; 258 uV[i].max = 0; 259 260 list_for_each_entry(opp, &opp_table->opp_list, node) { 261 if (!opp->available) 262 continue; 263 264 if (opp->supplies[i].u_volt_min < uV[i].min) 265 uV[i].min = opp->supplies[i].u_volt_min; 266 if (opp->supplies[i].u_volt_max > uV[i].max) 267 uV[i].max = opp->supplies[i].u_volt_max; 268 } 269 } 270 271 mutex_unlock(&opp_table->lock); 272 273 /* 274 * The caller needs to ensure that opp_table (and hence the regulator) 275 * isn't freed, while we are executing this routine. 276 */ 277 for (i = 0; i < count; i++) { 278 reg = opp_table->regulators[i]; 279 ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max); 280 if (ret > 0) 281 latency_ns += ret * 1000; 282 } 283 284 kfree(uV); 285 put_opp_table: 286 dev_pm_opp_put_opp_table(opp_table); 287 288 return latency_ns; 289 } 290 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency); 291 292 /** 293 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in 294 * nanoseconds 295 * @dev: device for which we do this operation 296 * 297 * Return: This function returns the max transition latency, in nanoseconds, to 298 * switch from one OPP to other. 
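 *
 * The value is simply the sum of the maximum voltage and clock latencies.
 * Example (illustrative only): a cpufreq-style driver might use it as its
 * transition latency:
 *
 *	policy->cpuinfo.transition_latency =
 *		dev_pm_opp_get_max_transition_latency(cpu_dev);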
299 */ 300 unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev) 301 { 302 return dev_pm_opp_get_max_volt_latency(dev) + 303 dev_pm_opp_get_max_clock_latency(dev); 304 } 305 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency); 306 307 /** 308 * dev_pm_opp_get_suspend_opp_freq() - Get frequency of suspend opp in Hz 309 * @dev: device for which we do this operation 310 * 311 * Return: This function returns the frequency of the OPP marked as suspend_opp 312 * if one is available, else returns 0; 313 */ 314 unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev) 315 { 316 struct opp_table *opp_table; 317 unsigned long freq = 0; 318 319 opp_table = _find_opp_table(dev); 320 if (IS_ERR(opp_table)) 321 return 0; 322 323 if (opp_table->suspend_opp && opp_table->suspend_opp->available) 324 freq = dev_pm_opp_get_freq(opp_table->suspend_opp); 325 326 dev_pm_opp_put_opp_table(opp_table); 327 328 return freq; 329 } 330 EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq); 331 332 int _get_opp_count(struct opp_table *opp_table) 333 { 334 struct dev_pm_opp *opp; 335 int count = 0; 336 337 mutex_lock(&opp_table->lock); 338 339 list_for_each_entry(opp, &opp_table->opp_list, node) { 340 if (opp->available) 341 count++; 342 } 343 344 mutex_unlock(&opp_table->lock); 345 346 return count; 347 } 348 349 /** 350 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table 351 * @dev: device for which we do this operation 352 * 353 * Return: This function returns the number of available opps if there are any, 354 * else returns 0 if none or the corresponding error value. 355 */ 356 int dev_pm_opp_get_opp_count(struct device *dev) 357 { 358 struct opp_table *opp_table; 359 int count; 360 361 opp_table = _find_opp_table(dev); 362 if (IS_ERR(opp_table)) { 363 count = PTR_ERR(opp_table); 364 dev_dbg(dev, "%s: OPP table not found (%d)\n", 365 __func__, count); 366 return count; 367 } 368 369 count = _get_opp_count(opp_table); 370 dev_pm_opp_put_opp_table(opp_table); 371 372 return count; 373 } 374 EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count); 375 376 /** 377 * dev_pm_opp_find_freq_exact() - search for an exact frequency 378 * @dev: device for which we do this operation 379 * @freq: frequency to search for 380 * @available: true/false - match for available opp 381 * 382 * Return: Searches for exact match in the opp table and returns pointer to the 383 * matching opp if found, else returns ERR_PTR in case of error and should 384 * be handled using IS_ERR. Error return values can be: 385 * EINVAL: for bad pointer 386 * ERANGE: no match found for search 387 * ENODEV: if device not found in list of registered devices 388 * 389 * Note: available is a modifier for the search. if available=true, then the 390 * match is for exact matching frequency and is available in the stored OPP 391 * table. if false, the match is for exact frequency which is not available. 392 * 393 * This provides a mechanism to enable an opp which is not available currently 394 * or the opposite as well. 395 * 396 * The callers are required to call dev_pm_opp_put() for the returned OPP after 397 * use. 
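 *
 * Example (hypothetical caller, illustrative only):
 *
 *	opp = dev_pm_opp_find_freq_exact(dev, 800000000, true);
 *	if (IS_ERR(opp))
 *		return PTR_ERR(opp);
 *	...
 *	dev_pm_opp_put(opp);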
 */
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
					      unsigned long freq,
					      bool available)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int r = PTR_ERR(opp_table);

		dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available == available &&
		    temp_opp->rate == freq) {
			opp = temp_opp;

			/* Increment the reference count of OPP */
			dev_pm_opp_get(opp);
			break;
		}
	}

	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);

/**
 * dev_pm_opp_find_level_exact() - search for an exact level
 * @dev: device for which we do this operation
 * @level: level to search for
 *
 * Return: Searches for exact match in the opp table and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
					       unsigned int level)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int r = PTR_ERR(opp_table);

		dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->level == level) {
			opp = temp_opp;

			/* Increment the reference count of OPP */
			dev_pm_opp_get(opp);
			break;
		}
	}

	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);

/**
 * dev_pm_opp_find_level_ceil() - search for a rounded-up level
 * @dev: device for which we do this operation
 * @level: level to search for
 *
 * Return: Searches for a rounded-up match in the opp table and returns pointer
 * to the matching opp if found, else returns ERR_PTR in case of error and
 * should be handled using IS_ERR. Error return values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
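 *
 * Example (illustrative only): request at least level 3 and learn which
 * level was actually granted:
 *
 *	unsigned int level = 3;
 *
 *	opp = dev_pm_opp_find_level_ceil(dev, &level);
 *	if (!IS_ERR(opp))
 *		dev_pm_opp_put(opp);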
 */
struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
					      unsigned int *level)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int r = PTR_ERR(opp_table);

		dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available && temp_opp->level >= *level) {
			opp = temp_opp;
			*level = opp->level;

			/* Increment the reference count of OPP */
			dev_pm_opp_get(opp);
			break;
		}
	}

	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);

static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
						   unsigned long *freq)
{
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available && temp_opp->rate >= *freq) {
			opp = temp_opp;
			*freq = opp->rate;

			/* Increment the reference count of OPP */
			dev_pm_opp_get(opp);
			break;
		}
	}

	mutex_unlock(&opp_table->lock);

	return opp;
}

/**
 * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
 * @dev: device for which we do this operation
 * @freq: Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
					     unsigned long *freq)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp;

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);

	opp = _find_freq_ceil(opp_table, freq);

	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);

/**
 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
 * @dev: device for which we do this operation
 * @freq: Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
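 *
 * Example (illustrative only): pick the highest available OPP at or below
 * 1 GHz; on success @freq is updated to the chosen OPP's rate:
 *
 *	unsigned long freq = 1000000000;
 *
 *	opp = dev_pm_opp_find_freq_floor(dev, &freq);
 *	if (!IS_ERR(opp))
 *		dev_pm_opp_put(opp);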
612 */ 613 struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, 614 unsigned long *freq) 615 { 616 struct opp_table *opp_table; 617 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); 618 619 if (!dev || !freq) { 620 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); 621 return ERR_PTR(-EINVAL); 622 } 623 624 opp_table = _find_opp_table(dev); 625 if (IS_ERR(opp_table)) 626 return ERR_CAST(opp_table); 627 628 mutex_lock(&opp_table->lock); 629 630 list_for_each_entry(temp_opp, &opp_table->opp_list, node) { 631 if (temp_opp->available) { 632 /* go to the next node, before choosing prev */ 633 if (temp_opp->rate > *freq) 634 break; 635 else 636 opp = temp_opp; 637 } 638 } 639 640 /* Increment the reference count of OPP */ 641 if (!IS_ERR(opp)) 642 dev_pm_opp_get(opp); 643 mutex_unlock(&opp_table->lock); 644 dev_pm_opp_put_opp_table(opp_table); 645 646 if (!IS_ERR(opp)) 647 *freq = opp->rate; 648 649 return opp; 650 } 651 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); 652 653 /** 654 * dev_pm_opp_find_freq_ceil_by_volt() - Find OPP with highest frequency for 655 * target voltage. 656 * @dev: Device for which we do this operation. 657 * @u_volt: Target voltage. 658 * 659 * Search for OPP with highest (ceil) frequency and has voltage <= u_volt. 660 * 661 * Return: matching *opp, else returns ERR_PTR in case of error which should be 662 * handled using IS_ERR. 663 * 664 * Error return values can be: 665 * EINVAL: bad parameters 666 * 667 * The callers are required to call dev_pm_opp_put() for the returned OPP after 668 * use. 669 */ 670 struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev, 671 unsigned long u_volt) 672 { 673 struct opp_table *opp_table; 674 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); 675 676 if (!dev || !u_volt) { 677 dev_err(dev, "%s: Invalid argument volt=%lu\n", __func__, 678 u_volt); 679 return ERR_PTR(-EINVAL); 680 } 681 682 opp_table = _find_opp_table(dev); 683 if (IS_ERR(opp_table)) 684 return ERR_CAST(opp_table); 685 686 mutex_lock(&opp_table->lock); 687 688 list_for_each_entry(temp_opp, &opp_table->opp_list, node) { 689 if (temp_opp->available) { 690 if (temp_opp->supplies[0].u_volt > u_volt) 691 break; 692 opp = temp_opp; 693 } 694 } 695 696 /* Increment the reference count of OPP */ 697 if (!IS_ERR(opp)) 698 dev_pm_opp_get(opp); 699 700 mutex_unlock(&opp_table->lock); 701 dev_pm_opp_put_opp_table(opp_table); 702 703 return opp; 704 } 705 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_by_volt); 706 707 static int _set_opp_voltage(struct device *dev, struct regulator *reg, 708 struct dev_pm_opp_supply *supply) 709 { 710 int ret; 711 712 /* Regulator not available for device */ 713 if (IS_ERR(reg)) { 714 dev_dbg(dev, "%s: regulator not available: %ld\n", __func__, 715 PTR_ERR(reg)); 716 return 0; 717 } 718 719 dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__, 720 supply->u_volt_min, supply->u_volt, supply->u_volt_max); 721 722 ret = regulator_set_voltage_triplet(reg, supply->u_volt_min, 723 supply->u_volt, supply->u_volt_max); 724 if (ret) 725 dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n", 726 __func__, supply->u_volt_min, supply->u_volt, 727 supply->u_volt_max, ret); 728 729 return ret; 730 } 731 732 static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk, 733 unsigned long freq) 734 { 735 int ret; 736 737 /* We may reach here for devices which don't change frequency */ 738 if (IS_ERR(clk)) 739 return 0; 740 741 ret = clk_set_rate(clk, freq); 742 if (ret) { 743 
dev_err(dev, "%s: failed to set clock rate: %d\n", __func__, 744 ret); 745 } 746 747 return ret; 748 } 749 750 static int _generic_set_opp_regulator(struct opp_table *opp_table, 751 struct device *dev, 752 struct dev_pm_opp *opp, 753 unsigned long freq, 754 int scaling_down) 755 { 756 struct regulator *reg = opp_table->regulators[0]; 757 struct dev_pm_opp *old_opp = opp_table->current_opp; 758 int ret; 759 760 /* This function only supports single regulator per device */ 761 if (WARN_ON(opp_table->regulator_count > 1)) { 762 dev_err(dev, "multiple regulators are not supported\n"); 763 return -EINVAL; 764 } 765 766 /* Scaling up? Scale voltage before frequency */ 767 if (!scaling_down) { 768 ret = _set_opp_voltage(dev, reg, opp->supplies); 769 if (ret) 770 goto restore_voltage; 771 } 772 773 /* Change frequency */ 774 ret = _generic_set_opp_clk_only(dev, opp_table->clk, freq); 775 if (ret) 776 goto restore_voltage; 777 778 /* Scaling down? Scale voltage after frequency */ 779 if (scaling_down) { 780 ret = _set_opp_voltage(dev, reg, opp->supplies); 781 if (ret) 782 goto restore_freq; 783 } 784 785 /* 786 * Enable the regulator after setting its voltages, otherwise it breaks 787 * some boot-enabled regulators. 788 */ 789 if (unlikely(!opp_table->enabled)) { 790 ret = regulator_enable(reg); 791 if (ret < 0) 792 dev_warn(dev, "Failed to enable regulator: %d", ret); 793 } 794 795 return 0; 796 797 restore_freq: 798 if (_generic_set_opp_clk_only(dev, opp_table->clk, old_opp->rate)) 799 dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n", 800 __func__, old_opp->rate); 801 restore_voltage: 802 /* This shouldn't harm even if the voltages weren't updated earlier */ 803 _set_opp_voltage(dev, reg, old_opp->supplies); 804 805 return ret; 806 } 807 808 static int _set_opp_bw(const struct opp_table *opp_table, 809 struct dev_pm_opp *opp, struct device *dev) 810 { 811 u32 avg, peak; 812 int i, ret; 813 814 if (!opp_table->paths) 815 return 0; 816 817 for (i = 0; i < opp_table->path_count; i++) { 818 if (!opp) { 819 avg = 0; 820 peak = 0; 821 } else { 822 avg = opp->bandwidth[i].avg; 823 peak = opp->bandwidth[i].peak; 824 } 825 ret = icc_set_bw(opp_table->paths[i], avg, peak); 826 if (ret) { 827 dev_err(dev, "Failed to %s bandwidth[%d]: %d\n", 828 opp ? "set" : "remove", i, ret); 829 return ret; 830 } 831 } 832 833 return 0; 834 } 835 836 static int _set_opp_custom(const struct opp_table *opp_table, 837 struct device *dev, struct dev_pm_opp *opp, 838 unsigned long freq) 839 { 840 struct dev_pm_set_opp_data *data = opp_table->set_opp_data; 841 struct dev_pm_opp *old_opp = opp_table->current_opp; 842 int size; 843 844 /* 845 * We support this only if dev_pm_opp_set_regulators() was called 846 * earlier. 847 */ 848 if (opp_table->sod_supplies) { 849 size = sizeof(*old_opp->supplies) * opp_table->regulator_count; 850 memcpy(data->old_opp.supplies, old_opp->supplies, size); 851 memcpy(data->new_opp.supplies, opp->supplies, size); 852 data->regulator_count = opp_table->regulator_count; 853 } else { 854 data->regulator_count = 0; 855 } 856 857 data->regulators = opp_table->regulators; 858 data->clk = opp_table->clk; 859 data->dev = dev; 860 data->old_opp.rate = old_opp->rate; 861 data->new_opp.rate = freq; 862 863 return opp_table->set_opp(data); 864 } 865 866 static int _set_required_opp(struct device *dev, struct device *pd_dev, 867 struct dev_pm_opp *opp, int i) 868 { 869 unsigned int pstate = likely(opp) ? 
opp->required_opps[i]->pstate : 0; 870 int ret; 871 872 if (!pd_dev) 873 return 0; 874 875 ret = dev_pm_genpd_set_performance_state(pd_dev, pstate); 876 if (ret) { 877 dev_err(dev, "Failed to set performance rate of %s: %d (%d)\n", 878 dev_name(pd_dev), pstate, ret); 879 } 880 881 return ret; 882 } 883 884 /* This is only called for PM domain for now */ 885 static int _set_required_opps(struct device *dev, 886 struct opp_table *opp_table, 887 struct dev_pm_opp *opp, bool up) 888 { 889 struct opp_table **required_opp_tables = opp_table->required_opp_tables; 890 struct device **genpd_virt_devs = opp_table->genpd_virt_devs; 891 int i, ret = 0; 892 893 if (!required_opp_tables) 894 return 0; 895 896 /* required-opps not fully initialized yet */ 897 if (lazy_linking_pending(opp_table)) 898 return -EBUSY; 899 900 /* Single genpd case */ 901 if (!genpd_virt_devs) 902 return _set_required_opp(dev, dev, opp, 0); 903 904 /* Multiple genpd case */ 905 906 /* 907 * Acquire genpd_virt_dev_lock to make sure we don't use a genpd_dev 908 * after it is freed from another thread. 909 */ 910 mutex_lock(&opp_table->genpd_virt_dev_lock); 911 912 /* Scaling up? Set required OPPs in normal order, else reverse */ 913 if (up) { 914 for (i = 0; i < opp_table->required_opp_count; i++) { 915 ret = _set_required_opp(dev, genpd_virt_devs[i], opp, i); 916 if (ret) 917 break; 918 } 919 } else { 920 for (i = opp_table->required_opp_count - 1; i >= 0; i--) { 921 ret = _set_required_opp(dev, genpd_virt_devs[i], opp, i); 922 if (ret) 923 break; 924 } 925 } 926 927 mutex_unlock(&opp_table->genpd_virt_dev_lock); 928 929 return ret; 930 } 931 932 static void _find_current_opp(struct device *dev, struct opp_table *opp_table) 933 { 934 struct dev_pm_opp *opp = ERR_PTR(-ENODEV); 935 unsigned long freq; 936 937 if (!IS_ERR(opp_table->clk)) { 938 freq = clk_get_rate(opp_table->clk); 939 opp = _find_freq_ceil(opp_table, &freq); 940 } 941 942 /* 943 * Unable to find the current OPP ? Pick the first from the list since 944 * it is in ascending order, otherwise rest of the code will need to 945 * make special checks to validate current_opp. 946 */ 947 if (IS_ERR(opp)) { 948 mutex_lock(&opp_table->lock); 949 opp = list_first_entry(&opp_table->opp_list, struct dev_pm_opp, node); 950 dev_pm_opp_get(opp); 951 mutex_unlock(&opp_table->lock); 952 } 953 954 opp_table->current_opp = opp; 955 } 956 957 static int _disable_opp_table(struct device *dev, struct opp_table *opp_table) 958 { 959 int ret; 960 961 if (!opp_table->enabled) 962 return 0; 963 964 /* 965 * Some drivers need to support cases where some platforms may 966 * have OPP table for the device, while others don't and 967 * opp_set_rate() just needs to behave like clk_set_rate(). 
968 */ 969 if (!_get_opp_count(opp_table)) 970 return 0; 971 972 ret = _set_opp_bw(opp_table, NULL, dev); 973 if (ret) 974 return ret; 975 976 if (opp_table->regulators) 977 regulator_disable(opp_table->regulators[0]); 978 979 ret = _set_required_opps(dev, opp_table, NULL, false); 980 981 opp_table->enabled = false; 982 return ret; 983 } 984 985 static int _set_opp(struct device *dev, struct opp_table *opp_table, 986 struct dev_pm_opp *opp, unsigned long freq) 987 { 988 struct dev_pm_opp *old_opp; 989 int scaling_down, ret; 990 991 if (unlikely(!opp)) 992 return _disable_opp_table(dev, opp_table); 993 994 /* Find the currently set OPP if we don't know already */ 995 if (unlikely(!opp_table->current_opp)) 996 _find_current_opp(dev, opp_table); 997 998 old_opp = opp_table->current_opp; 999 1000 /* Return early if nothing to do */ 1001 if (old_opp == opp && opp_table->current_rate == freq && 1002 opp_table->enabled) { 1003 dev_dbg(dev, "%s: OPPs are same, nothing to do\n", __func__); 1004 return 0; 1005 } 1006 1007 dev_dbg(dev, "%s: switching OPP: Freq %lu -> %lu Hz, Level %u -> %u, Bw %u -> %u\n", 1008 __func__, opp_table->current_rate, freq, old_opp->level, 1009 opp->level, old_opp->bandwidth ? old_opp->bandwidth[0].peak : 0, 1010 opp->bandwidth ? opp->bandwidth[0].peak : 0); 1011 1012 scaling_down = _opp_compare_key(old_opp, opp); 1013 if (scaling_down == -1) 1014 scaling_down = 0; 1015 1016 /* Scaling up? Configure required OPPs before frequency */ 1017 if (!scaling_down) { 1018 ret = _set_required_opps(dev, opp_table, opp, true); 1019 if (ret) { 1020 dev_err(dev, "Failed to set required opps: %d\n", ret); 1021 return ret; 1022 } 1023 1024 ret = _set_opp_bw(opp_table, opp, dev); 1025 if (ret) { 1026 dev_err(dev, "Failed to set bw: %d\n", ret); 1027 return ret; 1028 } 1029 } 1030 1031 if (opp_table->set_opp) { 1032 ret = _set_opp_custom(opp_table, dev, opp, freq); 1033 } else if (opp_table->regulators) { 1034 ret = _generic_set_opp_regulator(opp_table, dev, opp, freq, 1035 scaling_down); 1036 } else { 1037 /* Only frequency scaling */ 1038 ret = _generic_set_opp_clk_only(dev, opp_table->clk, freq); 1039 } 1040 1041 if (ret) 1042 return ret; 1043 1044 /* Scaling down? Configure required OPPs after frequency */ 1045 if (scaling_down) { 1046 ret = _set_opp_bw(opp_table, opp, dev); 1047 if (ret) { 1048 dev_err(dev, "Failed to set bw: %d\n", ret); 1049 return ret; 1050 } 1051 1052 ret = _set_required_opps(dev, opp_table, opp, false); 1053 if (ret) { 1054 dev_err(dev, "Failed to set required opps: %d\n", ret); 1055 return ret; 1056 } 1057 } 1058 1059 opp_table->enabled = true; 1060 dev_pm_opp_put(old_opp); 1061 1062 /* Make sure current_opp doesn't get freed */ 1063 dev_pm_opp_get(opp); 1064 opp_table->current_opp = opp; 1065 opp_table->current_rate = freq; 1066 1067 return ret; 1068 } 1069 1070 /** 1071 * dev_pm_opp_set_rate() - Configure new OPP based on frequency 1072 * @dev: device for which we do this operation 1073 * @target_freq: frequency to achieve 1074 * 1075 * This configures the power-supplies to the levels specified by the OPP 1076 * corresponding to the target_freq, and programs the clock to a value <= 1077 * target_freq, as rounded by clk_round_rate(). Device wanting to run at fmax 1078 * provided by the opp, should have already rounded to the target OPP's 1079 * frequency. 
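 *
 * Passing 0 as @target_freq reverts what was done while enabling the OPPs:
 * bandwidth votes are dropped, the first regulator is disabled and the genpd
 * performance-state votes are removed, while the clock itself is left
 * untouched.
 *
 * Example (hypothetical consumer, illustrative only):
 *
 *	ret = dev_pm_opp_set_rate(dev, target_freq);
 *	if (ret)
 *		dev_err(dev, "failed to switch OPP: %d\n", ret);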
1080 */ 1081 int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) 1082 { 1083 struct opp_table *opp_table; 1084 unsigned long freq = 0, temp_freq; 1085 struct dev_pm_opp *opp = NULL; 1086 int ret; 1087 1088 opp_table = _find_opp_table(dev); 1089 if (IS_ERR(opp_table)) { 1090 dev_err(dev, "%s: device's opp table doesn't exist\n", __func__); 1091 return PTR_ERR(opp_table); 1092 } 1093 1094 if (target_freq) { 1095 /* 1096 * For IO devices which require an OPP on some platforms/SoCs 1097 * while just needing to scale the clock on some others 1098 * we look for empty OPP tables with just a clock handle and 1099 * scale only the clk. This makes dev_pm_opp_set_rate() 1100 * equivalent to a clk_set_rate() 1101 */ 1102 if (!_get_opp_count(opp_table)) { 1103 ret = _generic_set_opp_clk_only(dev, opp_table->clk, target_freq); 1104 goto put_opp_table; 1105 } 1106 1107 freq = clk_round_rate(opp_table->clk, target_freq); 1108 if ((long)freq <= 0) 1109 freq = target_freq; 1110 1111 /* 1112 * The clock driver may support finer resolution of the 1113 * frequencies than the OPP table, don't update the frequency we 1114 * pass to clk_set_rate() here. 1115 */ 1116 temp_freq = freq; 1117 opp = _find_freq_ceil(opp_table, &temp_freq); 1118 if (IS_ERR(opp)) { 1119 ret = PTR_ERR(opp); 1120 dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n", 1121 __func__, freq, ret); 1122 goto put_opp_table; 1123 } 1124 } 1125 1126 ret = _set_opp(dev, opp_table, opp, freq); 1127 1128 if (target_freq) 1129 dev_pm_opp_put(opp); 1130 put_opp_table: 1131 dev_pm_opp_put_opp_table(opp_table); 1132 return ret; 1133 } 1134 EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate); 1135 1136 /** 1137 * dev_pm_opp_set_opp() - Configure device for OPP 1138 * @dev: device for which we do this operation 1139 * @opp: OPP to set to 1140 * 1141 * This configures the device based on the properties of the OPP passed to this 1142 * routine. 1143 * 1144 * Return: 0 on success, a negative error number otherwise. 1145 */ 1146 int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp) 1147 { 1148 struct opp_table *opp_table; 1149 int ret; 1150 1151 opp_table = _find_opp_table(dev); 1152 if (IS_ERR(opp_table)) { 1153 dev_err(dev, "%s: device opp doesn't exist\n", __func__); 1154 return PTR_ERR(opp_table); 1155 } 1156 1157 ret = _set_opp(dev, opp_table, opp, opp ? opp->rate : 0); 1158 dev_pm_opp_put_opp_table(opp_table); 1159 1160 return ret; 1161 } 1162 EXPORT_SYMBOL_GPL(dev_pm_opp_set_opp); 1163 1164 /* OPP-dev Helpers */ 1165 static void _remove_opp_dev(struct opp_device *opp_dev, 1166 struct opp_table *opp_table) 1167 { 1168 opp_debug_unregister(opp_dev, opp_table); 1169 list_del(&opp_dev->node); 1170 kfree(opp_dev); 1171 } 1172 1173 struct opp_device *_add_opp_dev(const struct device *dev, 1174 struct opp_table *opp_table) 1175 { 1176 struct opp_device *opp_dev; 1177 1178 opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL); 1179 if (!opp_dev) 1180 return NULL; 1181 1182 /* Initialize opp-dev */ 1183 opp_dev->dev = dev; 1184 1185 mutex_lock(&opp_table->lock); 1186 list_add(&opp_dev->node, &opp_table->dev_list); 1187 mutex_unlock(&opp_table->lock); 1188 1189 /* Create debugfs entries for the opp_table */ 1190 opp_debug_register(opp_dev, opp_table); 1191 1192 return opp_dev; 1193 } 1194 1195 static struct opp_table *_allocate_opp_table(struct device *dev, int index) 1196 { 1197 struct opp_table *opp_table; 1198 struct opp_device *opp_dev; 1199 int ret; 1200 1201 /* 1202 * Allocate a new OPP table. 
In the infrequent case where a new 1203 * device is needed to be added, we pay this penalty. 1204 */ 1205 opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL); 1206 if (!opp_table) 1207 return ERR_PTR(-ENOMEM); 1208 1209 mutex_init(&opp_table->lock); 1210 mutex_init(&opp_table->genpd_virt_dev_lock); 1211 INIT_LIST_HEAD(&opp_table->dev_list); 1212 INIT_LIST_HEAD(&opp_table->lazy); 1213 1214 /* Mark regulator count uninitialized */ 1215 opp_table->regulator_count = -1; 1216 1217 opp_dev = _add_opp_dev(dev, opp_table); 1218 if (!opp_dev) { 1219 ret = -ENOMEM; 1220 goto err; 1221 } 1222 1223 _of_init_opp_table(opp_table, dev, index); 1224 1225 /* Find interconnect path(s) for the device */ 1226 ret = dev_pm_opp_of_find_icc_paths(dev, opp_table); 1227 if (ret) { 1228 if (ret == -EPROBE_DEFER) 1229 goto remove_opp_dev; 1230 1231 dev_warn(dev, "%s: Error finding interconnect paths: %d\n", 1232 __func__, ret); 1233 } 1234 1235 BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head); 1236 INIT_LIST_HEAD(&opp_table->opp_list); 1237 kref_init(&opp_table->kref); 1238 1239 return opp_table; 1240 1241 remove_opp_dev: 1242 _remove_opp_dev(opp_dev, opp_table); 1243 err: 1244 kfree(opp_table); 1245 return ERR_PTR(ret); 1246 } 1247 1248 void _get_opp_table_kref(struct opp_table *opp_table) 1249 { 1250 kref_get(&opp_table->kref); 1251 } 1252 1253 static struct opp_table *_update_opp_table_clk(struct device *dev, 1254 struct opp_table *opp_table, 1255 bool getclk) 1256 { 1257 int ret; 1258 1259 /* 1260 * Return early if we don't need to get clk or we have already tried it 1261 * earlier. 1262 */ 1263 if (!getclk || IS_ERR(opp_table) || opp_table->clk) 1264 return opp_table; 1265 1266 /* Find clk for the device */ 1267 opp_table->clk = clk_get(dev, NULL); 1268 1269 ret = PTR_ERR_OR_ZERO(opp_table->clk); 1270 if (!ret) 1271 return opp_table; 1272 1273 if (ret == -ENOENT) { 1274 dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret); 1275 return opp_table; 1276 } 1277 1278 dev_pm_opp_put_opp_table(opp_table); 1279 dev_err_probe(dev, ret, "Couldn't find clock\n"); 1280 1281 return ERR_PTR(ret); 1282 } 1283 1284 /* 1285 * We need to make sure that the OPP table for a device doesn't get added twice, 1286 * if this routine gets called in parallel with the same device pointer. 1287 * 1288 * The simplest way to enforce that is to perform everything (find existing 1289 * table and if not found, create a new one) under the opp_table_lock, so only 1290 * one creator gets access to the same. But that expands the critical section 1291 * under the lock and may end up causing circular dependencies with frameworks 1292 * like debugfs, interconnect or clock framework as they may be direct or 1293 * indirect users of OPP core. 1294 * 1295 * And for that reason we have to go for a bit tricky implementation here, which 1296 * uses the opp_tables_busy flag to indicate if another creator is in the middle 1297 * of adding an OPP table and others should wait for it to finish. 1298 */ 1299 struct opp_table *_add_opp_table_indexed(struct device *dev, int index, 1300 bool getclk) 1301 { 1302 struct opp_table *opp_table; 1303 1304 again: 1305 mutex_lock(&opp_table_lock); 1306 1307 opp_table = _find_opp_table_unlocked(dev); 1308 if (!IS_ERR(opp_table)) 1309 goto unlock; 1310 1311 /* 1312 * The opp_tables list or an OPP table's dev_list is getting updated by 1313 * another user, wait for it to finish. 
1314 */ 1315 if (unlikely(opp_tables_busy)) { 1316 mutex_unlock(&opp_table_lock); 1317 cpu_relax(); 1318 goto again; 1319 } 1320 1321 opp_tables_busy = true; 1322 opp_table = _managed_opp(dev, index); 1323 1324 /* Drop the lock to reduce the size of critical section */ 1325 mutex_unlock(&opp_table_lock); 1326 1327 if (opp_table) { 1328 if (!_add_opp_dev(dev, opp_table)) { 1329 dev_pm_opp_put_opp_table(opp_table); 1330 opp_table = ERR_PTR(-ENOMEM); 1331 } 1332 1333 mutex_lock(&opp_table_lock); 1334 } else { 1335 opp_table = _allocate_opp_table(dev, index); 1336 1337 mutex_lock(&opp_table_lock); 1338 if (!IS_ERR(opp_table)) 1339 list_add(&opp_table->node, &opp_tables); 1340 } 1341 1342 opp_tables_busy = false; 1343 1344 unlock: 1345 mutex_unlock(&opp_table_lock); 1346 1347 return _update_opp_table_clk(dev, opp_table, getclk); 1348 } 1349 1350 static struct opp_table *_add_opp_table(struct device *dev, bool getclk) 1351 { 1352 return _add_opp_table_indexed(dev, 0, getclk); 1353 } 1354 1355 struct opp_table *dev_pm_opp_get_opp_table(struct device *dev) 1356 { 1357 return _find_opp_table(dev); 1358 } 1359 EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table); 1360 1361 static void _opp_table_kref_release(struct kref *kref) 1362 { 1363 struct opp_table *opp_table = container_of(kref, struct opp_table, kref); 1364 struct opp_device *opp_dev, *temp; 1365 int i; 1366 1367 /* Drop the lock as soon as we can */ 1368 list_del(&opp_table->node); 1369 mutex_unlock(&opp_table_lock); 1370 1371 if (opp_table->current_opp) 1372 dev_pm_opp_put(opp_table->current_opp); 1373 1374 _of_clear_opp_table(opp_table); 1375 1376 /* Release clk */ 1377 if (!IS_ERR(opp_table->clk)) 1378 clk_put(opp_table->clk); 1379 1380 if (opp_table->paths) { 1381 for (i = 0; i < opp_table->path_count; i++) 1382 icc_put(opp_table->paths[i]); 1383 kfree(opp_table->paths); 1384 } 1385 1386 WARN_ON(!list_empty(&opp_table->opp_list)); 1387 1388 list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node) { 1389 /* 1390 * The OPP table is getting removed, drop the performance state 1391 * constraints. 1392 */ 1393 if (opp_table->genpd_performance_state) 1394 dev_pm_genpd_set_performance_state((struct device *)(opp_dev->dev), 0); 1395 1396 _remove_opp_dev(opp_dev, opp_table); 1397 } 1398 1399 mutex_destroy(&opp_table->genpd_virt_dev_lock); 1400 mutex_destroy(&opp_table->lock); 1401 kfree(opp_table); 1402 } 1403 1404 void dev_pm_opp_put_opp_table(struct opp_table *opp_table) 1405 { 1406 kref_put_mutex(&opp_table->kref, _opp_table_kref_release, 1407 &opp_table_lock); 1408 } 1409 EXPORT_SYMBOL_GPL(dev_pm_opp_put_opp_table); 1410 1411 void _opp_free(struct dev_pm_opp *opp) 1412 { 1413 kfree(opp); 1414 } 1415 1416 static void _opp_kref_release(struct kref *kref) 1417 { 1418 struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref); 1419 struct opp_table *opp_table = opp->opp_table; 1420 1421 list_del(&opp->node); 1422 mutex_unlock(&opp_table->lock); 1423 1424 /* 1425 * Notify the changes in the availability of the operable 1426 * frequency/voltage list. 
1427 */ 1428 blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp); 1429 _of_opp_free_required_opps(opp_table, opp); 1430 opp_debug_remove_one(opp); 1431 kfree(opp); 1432 } 1433 1434 void dev_pm_opp_get(struct dev_pm_opp *opp) 1435 { 1436 kref_get(&opp->kref); 1437 } 1438 1439 void dev_pm_opp_put(struct dev_pm_opp *opp) 1440 { 1441 kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock); 1442 } 1443 EXPORT_SYMBOL_GPL(dev_pm_opp_put); 1444 1445 /** 1446 * dev_pm_opp_remove() - Remove an OPP from OPP table 1447 * @dev: device for which we do this operation 1448 * @freq: OPP to remove with matching 'freq' 1449 * 1450 * This function removes an opp from the opp table. 1451 */ 1452 void dev_pm_opp_remove(struct device *dev, unsigned long freq) 1453 { 1454 struct dev_pm_opp *opp; 1455 struct opp_table *opp_table; 1456 bool found = false; 1457 1458 opp_table = _find_opp_table(dev); 1459 if (IS_ERR(opp_table)) 1460 return; 1461 1462 mutex_lock(&opp_table->lock); 1463 1464 list_for_each_entry(opp, &opp_table->opp_list, node) { 1465 if (opp->rate == freq) { 1466 found = true; 1467 break; 1468 } 1469 } 1470 1471 mutex_unlock(&opp_table->lock); 1472 1473 if (found) { 1474 dev_pm_opp_put(opp); 1475 1476 /* Drop the reference taken by dev_pm_opp_add() */ 1477 dev_pm_opp_put_opp_table(opp_table); 1478 } else { 1479 dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n", 1480 __func__, freq); 1481 } 1482 1483 /* Drop the reference taken by _find_opp_table() */ 1484 dev_pm_opp_put_opp_table(opp_table); 1485 } 1486 EXPORT_SYMBOL_GPL(dev_pm_opp_remove); 1487 1488 static struct dev_pm_opp *_opp_get_next(struct opp_table *opp_table, 1489 bool dynamic) 1490 { 1491 struct dev_pm_opp *opp = NULL, *temp; 1492 1493 mutex_lock(&opp_table->lock); 1494 list_for_each_entry(temp, &opp_table->opp_list, node) { 1495 /* 1496 * Refcount must be dropped only once for each OPP by OPP core, 1497 * do that with help of "removed" flag. 1498 */ 1499 if (!temp->removed && dynamic == temp->dynamic) { 1500 opp = temp; 1501 break; 1502 } 1503 } 1504 1505 mutex_unlock(&opp_table->lock); 1506 return opp; 1507 } 1508 1509 /* 1510 * Can't call dev_pm_opp_put() from under the lock as debugfs removal needs to 1511 * happen lock less to avoid circular dependency issues. This routine must be 1512 * called without the opp_table->lock held. 1513 */ 1514 static void _opp_remove_all(struct opp_table *opp_table, bool dynamic) 1515 { 1516 struct dev_pm_opp *opp; 1517 1518 while ((opp = _opp_get_next(opp_table, dynamic))) { 1519 opp->removed = true; 1520 dev_pm_opp_put(opp); 1521 1522 /* Drop the references taken by dev_pm_opp_add() */ 1523 if (dynamic) 1524 dev_pm_opp_put_opp_table(opp_table); 1525 } 1526 } 1527 1528 bool _opp_remove_all_static(struct opp_table *opp_table) 1529 { 1530 mutex_lock(&opp_table->lock); 1531 1532 if (!opp_table->parsed_static_opps) { 1533 mutex_unlock(&opp_table->lock); 1534 return false; 1535 } 1536 1537 if (--opp_table->parsed_static_opps) { 1538 mutex_unlock(&opp_table->lock); 1539 return true; 1540 } 1541 1542 mutex_unlock(&opp_table->lock); 1543 1544 _opp_remove_all(opp_table, false); 1545 return true; 1546 } 1547 1548 /** 1549 * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs 1550 * @dev: device for which we do this operation 1551 * 1552 * This function removes all dynamically created OPPs from the opp table. 
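 *
 * Example (illustrative only): a driver that registered OPPs at probe time
 * with dev_pm_opp_add() can drop all of them again on removal:
 *
 *	dev_pm_opp_remove_all_dynamic(dev);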
1553 */ 1554 void dev_pm_opp_remove_all_dynamic(struct device *dev) 1555 { 1556 struct opp_table *opp_table; 1557 1558 opp_table = _find_opp_table(dev); 1559 if (IS_ERR(opp_table)) 1560 return; 1561 1562 _opp_remove_all(opp_table, true); 1563 1564 /* Drop the reference taken by _find_opp_table() */ 1565 dev_pm_opp_put_opp_table(opp_table); 1566 } 1567 EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic); 1568 1569 struct dev_pm_opp *_opp_allocate(struct opp_table *table) 1570 { 1571 struct dev_pm_opp *opp; 1572 int supply_count, supply_size, icc_size; 1573 1574 /* Allocate space for at least one supply */ 1575 supply_count = table->regulator_count > 0 ? table->regulator_count : 1; 1576 supply_size = sizeof(*opp->supplies) * supply_count; 1577 icc_size = sizeof(*opp->bandwidth) * table->path_count; 1578 1579 /* allocate new OPP node and supplies structures */ 1580 opp = kzalloc(sizeof(*opp) + supply_size + icc_size, GFP_KERNEL); 1581 1582 if (!opp) 1583 return NULL; 1584 1585 /* Put the supplies at the end of the OPP structure as an empty array */ 1586 opp->supplies = (struct dev_pm_opp_supply *)(opp + 1); 1587 if (icc_size) 1588 opp->bandwidth = (struct dev_pm_opp_icc_bw *)(opp->supplies + supply_count); 1589 INIT_LIST_HEAD(&opp->node); 1590 1591 return opp; 1592 } 1593 1594 static bool _opp_supported_by_regulators(struct dev_pm_opp *opp, 1595 struct opp_table *opp_table) 1596 { 1597 struct regulator *reg; 1598 int i; 1599 1600 if (!opp_table->regulators) 1601 return true; 1602 1603 for (i = 0; i < opp_table->regulator_count; i++) { 1604 reg = opp_table->regulators[i]; 1605 1606 if (!regulator_is_supported_voltage(reg, 1607 opp->supplies[i].u_volt_min, 1608 opp->supplies[i].u_volt_max)) { 1609 pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n", 1610 __func__, opp->supplies[i].u_volt_min, 1611 opp->supplies[i].u_volt_max); 1612 return false; 1613 } 1614 } 1615 1616 return true; 1617 } 1618 1619 int _opp_compare_key(struct dev_pm_opp *opp1, struct dev_pm_opp *opp2) 1620 { 1621 if (opp1->rate != opp2->rate) 1622 return opp1->rate < opp2->rate ? -1 : 1; 1623 if (opp1->bandwidth && opp2->bandwidth && 1624 opp1->bandwidth[0].peak != opp2->bandwidth[0].peak) 1625 return opp1->bandwidth[0].peak < opp2->bandwidth[0].peak ? -1 : 1; 1626 if (opp1->level != opp2->level) 1627 return opp1->level < opp2->level ? -1 : 1; 1628 return 0; 1629 } 1630 1631 static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp, 1632 struct opp_table *opp_table, 1633 struct list_head **head) 1634 { 1635 struct dev_pm_opp *opp; 1636 int opp_cmp; 1637 1638 /* 1639 * Insert new OPP in order of increasing frequency and discard if 1640 * already present. 1641 * 1642 * Need to use &opp_table->opp_list in the condition part of the 'for' 1643 * loop, don't replace it with head otherwise it will become an infinite 1644 * loop. 1645 */ 1646 list_for_each_entry(opp, &opp_table->opp_list, node) { 1647 opp_cmp = _opp_compare_key(new_opp, opp); 1648 if (opp_cmp > 0) { 1649 *head = &opp->node; 1650 continue; 1651 } 1652 1653 if (opp_cmp < 0) 1654 return 0; 1655 1656 /* Duplicate OPPs */ 1657 dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n", 1658 __func__, opp->rate, opp->supplies[0].u_volt, 1659 opp->available, new_opp->rate, 1660 new_opp->supplies[0].u_volt, new_opp->available); 1661 1662 /* Should we compare voltages for all regulators here ? 
 */
		return opp->available &&
		       new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST;
	}

	return 0;
}

void _required_opps_available(struct dev_pm_opp *opp, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (opp->required_opps[i]->available)
			continue;

		opp->available = false;
		pr_warn("%s: OPP not supported by required OPP %pOF (%lu)\n",
			__func__, opp->required_opps[i]->np, opp->rate);
		return;
	}
}

/*
 * Returns:
 * 0: On success. An appropriate error message is printed for duplicate OPPs.
 * -EBUSY: For OPP with same freq/volt and is available. The callers of
 *  _opp_add() must return 0 if they receive -EBUSY from it. This is to make
 *  sure we don't print error messages unnecessarily if different parts of
 *  kernel try to initialize the OPP table.
 * -EEXIST: For OPP with same freq but different volt or is unavailable. This
 *  should be considered an error by the callers of _opp_add().
 */
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
	     struct opp_table *opp_table, bool rate_not_available)
{
	struct list_head *head;
	int ret;

	mutex_lock(&opp_table->lock);
	head = &opp_table->opp_list;

	ret = _opp_is_duplicate(dev, new_opp, opp_table, &head);
	if (ret) {
		mutex_unlock(&opp_table->lock);
		return ret;
	}

	list_add(&new_opp->node, head);
	mutex_unlock(&opp_table->lock);

	new_opp->opp_table = opp_table;
	kref_init(&new_opp->kref);

	opp_debug_create_one(new_opp, opp_table);

	if (!_opp_supported_by_regulators(new_opp, opp_table)) {
		new_opp->available = false;
		dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
			 __func__, new_opp->rate);
	}

	/* required-opps not fully initialized yet */
	if (lazy_linking_pending(opp_table))
		return 0;

	_required_opps_available(new_opp, opp_table->required_opp_count);

	return 0;
}

/**
 * _opp_add_v1() - Allocate an OPP based on v1 bindings.
 * @opp_table: OPP table
 * @dev: device for which we do this operation
 * @freq: Frequency in Hz for this OPP
 * @u_volt: Voltage in uVolts for this OPP
 * @dynamic: Dynamically added OPPs.
 *
 * This function adds an opp definition to the opp table and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
 *
 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
 * and freed by dev_pm_opp_of_remove_table.
1747 * 1748 * Return: 1749 * 0 On success OR 1750 * Duplicate OPPs (both freq and volt are same) and opp->available 1751 * -EEXIST Freq are same and volt are different OR 1752 * Duplicate OPPs (both freq and volt are same) and !opp->available 1753 * -ENOMEM Memory allocation failure 1754 */ 1755 int _opp_add_v1(struct opp_table *opp_table, struct device *dev, 1756 unsigned long freq, long u_volt, bool dynamic) 1757 { 1758 struct dev_pm_opp *new_opp; 1759 unsigned long tol; 1760 int ret; 1761 1762 new_opp = _opp_allocate(opp_table); 1763 if (!new_opp) 1764 return -ENOMEM; 1765 1766 /* populate the opp table */ 1767 new_opp->rate = freq; 1768 tol = u_volt * opp_table->voltage_tolerance_v1 / 100; 1769 new_opp->supplies[0].u_volt = u_volt; 1770 new_opp->supplies[0].u_volt_min = u_volt - tol; 1771 new_opp->supplies[0].u_volt_max = u_volt + tol; 1772 new_opp->available = true; 1773 new_opp->dynamic = dynamic; 1774 1775 ret = _opp_add(dev, new_opp, opp_table, false); 1776 if (ret) { 1777 /* Don't return error for duplicate OPPs */ 1778 if (ret == -EBUSY) 1779 ret = 0; 1780 goto free_opp; 1781 } 1782 1783 /* 1784 * Notify the changes in the availability of the operable 1785 * frequency/voltage list. 1786 */ 1787 blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp); 1788 return 0; 1789 1790 free_opp: 1791 _opp_free(new_opp); 1792 1793 return ret; 1794 } 1795 1796 /** 1797 * dev_pm_opp_set_supported_hw() - Set supported platforms 1798 * @dev: Device for which supported-hw has to be set. 1799 * @versions: Array of hierarchy of versions to match. 1800 * @count: Number of elements in the array. 1801 * 1802 * This is required only for the V2 bindings, and it enables a platform to 1803 * specify the hierarchy of versions it supports. OPP layer will then enable 1804 * OPPs, which are available for those versions, based on its 'opp-supported-hw' 1805 * property. 1806 */ 1807 struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev, 1808 const u32 *versions, unsigned int count) 1809 { 1810 struct opp_table *opp_table; 1811 1812 opp_table = _add_opp_table(dev, false); 1813 if (IS_ERR(opp_table)) 1814 return opp_table; 1815 1816 /* Make sure there are no concurrent readers while updating opp_table */ 1817 WARN_ON(!list_empty(&opp_table->opp_list)); 1818 1819 /* Another CPU that shares the OPP table has set the property ? */ 1820 if (opp_table->supported_hw) 1821 return opp_table; 1822 1823 opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions), 1824 GFP_KERNEL); 1825 if (!opp_table->supported_hw) { 1826 dev_pm_opp_put_opp_table(opp_table); 1827 return ERR_PTR(-ENOMEM); 1828 } 1829 1830 opp_table->supported_hw_count = count; 1831 1832 return opp_table; 1833 } 1834 EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw); 1835 1836 /** 1837 * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw 1838 * @opp_table: OPP table returned by dev_pm_opp_set_supported_hw(). 1839 * 1840 * This is required only for the V2 bindings, and is called for a matching 1841 * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure 1842 * will not be freed. 
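 *
 * Example (illustrative pairing, with a hypothetical version value):
 *
 *	const u32 version[] = { 0x2 };
 *
 *	opp_table = dev_pm_opp_set_supported_hw(dev, version, ARRAY_SIZE(version));
 *	...
 *	dev_pm_opp_put_supported_hw(opp_table);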
1843 */ 1844 void dev_pm_opp_put_supported_hw(struct opp_table *opp_table) 1845 { 1846 if (unlikely(!opp_table)) 1847 return; 1848 1849 /* Make sure there are no concurrent readers while updating opp_table */ 1850 WARN_ON(!list_empty(&opp_table->opp_list)); 1851 1852 kfree(opp_table->supported_hw); 1853 opp_table->supported_hw = NULL; 1854 opp_table->supported_hw_count = 0; 1855 1856 dev_pm_opp_put_opp_table(opp_table); 1857 } 1858 EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw); 1859 1860 /** 1861 * dev_pm_opp_set_prop_name() - Set prop-extn name 1862 * @dev: Device for which the prop-name has to be set. 1863 * @name: name to postfix to properties. 1864 * 1865 * This is required only for the V2 bindings, and it enables a platform to 1866 * specify the extn to be used for certain property names. The properties to 1867 * which the extension will apply are opp-microvolt and opp-microamp. OPP core 1868 * should postfix the property name with -<name> while looking for them. 1869 */ 1870 struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name) 1871 { 1872 struct opp_table *opp_table; 1873 1874 opp_table = _add_opp_table(dev, false); 1875 if (IS_ERR(opp_table)) 1876 return opp_table; 1877 1878 /* Make sure there are no concurrent readers while updating opp_table */ 1879 WARN_ON(!list_empty(&opp_table->opp_list)); 1880 1881 /* Another CPU that shares the OPP table has set the property ? */ 1882 if (opp_table->prop_name) 1883 return opp_table; 1884 1885 opp_table->prop_name = kstrdup(name, GFP_KERNEL); 1886 if (!opp_table->prop_name) { 1887 dev_pm_opp_put_opp_table(opp_table); 1888 return ERR_PTR(-ENOMEM); 1889 } 1890 1891 return opp_table; 1892 } 1893 EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name); 1894 1895 /** 1896 * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name 1897 * @opp_table: OPP table returned by dev_pm_opp_set_prop_name(). 1898 * 1899 * This is required only for the V2 bindings, and is called for a matching 1900 * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure 1901 * will not be freed. 1902 */ 1903 void dev_pm_opp_put_prop_name(struct opp_table *opp_table) 1904 { 1905 if (unlikely(!opp_table)) 1906 return; 1907 1908 /* Make sure there are no concurrent readers while updating opp_table */ 1909 WARN_ON(!list_empty(&opp_table->opp_list)); 1910 1911 kfree(opp_table->prop_name); 1912 opp_table->prop_name = NULL; 1913 1914 dev_pm_opp_put_opp_table(opp_table); 1915 } 1916 EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name); 1917 1918 /** 1919 * dev_pm_opp_set_regulators() - Set regulator names for the device 1920 * @dev: Device for which regulator name is being set. 1921 * @names: Array of pointers to the names of the regulator. 1922 * @count: Number of regulators. 1923 * 1924 * In order to support OPP switching, OPP layer needs to know the name of the 1925 * device's regulators, as the core would be required to switch voltages as 1926 * well. 1927 * 1928 * This must be called before any OPPs are initialized for the device. 
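 *
 * Example (hypothetical supply names, illustrative only):
 *
 *	static const char * const names[] = { "vdd", "vddio" };
 *
 *	opp_table = dev_pm_opp_set_regulators(dev, names, ARRAY_SIZE(names));
 *	if (IS_ERR(opp_table))
 *		return PTR_ERR(opp_table);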
1929 */ 1930 struct opp_table *dev_pm_opp_set_regulators(struct device *dev, 1931 const char * const names[], 1932 unsigned int count) 1933 { 1934 struct dev_pm_opp_supply *supplies; 1935 struct opp_table *opp_table; 1936 struct regulator *reg; 1937 int ret, i; 1938 1939 opp_table = _add_opp_table(dev, false); 1940 if (IS_ERR(opp_table)) 1941 return opp_table; 1942 1943 /* This should be called before OPPs are initialized */ 1944 if (WARN_ON(!list_empty(&opp_table->opp_list))) { 1945 ret = -EBUSY; 1946 goto err; 1947 } 1948 1949 /* Another CPU that shares the OPP table has set the regulators ? */ 1950 if (opp_table->regulators) 1951 return opp_table; 1952 1953 opp_table->regulators = kmalloc_array(count, 1954 sizeof(*opp_table->regulators), 1955 GFP_KERNEL); 1956 if (!opp_table->regulators) { 1957 ret = -ENOMEM; 1958 goto err; 1959 } 1960 1961 for (i = 0; i < count; i++) { 1962 reg = regulator_get_optional(dev, names[i]); 1963 if (IS_ERR(reg)) { 1964 ret = PTR_ERR(reg); 1965 if (ret != -EPROBE_DEFER) 1966 dev_err(dev, "%s: no regulator (%s) found: %d\n", 1967 __func__, names[i], ret); 1968 goto free_regulators; 1969 } 1970 1971 opp_table->regulators[i] = reg; 1972 } 1973 1974 opp_table->regulator_count = count; 1975 1976 supplies = kmalloc_array(count * 2, sizeof(*supplies), GFP_KERNEL); 1977 if (!supplies) { 1978 ret = -ENOMEM; 1979 goto free_regulators; 1980 } 1981 1982 mutex_lock(&opp_table->lock); 1983 opp_table->sod_supplies = supplies; 1984 if (opp_table->set_opp_data) { 1985 opp_table->set_opp_data->old_opp.supplies = supplies; 1986 opp_table->set_opp_data->new_opp.supplies = supplies + count; 1987 } 1988 mutex_unlock(&opp_table->lock); 1989 1990 return opp_table; 1991 1992 free_regulators: 1993 while (i != 0) 1994 regulator_put(opp_table->regulators[--i]); 1995 1996 kfree(opp_table->regulators); 1997 opp_table->regulators = NULL; 1998 opp_table->regulator_count = -1; 1999 err: 2000 dev_pm_opp_put_opp_table(opp_table); 2001 2002 return ERR_PTR(ret); 2003 } 2004 EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulators); 2005 2006 /** 2007 * dev_pm_opp_put_regulators() - Releases resources blocked for regulator 2008 * @opp_table: OPP table returned from dev_pm_opp_set_regulators(). 2009 */ 2010 void dev_pm_opp_put_regulators(struct opp_table *opp_table) 2011 { 2012 int i; 2013 2014 if (unlikely(!opp_table)) 2015 return; 2016 2017 if (!opp_table->regulators) 2018 goto put_opp_table; 2019 2020 /* Make sure there are no concurrent readers while updating opp_table */ 2021 WARN_ON(!list_empty(&opp_table->opp_list)); 2022 2023 if (opp_table->enabled) { 2024 for (i = opp_table->regulator_count - 1; i >= 0; i--) 2025 regulator_disable(opp_table->regulators[i]); 2026 } 2027 2028 for (i = opp_table->regulator_count - 1; i >= 0; i--) 2029 regulator_put(opp_table->regulators[i]); 2030 2031 mutex_lock(&opp_table->lock); 2032 if (opp_table->set_opp_data) { 2033 opp_table->set_opp_data->old_opp.supplies = NULL; 2034 opp_table->set_opp_data->new_opp.supplies = NULL; 2035 } 2036 2037 kfree(opp_table->sod_supplies); 2038 opp_table->sod_supplies = NULL; 2039 mutex_unlock(&opp_table->lock); 2040 2041 kfree(opp_table->regulators); 2042 opp_table->regulators = NULL; 2043 opp_table->regulator_count = -1; 2044 2045 put_opp_table: 2046 dev_pm_opp_put_opp_table(opp_table); 2047 } 2048 EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators); 2049 2050 /** 2051 * dev_pm_opp_set_clkname() - Set clk name for the device 2052 * @dev: Device for which clk name is being set. 2053 * @name: Clk name. 
2054 * 2055 * In order to support OPP switching, OPP layer needs to get pointer to the 2056 * clock for the device. Simple cases work fine without using this routine (i.e. 2057 * by passing connection-id as NULL), but for a device with multiple clocks 2058 * available, the OPP core needs to know the exact name of the clk to use. 2059 * 2060 * This must be called before any OPPs are initialized for the device. 2061 */ 2062 struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name) 2063 { 2064 struct opp_table *opp_table; 2065 int ret; 2066 2067 opp_table = _add_opp_table(dev, false); 2068 if (IS_ERR(opp_table)) 2069 return opp_table; 2070 2071 /* This should be called before OPPs are initialized */ 2072 if (WARN_ON(!list_empty(&opp_table->opp_list))) { 2073 ret = -EBUSY; 2074 goto err; 2075 } 2076 2077 /* clk shouldn't be initialized at this point */ 2078 if (WARN_ON(opp_table->clk)) { 2079 ret = -EBUSY; 2080 goto err; 2081 } 2082 2083 /* Find clk for the device */ 2084 opp_table->clk = clk_get(dev, name); 2085 if (IS_ERR(opp_table->clk)) { 2086 ret = PTR_ERR(opp_table->clk); 2087 if (ret != -EPROBE_DEFER) { 2088 dev_err(dev, "%s: Couldn't find clock: %d\n", __func__, 2089 ret); 2090 } 2091 goto err; 2092 } 2093 2094 return opp_table; 2095 2096 err: 2097 dev_pm_opp_put_opp_table(opp_table); 2098 2099 return ERR_PTR(ret); 2100 } 2101 EXPORT_SYMBOL_GPL(dev_pm_opp_set_clkname); 2102 2103 /** 2104 * dev_pm_opp_put_clkname() - Releases resources blocked for clk. 2105 * @opp_table: OPP table returned from dev_pm_opp_set_clkname(). 2106 */ 2107 void dev_pm_opp_put_clkname(struct opp_table *opp_table) 2108 { 2109 if (unlikely(!opp_table)) 2110 return; 2111 2112 /* Make sure there are no concurrent readers while updating opp_table */ 2113 WARN_ON(!list_empty(&opp_table->opp_list)); 2114 2115 clk_put(opp_table->clk); 2116 opp_table->clk = ERR_PTR(-EINVAL); 2117 2118 dev_pm_opp_put_opp_table(opp_table); 2119 } 2120 EXPORT_SYMBOL_GPL(dev_pm_opp_put_clkname); 2121 2122 /** 2123 * dev_pm_opp_register_set_opp_helper() - Register custom set OPP helper 2124 * @dev: Device for which the helper is getting registered. 2125 * @set_opp: Custom set OPP helper. 2126 * 2127 * This is useful to support complex platforms (like platforms with multiple 2128 * regulators per device), instead of the generic OPP set rate helper. 2129 * 2130 * This must be called before any OPPs are initialized for the device. 2131 */ 2132 struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, 2133 int (*set_opp)(struct dev_pm_set_opp_data *data)) 2134 { 2135 struct dev_pm_set_opp_data *data; 2136 struct opp_table *opp_table; 2137 2138 if (!set_opp) 2139 return ERR_PTR(-EINVAL); 2140 2141 opp_table = _add_opp_table(dev, false); 2142 if (IS_ERR(opp_table)) 2143 return opp_table; 2144 2145 /* This should be called before OPPs are initialized */ 2146 if (WARN_ON(!list_empty(&opp_table->opp_list))) { 2147 dev_pm_opp_put_opp_table(opp_table); 2148 return ERR_PTR(-EBUSY); 2149 } 2150 2151 /* Another CPU that shares the OPP table has set the helper ? 
*/ 2152 if (opp_table->set_opp) 2153 return opp_table; 2154 2155 data = kzalloc(sizeof(*data), GFP_KERNEL); 2156 if (!data) 2157 return ERR_PTR(-ENOMEM); 2158 2159 mutex_lock(&opp_table->lock); 2160 opp_table->set_opp_data = data; 2161 if (opp_table->sod_supplies) { 2162 data->old_opp.supplies = opp_table->sod_supplies; 2163 data->new_opp.supplies = opp_table->sod_supplies + 2164 opp_table->regulator_count; 2165 } 2166 mutex_unlock(&opp_table->lock); 2167 2168 opp_table->set_opp = set_opp; 2169 2170 return opp_table; 2171 } 2172 EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper); 2173 2174 /** 2175 * dev_pm_opp_unregister_set_opp_helper() - Releases resources blocked for 2176 * set_opp helper 2177 * @opp_table: OPP table returned from dev_pm_opp_register_set_opp_helper(). 2178 * 2179 * Release resources blocked for platform specific set_opp helper. 2180 */ 2181 void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table) 2182 { 2183 if (unlikely(!opp_table)) 2184 return; 2185 2186 /* Make sure there are no concurrent readers while updating opp_table */ 2187 WARN_ON(!list_empty(&opp_table->opp_list)); 2188 2189 opp_table->set_opp = NULL; 2190 2191 mutex_lock(&opp_table->lock); 2192 kfree(opp_table->set_opp_data); 2193 opp_table->set_opp_data = NULL; 2194 mutex_unlock(&opp_table->lock); 2195 2196 dev_pm_opp_put_opp_table(opp_table); 2197 } 2198 EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper); 2199 2200 static void devm_pm_opp_unregister_set_opp_helper(void *data) 2201 { 2202 dev_pm_opp_unregister_set_opp_helper(data); 2203 } 2204 2205 /** 2206 * devm_pm_opp_register_set_opp_helper() - Register custom set OPP helper 2207 * @dev: Device for which the helper is getting registered. 2208 * @set_opp: Custom set OPP helper. 2209 * 2210 * This is a resource-managed version of dev_pm_opp_register_set_opp_helper(). 2211 * 2212 * Return: pointer to 'struct opp_table' on success and errorno otherwise. 2213 */ 2214 struct opp_table * 2215 devm_pm_opp_register_set_opp_helper(struct device *dev, 2216 int (*set_opp)(struct dev_pm_set_opp_data *data)) 2217 { 2218 struct opp_table *opp_table; 2219 int err; 2220 2221 opp_table = dev_pm_opp_register_set_opp_helper(dev, set_opp); 2222 if (IS_ERR(opp_table)) 2223 return opp_table; 2224 2225 err = devm_add_action_or_reset(dev, devm_pm_opp_unregister_set_opp_helper, 2226 opp_table); 2227 if (err) 2228 return ERR_PTR(err); 2229 2230 return opp_table; 2231 } 2232 EXPORT_SYMBOL_GPL(devm_pm_opp_register_set_opp_helper); 2233 2234 static void _opp_detach_genpd(struct opp_table *opp_table) 2235 { 2236 int index; 2237 2238 if (!opp_table->genpd_virt_devs) 2239 return; 2240 2241 for (index = 0; index < opp_table->required_opp_count; index++) { 2242 if (!opp_table->genpd_virt_devs[index]) 2243 continue; 2244 2245 dev_pm_domain_detach(opp_table->genpd_virt_devs[index], false); 2246 opp_table->genpd_virt_devs[index] = NULL; 2247 } 2248 2249 kfree(opp_table->genpd_virt_devs); 2250 opp_table->genpd_virt_devs = NULL; 2251 } 2252 2253 /** 2254 * dev_pm_opp_attach_genpd - Attach genpd(s) for the device and save virtual device pointer 2255 * @dev: Consumer device for which the genpd is getting attached. 2256 * @names: Null terminated array of pointers containing names of genpd to attach. 2257 * @virt_devs: Pointer to return the array of virtual devices. 2258 * 2259 * Multiple generic power domains for a device are supported with the help of 2260 * virtual genpd devices, which are created for each consumer device - genpd 2261 * pair. 
These are the device structures which are attached to the power domain 2262 * and are required by the OPP core to set the performance state of the genpd. 2263 * The same API also works for the case where single genpd is available and so 2264 * we don't need to support that separately. 2265 * 2266 * This helper will normally be called by the consumer driver of the device 2267 * "dev", as only that has details of the genpd names. 2268 * 2269 * This helper needs to be called once with a list of all genpd to attach. 2270 * Otherwise the original device structure will be used instead by the OPP core. 2271 * 2272 * The order of entries in the names array must match the order in which 2273 * "required-opps" are added in DT. 2274 */ 2275 struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, 2276 const char **names, struct device ***virt_devs) 2277 { 2278 struct opp_table *opp_table; 2279 struct device *virt_dev; 2280 int index = 0, ret = -EINVAL; 2281 const char **name = names; 2282 2283 opp_table = _add_opp_table(dev, false); 2284 if (IS_ERR(opp_table)) 2285 return opp_table; 2286 2287 if (opp_table->genpd_virt_devs) 2288 return opp_table; 2289 2290 /* 2291 * If the genpd's OPP table isn't already initialized, parsing of the 2292 * required-opps fail for dev. We should retry this after genpd's OPP 2293 * table is added. 2294 */ 2295 if (!opp_table->required_opp_count) { 2296 ret = -EPROBE_DEFER; 2297 goto put_table; 2298 } 2299 2300 mutex_lock(&opp_table->genpd_virt_dev_lock); 2301 2302 opp_table->genpd_virt_devs = kcalloc(opp_table->required_opp_count, 2303 sizeof(*opp_table->genpd_virt_devs), 2304 GFP_KERNEL); 2305 if (!opp_table->genpd_virt_devs) 2306 goto unlock; 2307 2308 while (*name) { 2309 if (index >= opp_table->required_opp_count) { 2310 dev_err(dev, "Index can't be greater than required-opp-count - 1, %s (%d : %d)\n", 2311 *name, opp_table->required_opp_count, index); 2312 goto err; 2313 } 2314 2315 virt_dev = dev_pm_domain_attach_by_name(dev, *name); 2316 if (IS_ERR(virt_dev)) { 2317 ret = PTR_ERR(virt_dev); 2318 dev_err(dev, "Couldn't attach to pm_domain: %d\n", ret); 2319 goto err; 2320 } 2321 2322 opp_table->genpd_virt_devs[index] = virt_dev; 2323 index++; 2324 name++; 2325 } 2326 2327 if (virt_devs) 2328 *virt_devs = opp_table->genpd_virt_devs; 2329 mutex_unlock(&opp_table->genpd_virt_dev_lock); 2330 2331 return opp_table; 2332 2333 err: 2334 _opp_detach_genpd(opp_table); 2335 unlock: 2336 mutex_unlock(&opp_table->genpd_virt_dev_lock); 2337 2338 put_table: 2339 dev_pm_opp_put_opp_table(opp_table); 2340 2341 return ERR_PTR(ret); 2342 } 2343 EXPORT_SYMBOL_GPL(dev_pm_opp_attach_genpd); 2344 2345 /** 2346 * dev_pm_opp_detach_genpd() - Detach genpd(s) from the device. 2347 * @opp_table: OPP table returned by dev_pm_opp_attach_genpd(). 2348 * 2349 * This detaches the genpd(s), resets the virtual device pointers, and puts the 2350 * OPP table. 2351 */ 2352 void dev_pm_opp_detach_genpd(struct opp_table *opp_table) 2353 { 2354 if (unlikely(!opp_table)) 2355 return; 2356 2357 /* 2358 * Acquire genpd_virt_dev_lock to make sure virt_dev isn't getting 2359 * used in parallel. 
2360 */ 2361 mutex_lock(&opp_table->genpd_virt_dev_lock); 2362 _opp_detach_genpd(opp_table); 2363 mutex_unlock(&opp_table->genpd_virt_dev_lock); 2364 2365 dev_pm_opp_put_opp_table(opp_table); 2366 } 2367 EXPORT_SYMBOL_GPL(dev_pm_opp_detach_genpd); 2368 2369 static void devm_pm_opp_detach_genpd(void *data) 2370 { 2371 dev_pm_opp_detach_genpd(data); 2372 } 2373 2374 /** 2375 * devm_pm_opp_attach_genpd - Attach genpd(s) for the device and save virtual 2376 * device pointer 2377 * @dev: Consumer device for which the genpd is getting attached. 2378 * @names: Null terminated array of pointers containing names of genpd to attach. 2379 * @virt_devs: Pointer to return the array of virtual devices. 2380 * 2381 * This is a resource-managed version of dev_pm_opp_attach_genpd(). 2382 * 2383 * Return: pointer to 'struct opp_table' on success and errorno otherwise. 2384 */ 2385 struct opp_table * 2386 devm_pm_opp_attach_genpd(struct device *dev, const char **names, 2387 struct device ***virt_devs) 2388 { 2389 struct opp_table *opp_table; 2390 int err; 2391 2392 opp_table = dev_pm_opp_attach_genpd(dev, names, virt_devs); 2393 if (IS_ERR(opp_table)) 2394 return opp_table; 2395 2396 err = devm_add_action_or_reset(dev, devm_pm_opp_detach_genpd, 2397 opp_table); 2398 if (err) 2399 return ERR_PTR(err); 2400 2401 return opp_table; 2402 } 2403 EXPORT_SYMBOL_GPL(devm_pm_opp_attach_genpd); 2404 2405 /** 2406 * dev_pm_opp_xlate_required_opp() - Find required OPP for @src_table OPP. 2407 * @src_table: OPP table which has @dst_table as one of its required OPP table. 2408 * @dst_table: Required OPP table of the @src_table. 2409 * @src_opp: OPP from the @src_table. 2410 * 2411 * This function returns the OPP (present in @dst_table) pointed out by the 2412 * "required-opps" property of the @src_opp (present in @src_table). 2413 * 2414 * The callers are required to call dev_pm_opp_put() for the returned OPP after 2415 * use. 2416 * 2417 * Return: pointer to 'struct dev_pm_opp' on success and errorno otherwise. 2418 */ 2419 struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table, 2420 struct opp_table *dst_table, 2421 struct dev_pm_opp *src_opp) 2422 { 2423 struct dev_pm_opp *opp, *dest_opp = ERR_PTR(-ENODEV); 2424 int i; 2425 2426 if (!src_table || !dst_table || !src_opp || 2427 !src_table->required_opp_tables) 2428 return ERR_PTR(-EINVAL); 2429 2430 /* required-opps not fully initialized yet */ 2431 if (lazy_linking_pending(src_table)) 2432 return ERR_PTR(-EBUSY); 2433 2434 for (i = 0; i < src_table->required_opp_count; i++) { 2435 if (src_table->required_opp_tables[i] == dst_table) { 2436 mutex_lock(&src_table->lock); 2437 2438 list_for_each_entry(opp, &src_table->opp_list, node) { 2439 if (opp == src_opp) { 2440 dest_opp = opp->required_opps[i]; 2441 dev_pm_opp_get(dest_opp); 2442 break; 2443 } 2444 } 2445 2446 mutex_unlock(&src_table->lock); 2447 break; 2448 } 2449 } 2450 2451 if (IS_ERR(dest_opp)) { 2452 pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__, 2453 src_table, dst_table); 2454 } 2455 2456 return dest_opp; 2457 } 2458 EXPORT_SYMBOL_GPL(dev_pm_opp_xlate_required_opp); 2459 2460 /** 2461 * dev_pm_opp_xlate_performance_state() - Find required OPP's pstate for src_table. 2462 * @src_table: OPP table which has dst_table as one of its required OPP table. 2463 * @dst_table: Required OPP table of the src_table. 2464 * @pstate: Current performance state of the src_table. 
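 *
 * A minimal usage sketch (illustrative; "dev_table" and "parent_table" are
 * assumed table pointers held by a genpd provider translating a device's
 * performance state to its parent domain):
 *
 *	int parent_state;
 *
 *	parent_state = dev_pm_opp_xlate_performance_state(dev_table,
 *							   parent_table, pstate);
 *	if (parent_state < 0)
 *		return parent_state;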
2465 *
2466 * This returns the pstate of the OPP (present in @dst_table) pointed out by the
2467 * "required-opps" property of the OPP (present in @src_table) which has its
2468 * performance state set to @pstate.
2469 *
2470 * Return: Zero or positive performance state on success, otherwise negative
2471 * value on errors.
2472 */
2473 int dev_pm_opp_xlate_performance_state(struct opp_table *src_table,
2474 struct opp_table *dst_table,
2475 unsigned int pstate)
2476 {
2477 struct dev_pm_opp *opp;
2478 int dest_pstate = -EINVAL;
2479 int i;
2480
2481 /*
2482 * Normally the src_table will have the "required_opps" property set to
2483 * point to one of the OPPs in the dst_table, but in some cases the
2484 * genpd and its master have a one-to-one mapping of performance states
2485 * and so none of them have the "required-opps" property set. Return the
2486 * pstate of the src_table as it is in such cases.
2487 */
2488 if (!src_table || !src_table->required_opp_count)
2489 return pstate;
2490
2491 /* required-opps not fully initialized yet */
2492 if (lazy_linking_pending(src_table))
2493 return -EBUSY;
2494
2495 for (i = 0; i < src_table->required_opp_count; i++) {
2496 if (src_table->required_opp_tables[i]->np == dst_table->np)
2497 break;
2498 }
2499
2500 if (unlikely(i == src_table->required_opp_count)) {
2501 pr_err("%s: Couldn't find matching OPP table (%p: %p)\n",
2502 __func__, src_table, dst_table);
2503 return -EINVAL;
2504 }
2505
2506 mutex_lock(&src_table->lock);
2507
2508 list_for_each_entry(opp, &src_table->opp_list, node) {
2509 if (opp->pstate == pstate) {
2510 dest_pstate = opp->required_opps[i]->pstate;
2511 goto unlock;
2512 }
2513 }
2514
2515 pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__, src_table,
2516 dst_table);
2517
2518 unlock:
2519 mutex_unlock(&src_table->lock);
2520
2521 return dest_pstate;
2522 }
2523
2524 /**
2525 * dev_pm_opp_add() - Add an OPP to the device's OPP table
2526 * @dev: device for which we do this operation
2527 * @freq: Frequency in Hz for this OPP
2528 * @u_volt: Voltage in uVolts for this OPP
2529 *
2530 * This function adds an opp definition to the opp table and returns status.
2531 * The opp is made available by default and it can be controlled using
2532 * dev_pm_opp_enable/disable functions.
2533 *
2534 * Return:
2535 * 0 On success OR
2536 * Duplicate OPPs (both freq and volt are same) and opp->available
2537 * -EEXIST Freq is the same and volt is different OR
2538 * Duplicate OPPs (both freq and volt are same) and !opp->available
2539 * -ENOMEM Memory allocation failure
2540 */
2541 int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
2542 {
2543 struct opp_table *opp_table;
2544 int ret;
2545
2546 opp_table = _add_opp_table(dev, true);
2547 if (IS_ERR(opp_table))
2548 return PTR_ERR(opp_table);
2549
2550 /* Fix regulator count for dynamic OPPs */
2551 opp_table->regulator_count = 1;
2552
2553 ret = _opp_add_v1(opp_table, dev, freq, u_volt, true);
2554 if (ret)
2555 dev_pm_opp_put_opp_table(opp_table);
2556
2557 return ret;
2558 }
2559 EXPORT_SYMBOL_GPL(dev_pm_opp_add);
2560
2561 /**
2562 * _opp_set_availability() - helper to set the availability of an opp
2563 * @dev: device for which we do this operation
2564 * @freq: OPP frequency to modify availability
2565 * @availability_req: availability status requested for this opp
2566 *
2567 * Set the availability of an OPP; dev_pm_opp_{enable,disable}() share this
2568 * common logic, which is isolated here.
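 *
 * For example, a user of the public wrappers might temporarily take an OPP out
 * of service and restore it later (illustrative sketch; the 800 MHz frequency
 * is an assumption):
 *
 *	ret = dev_pm_opp_disable(dev, 800000000);
 *	...
 *	ret = dev_pm_opp_enable(dev, 800000000);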
2569 *
2570 * Return: -EINVAL for bad pointers, -ENOMEM if no memory is available for the
2571 * copy operation, and 0 if no modification was needed or the modification was
2572 * successful.
2573 */
2574 static int _opp_set_availability(struct device *dev, unsigned long freq,
2575 bool availability_req)
2576 {
2577 struct opp_table *opp_table;
2578 struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
2579 int r = 0;
2580
2581 /* Find the opp_table */
2582 opp_table = _find_opp_table(dev);
2583 if (IS_ERR(opp_table)) {
2584 r = PTR_ERR(opp_table);
2585 dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
2586 return r;
2587 }
2588
2589 mutex_lock(&opp_table->lock);
2590
2591 /* Do we have the frequency? */
2592 list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
2593 if (tmp_opp->rate == freq) {
2594 opp = tmp_opp;
2595 break;
2596 }
2597 }
2598
2599 if (IS_ERR(opp)) {
2600 r = PTR_ERR(opp);
2601 goto unlock;
2602 }
2603
2604 /* Is update really needed? */
2605 if (opp->available == availability_req)
2606 goto unlock;
2607
2608 opp->available = availability_req;
2609
2610 dev_pm_opp_get(opp);
2611 mutex_unlock(&opp_table->lock);
2612
2613 /* Notify the change of the OPP availability */
2614 if (availability_req)
2615 blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,
2616 opp);
2617 else
2618 blocking_notifier_call_chain(&opp_table->head,
2619 OPP_EVENT_DISABLE, opp);
2620
2621 dev_pm_opp_put(opp);
2622 goto put_table;
2623
2624 unlock:
2625 mutex_unlock(&opp_table->lock);
2626 put_table:
2627 dev_pm_opp_put_opp_table(opp_table);
2628 return r;
2629 }
2630
2631 /**
2632 * dev_pm_opp_adjust_voltage() - helper to change the voltage of an OPP
2633 * @dev: device for which we do this operation
2634 * @freq: OPP frequency to adjust voltage of
2635 * @u_volt: new OPP target voltage
2636 * @u_volt_min: new OPP min voltage
2637 * @u_volt_max: new OPP max voltage
2638 *
2639 * Return: -EINVAL for bad pointers, -ENOMEM if no memory is available for the
2640 * copy operation, and 0 if no modification was needed or the modification was
2641 * successful.
2642 */
2643 int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
2644 unsigned long u_volt, unsigned long u_volt_min,
2645 unsigned long u_volt_max)
2646
2647 {
2648 struct opp_table *opp_table;
2649 struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
2650 int r = 0;
2651
2652 /* Find the opp_table */
2653 opp_table = _find_opp_table(dev);
2654 if (IS_ERR(opp_table)) {
2655 r = PTR_ERR(opp_table);
2656 dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
2657 return r;
2658 }
2659
2660 mutex_lock(&opp_table->lock);
2661
2662 /* Do we have the frequency? */
2663 list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
2664 if (tmp_opp->rate == freq) {
2665 opp = tmp_opp;
2666 break;
2667 }
2668 }
2669
2670 if (IS_ERR(opp)) {
2671 r = PTR_ERR(opp);
2672 goto adjust_unlock;
2673 }
2674
2675 /* Is update really needed?
*/
2676 if (opp->supplies->u_volt == u_volt)
2677 goto adjust_unlock;
2678
2679 opp->supplies->u_volt = u_volt;
2680 opp->supplies->u_volt_min = u_volt_min;
2681 opp->supplies->u_volt_max = u_volt_max;
2682
2683 dev_pm_opp_get(opp);
2684 mutex_unlock(&opp_table->lock);
2685
2686 /* Notify the voltage change of the OPP */
2687 blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADJUST_VOLTAGE,
2688 opp);
2689
2690 dev_pm_opp_put(opp);
2691 goto adjust_put_table;
2692
2693 adjust_unlock:
2694 mutex_unlock(&opp_table->lock);
2695 adjust_put_table:
2696 dev_pm_opp_put_opp_table(opp_table);
2697 return r;
2698 }
2699 EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage);
2700
2701 /**
2702 * dev_pm_opp_enable() - Enable a specific OPP
2703 * @dev: device for which we do this operation
2704 * @freq: OPP frequency to enable
2705 *
2706 * Enables a provided opp. If the operation is valid, this returns 0, else the
2707 * corresponding error value. It is meant to be used by users to make an OPP
2708 * available again after it was temporarily made unavailable with dev_pm_opp_disable.
2709 *
2710 * Return: -EINVAL for bad pointers, -ENOMEM if no memory is available for the
2711 * copy operation, and 0 if no modification was needed or the modification was
2712 * successful.
2713 */
2714 int dev_pm_opp_enable(struct device *dev, unsigned long freq)
2715 {
2716 return _opp_set_availability(dev, freq, true);
2717 }
2718 EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
2719
2720 /**
2721 * dev_pm_opp_disable() - Disable a specific OPP
2722 * @dev: device for which we do this operation
2723 * @freq: OPP frequency to disable
2724 *
2725 * Disables a provided opp. If the operation is valid, this returns
2726 * 0, else the corresponding error value. It is meant to be a temporary
2727 * control by users to make this OPP not available until the circumstances are
2728 * right to make it available again (with a call to dev_pm_opp_enable).
2729 *
2730 * Return: -EINVAL for bad pointers, -ENOMEM if no memory is available for the
2731 * copy operation, and 0 if no modification was needed or the modification was
2732 * successful.
2733 */
2734 int dev_pm_opp_disable(struct device *dev, unsigned long freq)
2735 {
2736 return _opp_set_availability(dev, freq, false);
2737 }
2738 EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
2739
2740 /**
2741 * dev_pm_opp_register_notifier() - Register OPP notifier for the device
2742 * @dev: Device for which notifier needs to be registered
2743 * @nb: Notifier block to be registered
2744 *
2745 * Return: 0 on success or a negative error value.
2746 */
2747 int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
2748 {
2749 struct opp_table *opp_table;
2750 int ret;
2751
2752 opp_table = _find_opp_table(dev);
2753 if (IS_ERR(opp_table))
2754 return PTR_ERR(opp_table);
2755
2756 ret = blocking_notifier_chain_register(&opp_table->head, nb);
2757
2758 dev_pm_opp_put_opp_table(opp_table);
2759
2760 return ret;
2761 }
2762 EXPORT_SYMBOL(dev_pm_opp_register_notifier);
2763
2764 /**
2765 * dev_pm_opp_unregister_notifier() - Unregister OPP notifier for the device
2766 * @dev: Device for which notifier needs to be unregistered
2767 * @nb: Notifier block to be unregistered
2768 *
2769 * Return: 0 on success or a negative error value.
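 *
 * A minimal usage sketch pairing registration with unregistration
 * (illustrative; "my_opp_notify" and "my_opp_nb" are hypothetical names, the
 * notifier data pointer is the affected struct dev_pm_opp, and the event
 * argument is one of the OPP_EVENT_* values):
 *
 *	static int my_opp_notify(struct notifier_block *nb,
 *				 unsigned long event, void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_opp_nb = {
 *		.notifier_call = my_opp_notify,
 *	};
 *
 *	ret = dev_pm_opp_register_notifier(dev, &my_opp_nb);
 *	...
 *	dev_pm_opp_unregister_notifier(dev, &my_opp_nb);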
2770 */ 2771 int dev_pm_opp_unregister_notifier(struct device *dev, 2772 struct notifier_block *nb) 2773 { 2774 struct opp_table *opp_table; 2775 int ret; 2776 2777 opp_table = _find_opp_table(dev); 2778 if (IS_ERR(opp_table)) 2779 return PTR_ERR(opp_table); 2780 2781 ret = blocking_notifier_chain_unregister(&opp_table->head, nb); 2782 2783 dev_pm_opp_put_opp_table(opp_table); 2784 2785 return ret; 2786 } 2787 EXPORT_SYMBOL(dev_pm_opp_unregister_notifier); 2788 2789 /** 2790 * dev_pm_opp_remove_table() - Free all OPPs associated with the device 2791 * @dev: device pointer used to lookup OPP table. 2792 * 2793 * Free both OPPs created using static entries present in DT and the 2794 * dynamically added entries. 2795 */ 2796 void dev_pm_opp_remove_table(struct device *dev) 2797 { 2798 struct opp_table *opp_table; 2799 2800 /* Check for existing table for 'dev' */ 2801 opp_table = _find_opp_table(dev); 2802 if (IS_ERR(opp_table)) { 2803 int error = PTR_ERR(opp_table); 2804 2805 if (error != -ENODEV) 2806 WARN(1, "%s: opp_table: %d\n", 2807 IS_ERR_OR_NULL(dev) ? 2808 "Invalid device" : dev_name(dev), 2809 error); 2810 return; 2811 } 2812 2813 /* 2814 * Drop the extra reference only if the OPP table was successfully added 2815 * with dev_pm_opp_of_add_table() earlier. 2816 **/ 2817 if (_opp_remove_all_static(opp_table)) 2818 dev_pm_opp_put_opp_table(opp_table); 2819 2820 /* Drop reference taken by _find_opp_table() */ 2821 dev_pm_opp_put_opp_table(opp_table); 2822 } 2823 EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table); 2824 2825 /** 2826 * dev_pm_opp_sync_regulators() - Sync state of voltage regulators 2827 * @dev: device for which we do this operation 2828 * 2829 * Sync voltage state of the OPP table regulators. 2830 * 2831 * Return: 0 on success or a negative error value. 2832 */ 2833 int dev_pm_opp_sync_regulators(struct device *dev) 2834 { 2835 struct opp_table *opp_table; 2836 struct regulator *reg; 2837 int i, ret = 0; 2838 2839 /* Device may not have OPP table */ 2840 opp_table = _find_opp_table(dev); 2841 if (IS_ERR(opp_table)) 2842 return 0; 2843 2844 /* Regulator may not be required for the device */ 2845 if (unlikely(!opp_table->regulators)) 2846 goto put_table; 2847 2848 /* Nothing to sync if voltage wasn't changed */ 2849 if (!opp_table->enabled) 2850 goto put_table; 2851 2852 for (i = 0; i < opp_table->regulator_count; i++) { 2853 reg = opp_table->regulators[i]; 2854 ret = regulator_sync_voltage(reg); 2855 if (ret) 2856 break; 2857 } 2858 put_table: 2859 /* Drop reference taken by _find_opp_table() */ 2860 dev_pm_opp_put_opp_table(opp_table); 2861 2862 return ret; 2863 } 2864 EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators); 2865
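/*
 * Example (illustrative sketch): a consumer driver's resume callback could use
 * dev_pm_opp_sync_regulators() to bring the OPP-managed regulators back in
 * line after system suspend. "my_drv_resume" is a hypothetical name.
 *
 *	static int my_drv_resume(struct device *dev)
 *	{
 *		int ret;
 *
 *		ret = dev_pm_opp_sync_regulators(dev);
 *		if (ret)
 *			return ret;
 *
 *		return 0;
 *	}
 */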