// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>

#include "opp.h"

/*
 * The root of the list of all opp-tables. All opp_table structures branch off
 * from here, with each opp_table containing the list of opps it supports in
 * various states of availability.
 */
LIST_HEAD(opp_tables);
/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);
/* Flag indicating that opp_tables list is being updated at the moment */
static bool opp_tables_busy;

static bool _find_opp_dev(const struct device *dev, struct opp_table *opp_table)
{
	struct opp_device *opp_dev;
	bool found = false;

	mutex_lock(&opp_table->lock);
	list_for_each_entry(opp_dev, &opp_table->dev_list, node)
		if (opp_dev->dev == dev) {
			found = true;
			break;
		}

	mutex_unlock(&opp_table->lock);
	return found;
}

static struct opp_table *_find_opp_table_unlocked(struct device *dev)
{
	struct opp_table *opp_table;

	list_for_each_entry(opp_table, &opp_tables, node) {
		if (_find_opp_dev(dev, opp_table)) {
			_get_opp_table_kref(opp_table);
			return opp_table;
		}
	}

	return ERR_PTR(-ENODEV);
}

/**
 * _find_opp_table() - find opp_table struct using device pointer
 * @dev: device pointer used to lookup OPP table
 *
 * Search OPP table for one containing matching device.
 *
 * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
 * -EINVAL based on type of error.
 *
 * The callers must call dev_pm_opp_put_opp_table() after the table is used.
 */
struct opp_table *_find_opp_table(struct device *dev)
{
	struct opp_table *opp_table;

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&opp_table_lock);
	opp_table = _find_opp_table_unlocked(dev);
	mutex_unlock(&opp_table_lock);

	return opp_table;
}

/**
 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
 * @opp: opp for which the voltage has to be returned
 *
 * Return: voltage in micro volt corresponding to the opp, else
 * return 0
 *
 * This is useful only for devices with a single power supply.
 */
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->supplies[0].u_volt;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);

/**
 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
 * @opp: opp for which the frequency has to be returned
 *
 * Return: frequency in hertz corresponding to the opp, else
 * return 0
 */
unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->rate;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);

/**
 * dev_pm_opp_get_level() - Gets the level corresponding to an available opp
 * @opp: opp for which the level value has to be returned
 *
 * Return: level read from device tree corresponding to the opp, else
 * return 0.
 */
unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp) || !opp->available) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->level;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_level);

/**
 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
 * @opp: opp for which turbo mode is being verified
 *
 * Turbo OPPs are not for normal use, and can be enabled (under certain
 * conditions) for short durations of time to finish high-throughput work
 * quickly. Running on them for longer times may overheat the chip.
 *
 * Return: true if opp is turbo opp, else false.
 */
bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp) || !opp->available) {
		pr_err("%s: Invalid parameters\n", __func__);
		return false;
	}

	return opp->turbo;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);

/**
 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max clock latency in nanoseconds.
 */
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
{
	struct opp_table *opp_table;
	unsigned long clock_latency_ns;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	clock_latency_ns = opp_table->clock_latency_ns_max;

	dev_pm_opp_put_opp_table(opp_table);

	return clock_latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);

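/*
 * Example (illustrative only, not part of this file): a consumer that has
 * looked up an OPP can query its properties with the getters above, and
 * must drop the reference when done. "my_dev" and "target_hz" are
 * hypothetical placeholders.
 *
 *	struct dev_pm_opp *opp;
 *	unsigned long rate, volt;
 *
 *	opp = dev_pm_opp_find_freq_ceil(my_dev, &target_hz);
 *	if (!IS_ERR(opp)) {
 *		rate = dev_pm_opp_get_freq(opp);
 *		volt = dev_pm_opp_get_voltage(opp);
 *		dev_pm_opp_put(opp);
 *	}
 */
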
/**
 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max voltage latency in nanoseconds.
 */
unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp;
	struct regulator *reg;
	unsigned long latency_ns = 0;
	int ret, i, count;
	struct {
		unsigned long min;
		unsigned long max;
	} *uV;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	/* Regulator may not be required for the device */
	if (!opp_table->regulators)
		goto put_opp_table;

	count = opp_table->regulator_count;

	uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
	if (!uV)
		goto put_opp_table;

	mutex_lock(&opp_table->lock);

	for (i = 0; i < count; i++) {
		uV[i].min = ~0;
		uV[i].max = 0;

		list_for_each_entry(opp, &opp_table->opp_list, node) {
			if (!opp->available)
				continue;

			if (opp->supplies[i].u_volt_min < uV[i].min)
				uV[i].min = opp->supplies[i].u_volt_min;
			if (opp->supplies[i].u_volt_max > uV[i].max)
				uV[i].max = opp->supplies[i].u_volt_max;
		}
	}

	mutex_unlock(&opp_table->lock);

	/*
	 * The caller needs to ensure that opp_table (and hence the regulator)
	 * isn't freed, while we are executing this routine.
	 */
	for (i = 0; i < count; i++) {
		reg = opp_table->regulators[i];
		ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
		if (ret > 0)
			latency_ns += ret * 1000;
	}

	kfree(uV);
put_opp_table:
	dev_pm_opp_put_opp_table(opp_table);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);

/**
 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
 *					     nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max transition latency, in nanoseconds, to
 * switch from one OPP to another.
 */
unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
{
	return dev_pm_opp_get_max_volt_latency(dev) +
	       dev_pm_opp_get_max_clock_latency(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);

/**
 * dev_pm_opp_get_suspend_opp_freq() - Get frequency of suspend opp in Hz
 * @dev: device for which we do this operation
 *
 * Return: This function returns the frequency of the OPP marked as suspend_opp
 * if one is available, else returns 0.
 */
unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
{
	struct opp_table *opp_table;
	unsigned long freq = 0;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	if (opp_table->suspend_opp && opp_table->suspend_opp->available)
		freq = dev_pm_opp_get_freq(opp_table->suspend_opp);

	dev_pm_opp_put_opp_table(opp_table);

	return freq;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq);

int _get_opp_count(struct opp_table *opp_table)
{
	struct dev_pm_opp *opp;
	int count = 0;

	mutex_lock(&opp_table->lock);

	list_for_each_entry(opp, &opp_table->opp_list, node) {
		if (opp->available)
			count++;
	}

	mutex_unlock(&opp_table->lock);

	return count;
}

/**
 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
 * @dev: device for which we do this operation
 *
 * Return: This function returns the number of available opps if there are any,
 * else returns 0 if none or the corresponding error value.
 */
int dev_pm_opp_get_opp_count(struct device *dev)
{
	struct opp_table *opp_table;
	int count;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		count = PTR_ERR(opp_table);
		dev_dbg(dev, "%s: OPP table not found (%d)\n",
			__func__, count);
		return count;
	}

	count = _get_opp_count(opp_table);
	dev_pm_opp_put_opp_table(opp_table);

	return count;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);

/**
 * dev_pm_opp_find_freq_exact() - search for an exact frequency
 * @dev: device for which we do this operation
 * @freq: frequency to search for
 * @available: true/false - match for available opp
 *
 * Return: Searches for exact match in the opp table and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * Note: available is a modifier for the search. If available=true, then the
 * match is for the exact frequency which is available in the stored OPP
 * table. If false, the match is for the exact frequency which is not
 * available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
					      unsigned long freq,
					      bool available)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int r = PTR_ERR(opp_table);

		dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available == available &&
		    temp_opp->rate == freq) {
			opp = temp_opp;

			/* Increment the reference count of OPP */
			dev_pm_opp_get(opp);
			break;
		}
	}

	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);

/**
 * dev_pm_opp_find_level_exact() - search for an exact level
 * @dev: device for which we do this operation
 * @level: level to search for
 *
 * Return: Searches for exact match in the opp table and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
					       unsigned int level)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int r = PTR_ERR(opp_table);

		dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->level == level) {
			opp = temp_opp;

			/* Increment the reference count of OPP */
			dev_pm_opp_get(opp);
			break;
		}
	}

	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);

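/*
 * Example (hypothetical caller, for illustration only): probe whether a
 * given rate exists as a currently-available OPP, then drop the reference.
 * "my_dev" is a placeholder.
 *
 *	opp = dev_pm_opp_find_freq_exact(my_dev, 800000000, true);
 *	if (!IS_ERR(opp)) {
 *		... 800 MHz is listed and available ...
 *		dev_pm_opp_put(opp);
 *	}
 */
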
static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
						   unsigned long *freq)
{
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available && temp_opp->rate >= *freq) {
			opp = temp_opp;
			*freq = opp->rate;

			/* Increment the reference count of OPP */
			dev_pm_opp_get(opp);
			break;
		}
	}

	mutex_unlock(&opp_table->lock);

	return opp;
}

/**
 * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
 * @dev: device for which we do this operation
 * @freq: Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
					     unsigned long *freq)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp;

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);

	opp = _find_freq_ceil(opp_table, freq);

	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);

/**
 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
 * @dev: device for which we do this operation
 * @freq: Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
					      unsigned long *freq)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available) {
			/* go to the next node, before choosing prev */
			if (temp_opp->rate > *freq)
				break;
			else
				opp = temp_opp;
		}
	}

	/* Increment the reference count of OPP */
	if (!IS_ERR(opp))
		dev_pm_opp_get(opp);
	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);

	if (!IS_ERR(opp))
		*freq = opp->rate;

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);

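/*
 * Example (sketch of a typical consumer, not part of this file): since the
 * ceil lookup refreshes *freq to the matched rate, it can be used to walk
 * all available OPPs in ascending order.
 *
 *	unsigned long freq = 0;
 *	struct dev_pm_opp *opp;
 *
 *	while (!IS_ERR(opp = dev_pm_opp_find_freq_ceil(dev, &freq))) {
 *		... use freq / opp ...
 *		dev_pm_opp_put(opp);
 *		freq++;
 *	}
 */
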
/**
 * dev_pm_opp_find_freq_ceil_by_volt() - Find OPP with highest frequency for
 *					 target voltage.
 * @dev: Device for which we do this operation.
 * @u_volt: Target voltage.
 *
 * Search for the OPP with the highest (ceil) frequency whose voltage is
 * <= u_volt.
 *
 * Return: matching *opp, else returns ERR_PTR in case of error which should be
 * handled using IS_ERR.
 *
 * Error return values can be:
 * EINVAL: bad parameters
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
						     unsigned long u_volt)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	if (!dev || !u_volt) {
		dev_err(dev, "%s: Invalid argument volt=%lu\n", __func__,
			u_volt);
		return ERR_PTR(-EINVAL);
	}

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available) {
			if (temp_opp->supplies[0].u_volt > u_volt)
				break;
			opp = temp_opp;
		}
	}

	/* Increment the reference count of OPP */
	if (!IS_ERR(opp))
		dev_pm_opp_get(opp);

	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_by_volt);

static int _set_opp_voltage(struct device *dev, struct regulator *reg,
			    struct dev_pm_opp_supply *supply)
{
	int ret;

	/* Regulator not available for device */
	if (IS_ERR(reg)) {
		dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
			PTR_ERR(reg));
		return 0;
	}

	dev_dbg(dev, "%s: voltages (uV): %lu %lu %lu\n", __func__,
		supply->u_volt_min, supply->u_volt, supply->u_volt_max);

	ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
					    supply->u_volt, supply->u_volt_max);
	if (ret)
		dev_err(dev, "%s: failed to set voltage (%lu %lu %lu uV): %d\n",
			__func__, supply->u_volt_min, supply->u_volt,
			supply->u_volt_max, ret);

	return ret;
}

static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
					    unsigned long freq)
{
	int ret;

	ret = clk_set_rate(clk, freq);
	if (ret) {
		dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
			ret);
	}

	return ret;
}

static int _generic_set_opp_regulator(struct opp_table *opp_table,
				      struct device *dev,
				      unsigned long old_freq,
				      unsigned long freq,
				      struct dev_pm_opp_supply *old_supply,
				      struct dev_pm_opp_supply *new_supply)
{
	struct regulator *reg = opp_table->regulators[0];
	int ret;

	/* This function only supports a single regulator per device */
	if (WARN_ON(opp_table->regulator_count > 1)) {
		dev_err(dev, "multiple regulators are not supported\n");
		return -EINVAL;
	}

	/* Scaling up? Scale voltage before frequency */
	if (freq >= old_freq) {
		ret = _set_opp_voltage(dev, reg, new_supply);
		if (ret)
			goto restore_voltage;
	}

	/* Change frequency */
	ret = _generic_set_opp_clk_only(dev, opp_table->clk, freq);
	if (ret)
		goto restore_voltage;

	/* Scaling down? Scale voltage after frequency */
	if (freq < old_freq) {
		ret = _set_opp_voltage(dev, reg, new_supply);
		if (ret)
			goto restore_freq;
	}

	/*
	 * Enable the regulator after setting its voltages, otherwise it breaks
	 * some boot-enabled regulators.
	 */
	if (unlikely(!opp_table->enabled)) {
		ret = regulator_enable(reg);
		if (ret < 0)
			dev_warn(dev, "Failed to enable regulator: %d", ret);
	}

	return 0;

restore_freq:
	if (_generic_set_opp_clk_only(dev, opp_table->clk, old_freq))
		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
			__func__, old_freq);
restore_voltage:
	/* This shouldn't harm even if the voltages weren't updated earlier */
	if (old_supply)
		_set_opp_voltage(dev, reg, old_supply);

	return ret;
}

static int _set_opp_bw(const struct opp_table *opp_table,
		       struct dev_pm_opp *opp, struct device *dev, bool remove)
{
	u32 avg, peak;
	int i, ret;

	if (!opp_table->paths)
		return 0;

	for (i = 0; i < opp_table->path_count; i++) {
		if (remove) {
			avg = 0;
			peak = 0;
		} else {
			avg = opp->bandwidth[i].avg;
			peak = opp->bandwidth[i].peak;
		}
		ret = icc_set_bw(opp_table->paths[i], avg, peak);
		if (ret) {
			dev_err(dev, "Failed to %s bandwidth[%d]: %d\n",
				remove ? "remove" : "set", i, ret);
			return ret;
		}
	}

	return 0;
}

static int _set_opp_custom(const struct opp_table *opp_table,
			   struct device *dev, unsigned long old_freq,
			   unsigned long freq,
			   struct dev_pm_opp_supply *old_supply,
			   struct dev_pm_opp_supply *new_supply)
{
	struct dev_pm_set_opp_data *data;
	int size;

	data = opp_table->set_opp_data;
	data->regulators = opp_table->regulators;
	data->regulator_count = opp_table->regulator_count;
	data->clk = opp_table->clk;
	data->dev = dev;

	data->old_opp.rate = old_freq;
	size = sizeof(*old_supply) * opp_table->regulator_count;
	if (!old_supply)
		memset(data->old_opp.supplies, 0, size);
	else
		memcpy(data->old_opp.supplies, old_supply, size);

	data->new_opp.rate = freq;
	memcpy(data->new_opp.supplies, new_supply, size);

	return opp_table->set_opp(data);
}

static int _set_required_opp(struct device *dev, struct device *pd_dev,
			     struct dev_pm_opp *opp, int i)
{
	unsigned int pstate = likely(opp) ? opp->required_opps[i]->pstate : 0;
	int ret;

	if (!pd_dev)
		return 0;

	ret = dev_pm_genpd_set_performance_state(pd_dev, pstate);
	if (ret) {
		dev_err(dev, "Failed to set performance state of %s: %d (%d)\n",
			dev_name(pd_dev), pstate, ret);
	}

	return ret;
}

/* This is only called for PM domain for now */
static int _set_required_opps(struct device *dev,
			      struct opp_table *opp_table,
			      struct dev_pm_opp *opp, bool up)
{
	struct opp_table **required_opp_tables = opp_table->required_opp_tables;
	struct device **genpd_virt_devs = opp_table->genpd_virt_devs;
	int i, ret = 0;

	if (!required_opp_tables)
		return 0;

	/* Single genpd case */
	if (!genpd_virt_devs)
		return _set_required_opp(dev, dev, opp, 0);

	/* Multiple genpd case */

	/*
	 * Acquire genpd_virt_dev_lock to make sure we don't use a genpd_dev
	 * after it is freed from another thread.
	 */
	mutex_lock(&opp_table->genpd_virt_dev_lock);

	/* Scaling up? Set required OPPs in normal order, else reverse */
	if (up) {
		for (i = 0; i < opp_table->required_opp_count; i++) {
			ret = _set_required_opp(dev, genpd_virt_devs[i], opp, i);
			if (ret)
				break;
		}
	} else {
		for (i = opp_table->required_opp_count - 1; i >= 0; i--) {
			ret = _set_required_opp(dev, genpd_virt_devs[i], opp, i);
			if (ret)
				break;
		}
	}

	mutex_unlock(&opp_table->genpd_virt_dev_lock);

	return ret;
}

/**
 * dev_pm_opp_set_bw() - sets bandwidth levels corresponding to an opp
 * @dev: device for which we do this operation
 * @opp: opp based on which the bandwidth levels are to be configured
 *
 * This configures the bandwidth to the levels specified by the OPP. However,
 * if the OPP specified is NULL, the bandwidth levels are cleared out.
 *
 * Return: 0 on success or a negative error value.
 */
int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device opp table doesn't exist\n", __func__);
		return PTR_ERR(opp_table);
	}

	if (opp)
		ret = _set_opp_bw(opp_table, opp, dev, false);
	else
		ret = _set_opp_bw(opp_table, NULL, dev, true);

	dev_pm_opp_put_opp_table(opp_table);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_bw);

static int _opp_set_rate_zero(struct device *dev, struct opp_table *opp_table)
{
	int ret;

	if (!opp_table->enabled)
		return 0;

	/*
	 * Some drivers need to support cases where some platforms may
	 * have OPP table for the device, while others don't and
	 * opp_set_rate() just needs to behave like clk_set_rate().
	 */
	if (!_get_opp_count(opp_table))
		return 0;

	ret = _set_opp_bw(opp_table, NULL, dev, true);
	if (ret)
		return ret;

	if (opp_table->regulators)
		regulator_disable(opp_table->regulators[0]);

	ret = _set_required_opps(dev, opp_table, NULL, false);

	opp_table->enabled = false;
	return ret;
}

/**
 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
 * @dev: device for which we do this operation
 * @target_freq: frequency to achieve
 *
 * This configures the power-supplies to the levels specified by the OPP
 * corresponding to the target_freq, and programs the clock to a value <=
 * target_freq, as rounded by clk_round_rate(). A device wanting to run at the
 * fmax provided by an OPP should have already rounded to the target OPP's
 * frequency.
 */
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
	struct opp_table *opp_table;
	unsigned long freq, old_freq, temp_freq;
	struct dev_pm_opp *old_opp, *opp;
	struct clk *clk;
	int ret;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
		return PTR_ERR(opp_table);
	}

	if (unlikely(!target_freq)) {
		ret = _opp_set_rate_zero(dev, opp_table);
		goto put_opp_table;
	}

	clk = opp_table->clk;
	if (IS_ERR(clk)) {
		dev_err(dev, "%s: No clock available for the device\n",
			__func__);
		ret = PTR_ERR(clk);
		goto put_opp_table;
	}

	freq = clk_round_rate(clk, target_freq);
	if ((long)freq <= 0)
		freq = target_freq;

	old_freq = clk_get_rate(clk);

	/* Return early if nothing to do */
	if (opp_table->enabled && old_freq == freq) {
		dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
			__func__, freq);
		ret = 0;
		goto put_opp_table;
	}

	/*
	 * For IO devices which require an OPP on some platforms/SoCs
	 * while just needing to scale the clock on some others
	 * we look for empty OPP tables with just a clock handle and
	 * scale only the clk. This makes dev_pm_opp_set_rate()
	 * equivalent to a clk_set_rate().
	 */
	if (!_get_opp_count(opp_table)) {
		ret = _generic_set_opp_clk_only(dev, clk, freq);
		goto put_opp_table;
	}

	temp_freq = old_freq;
	old_opp = _find_freq_ceil(opp_table, &temp_freq);
	if (IS_ERR(old_opp)) {
		dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
			__func__, old_freq, PTR_ERR(old_opp));
	}

	temp_freq = freq;
	opp = _find_freq_ceil(opp_table, &temp_freq);
	if (IS_ERR(opp)) {
		ret = PTR_ERR(opp);
		dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
			__func__, freq, ret);
		goto put_old_opp;
	}

	dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__,
		old_freq, freq);

	/* Scaling up? Configure required OPPs before frequency */
	if (freq >= old_freq) {
		ret = _set_required_opps(dev, opp_table, opp, true);
		if (ret)
			goto put_opp;
	}

	if (opp_table->set_opp) {
		ret = _set_opp_custom(opp_table, dev, old_freq, freq,
				      IS_ERR(old_opp) ? NULL : old_opp->supplies,
				      opp->supplies);
	} else if (opp_table->regulators) {
		ret = _generic_set_opp_regulator(opp_table, dev, old_freq, freq,
						 IS_ERR(old_opp) ? NULL : old_opp->supplies,
						 opp->supplies);
	} else {
		/* Only frequency scaling */
		ret = _generic_set_opp_clk_only(dev, clk, freq);
	}

	/* Scaling down? Configure required OPPs after frequency */
	if (!ret && freq < old_freq) {
		ret = _set_required_opps(dev, opp_table, opp, false);
		if (ret)
			dev_err(dev, "Failed to set required opps: %d\n", ret);
	}

	if (!ret) {
		ret = _set_opp_bw(opp_table, opp, dev, false);
		if (!ret)
			opp_table->enabled = true;
	}

put_opp:
	dev_pm_opp_put(opp);
put_old_opp:
	if (!IS_ERR(old_opp))
		dev_pm_opp_put(old_opp);
put_opp_table:
	dev_pm_opp_put_opp_table(opp_table);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);

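/*
 * Example (sketch of a typical consumer, e.g. a devfreq- or cpufreq-style
 * driver; not part of this file): pick the ceil OPP for a requested rate
 * and let dev_pm_opp_set_rate() handle regulators, clock and interconnect
 * votes. "df_dev" and "rate" are placeholders.
 *
 *	struct dev_pm_opp *opp;
 *
 *	opp = dev_pm_opp_find_freq_ceil(df_dev, &rate);
 *	if (IS_ERR(opp))
 *		return PTR_ERR(opp);
 *	dev_pm_opp_put(opp);
 *
 *	return dev_pm_opp_set_rate(df_dev, rate);
 */
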
/* OPP-dev Helpers */
static void _remove_opp_dev(struct opp_device *opp_dev,
			    struct opp_table *opp_table)
{
	opp_debug_unregister(opp_dev, opp_table);
	list_del(&opp_dev->node);
	kfree(opp_dev);
}

struct opp_device *_add_opp_dev(const struct device *dev,
				struct opp_table *opp_table)
{
	struct opp_device *opp_dev;

	opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
	if (!opp_dev)
		return NULL;

	/* Initialize opp-dev */
	opp_dev->dev = dev;

	mutex_lock(&opp_table->lock);
	list_add(&opp_dev->node, &opp_table->dev_list);
	mutex_unlock(&opp_table->lock);

	/* Create debugfs entries for the opp_table */
	opp_debug_register(opp_dev, opp_table);

	return opp_dev;
}

static struct opp_table *_allocate_opp_table(struct device *dev, int index)
{
	struct opp_table *opp_table;
	struct opp_device *opp_dev;
	int ret;

	/*
	 * Allocate a new OPP table. In the infrequent case where a new
	 * device needs to be added, we pay this penalty.
	 */
	opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
	if (!opp_table)
		return ERR_PTR(-ENOMEM);

	mutex_init(&opp_table->lock);
	mutex_init(&opp_table->genpd_virt_dev_lock);
	INIT_LIST_HEAD(&opp_table->dev_list);

	/* Mark regulator count uninitialized */
	opp_table->regulator_count = -1;

	opp_dev = _add_opp_dev(dev, opp_table);
	if (!opp_dev) {
		ret = -ENOMEM;
		goto err;
	}

	_of_init_opp_table(opp_table, dev, index);

	/* Find clk for the device */
	opp_table->clk = clk_get(dev, NULL);
	if (IS_ERR(opp_table->clk)) {
		ret = PTR_ERR(opp_table->clk);
		if (ret == -EPROBE_DEFER)
			goto err;

		dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
	}

	/* Find interconnect path(s) for the device */
	ret = dev_pm_opp_of_find_icc_paths(dev, opp_table);
	if (ret) {
		if (ret == -EPROBE_DEFER)
			goto err;

		dev_warn(dev, "%s: Error finding interconnect paths: %d\n",
			 __func__, ret);
	}

	BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
	INIT_LIST_HEAD(&opp_table->opp_list);
	kref_init(&opp_table->kref);

	return opp_table;

err:
	kfree(opp_table);
	return ERR_PTR(ret);
}

void _get_opp_table_kref(struct opp_table *opp_table)
{
	kref_get(&opp_table->kref);
}

/*
 * We need to make sure that the OPP table for a device doesn't get added twice,
 * if this routine gets called in parallel with the same device pointer.
 *
 * The simplest way to enforce that is to perform everything (find existing
 * table and if not found, create a new one) under the opp_table_lock, so only
 * one creator gets access to it. But that expands the critical section
 * under the lock and may end up causing circular dependencies with frameworks
 * like debugfs, interconnect or clock framework as they may be direct or
 * indirect users of OPP core.
 *
 * For that reason we use a slightly tricky implementation here, which
 * uses the opp_tables_busy flag to indicate if another creator is in the middle
 * of adding an OPP table and others should wait for it to finish.
 */
struct opp_table *_add_opp_table_indexed(struct device *dev, int index)
{
	struct opp_table *opp_table;

again:
	mutex_lock(&opp_table_lock);

	opp_table = _find_opp_table_unlocked(dev);
	if (!IS_ERR(opp_table))
		goto unlock;

	/*
	 * The opp_tables list or an OPP table's dev_list is getting updated by
	 * another user, wait for it to finish.
	 */
	if (unlikely(opp_tables_busy)) {
		mutex_unlock(&opp_table_lock);
		cpu_relax();
		goto again;
	}

	opp_tables_busy = true;
	opp_table = _managed_opp(dev, index);

	/* Drop the lock to reduce the size of critical section */
	mutex_unlock(&opp_table_lock);

	if (opp_table) {
		if (!_add_opp_dev(dev, opp_table)) {
			dev_pm_opp_put_opp_table(opp_table);
			opp_table = ERR_PTR(-ENOMEM);
		}

		mutex_lock(&opp_table_lock);
	} else {
		opp_table = _allocate_opp_table(dev, index);

		mutex_lock(&opp_table_lock);
		if (!IS_ERR(opp_table))
			list_add(&opp_table->node, &opp_tables);
	}

	opp_tables_busy = false;

unlock:
	mutex_unlock(&opp_table_lock);

	return opp_table;
}

struct opp_table *_add_opp_table(struct device *dev)
{
	return _add_opp_table_indexed(dev, 0);
}

struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
{
	return _find_opp_table(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);

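/*
 * Example (illustrative only): dev_pm_opp_get_opp_table() takes a reference
 * on the table, which must be balanced with dev_pm_opp_put_opp_table().
 *
 *	struct opp_table *table;
 *
 *	table = dev_pm_opp_get_opp_table(dev);
 *	if (!IS_ERR(table)) {
 *		... use the table ...
 *		dev_pm_opp_put_opp_table(table);
 *	}
 */
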
static void _opp_table_kref_release(struct kref *kref)
{
	struct opp_table *opp_table = container_of(kref, struct opp_table, kref);
	struct opp_device *opp_dev, *temp;
	int i;

	/* Drop the lock as soon as we can */
	list_del(&opp_table->node);
	mutex_unlock(&opp_table_lock);

	_of_clear_opp_table(opp_table);

	/* Release clk */
	if (!IS_ERR(opp_table->clk))
		clk_put(opp_table->clk);

	if (opp_table->paths) {
		for (i = 0; i < opp_table->path_count; i++)
			icc_put(opp_table->paths[i]);
		kfree(opp_table->paths);
	}

	WARN_ON(!list_empty(&opp_table->opp_list));

	list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node) {
		/*
		 * The OPP table is getting removed, drop the performance state
		 * constraints.
		 */
		if (opp_table->genpd_performance_state)
			dev_pm_genpd_set_performance_state((struct device *)(opp_dev->dev), 0);

		_remove_opp_dev(opp_dev, opp_table);
	}

	mutex_destroy(&opp_table->genpd_virt_dev_lock);
	mutex_destroy(&opp_table->lock);
	kfree(opp_table);
}

void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
{
	kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
		       &opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_opp_table);

void _opp_free(struct dev_pm_opp *opp)
{
	kfree(opp);
}

static void _opp_kref_release(struct kref *kref)
{
	struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
	struct opp_table *opp_table = opp->opp_table;

	list_del(&opp->node);
	mutex_unlock(&opp_table->lock);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
	_of_opp_free_required_opps(opp_table, opp);
	opp_debug_remove_one(opp);
	kfree(opp);
}

void dev_pm_opp_get(struct dev_pm_opp *opp)
{
	kref_get(&opp->kref);
}

void dev_pm_opp_put(struct dev_pm_opp *opp)
{
	kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put);

/**
 * dev_pm_opp_remove() - Remove an OPP from OPP table
 * @dev: device for which we do this operation
 * @freq: OPP to remove with matching 'freq'
 *
 * This function removes an opp from the opp table.
 */
void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
	struct opp_table *opp_table;
	bool found = false;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return;

	mutex_lock(&opp_table->lock);

	list_for_each_entry(opp, &opp_table->opp_list, node) {
		if (opp->rate == freq) {
			found = true;
			break;
		}
	}

	mutex_unlock(&opp_table->lock);

	if (found) {
		dev_pm_opp_put(opp);

		/* Drop the reference taken by dev_pm_opp_add() */
		dev_pm_opp_put_opp_table(opp_table);
	} else {
		dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
			 __func__, freq);
	}

	/* Drop the reference taken by _find_opp_table() */
	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);

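/*
 * Example (illustrative sketch; dev_pm_opp_add() is defined elsewhere in
 * the OPP core): a driver can register dynamic OPPs at probe time and
 * remove them again by frequency. The frequencies/voltages below are
 * placeholders.
 *
 *	dev_pm_opp_add(dev, 400000000, 950000);
 *	dev_pm_opp_add(dev, 800000000, 1100000);
 *	...
 *	dev_pm_opp_remove(dev, 400000000);
 *	dev_pm_opp_remove(dev, 800000000);
 */
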
static struct dev_pm_opp *_opp_get_next(struct opp_table *opp_table,
					bool dynamic)
{
	struct dev_pm_opp *opp = NULL, *temp;

	mutex_lock(&opp_table->lock);
	list_for_each_entry(temp, &opp_table->opp_list, node) {
		if (dynamic == temp->dynamic) {
			opp = temp;
			break;
		}
	}

	mutex_unlock(&opp_table->lock);
	return opp;
}

bool _opp_remove_all_static(struct opp_table *opp_table)
{
	struct dev_pm_opp *opp;

	mutex_lock(&opp_table->lock);

	if (!opp_table->parsed_static_opps) {
		mutex_unlock(&opp_table->lock);
		return false;
	}

	if (--opp_table->parsed_static_opps) {
		mutex_unlock(&opp_table->lock);
		return true;
	}

	mutex_unlock(&opp_table->lock);

	/*
	 * Can't remove the OPP from under the lock, debugfs removal needs to
	 * happen lockless to avoid circular dependency issues.
	 */
	while ((opp = _opp_get_next(opp_table, false)))
		dev_pm_opp_put(opp);

	return true;
}

/**
 * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs
 * @dev: device for which we do this operation
 *
 * This function removes all dynamically created OPPs from the opp table.
 */
void dev_pm_opp_remove_all_dynamic(struct device *dev)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp;
	int count = 0;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return;

	/*
	 * Can't remove the OPP from under the lock, debugfs removal needs to
	 * happen lockless to avoid circular dependency issues.
	 */
	while ((opp = _opp_get_next(opp_table, true))) {
		dev_pm_opp_put(opp);
		count++;
	}

	/* Drop the references taken by dev_pm_opp_add() */
	while (count--)
		dev_pm_opp_put_opp_table(opp_table);

	/* Drop the reference taken by _find_opp_table() */
	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic);

struct dev_pm_opp *_opp_allocate(struct opp_table *table)
{
	struct dev_pm_opp *opp;
	int supply_count, supply_size, icc_size;

	/* Allocate space for at least one supply */
	supply_count = table->regulator_count > 0 ? table->regulator_count : 1;
	supply_size = sizeof(*opp->supplies) * supply_count;
	icc_size = sizeof(*opp->bandwidth) * table->path_count;

	/* allocate new OPP node and supplies structures */
	opp = kzalloc(sizeof(*opp) + supply_size + icc_size, GFP_KERNEL);

	if (!opp)
		return NULL;

	/* Put the supplies at the end of the OPP structure as an empty array */
	opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
	if (icc_size)
		opp->bandwidth = (struct dev_pm_opp_icc_bw *)(opp->supplies + supply_count);
	INIT_LIST_HEAD(&opp->node);

	return opp;
}

static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
					 struct opp_table *opp_table)
{
	struct regulator *reg;
	int i;

	if (!opp_table->regulators)
		return true;

	for (i = 0; i < opp_table->regulator_count; i++) {
		reg = opp_table->regulators[i];

		if (!regulator_is_supported_voltage(reg,
					opp->supplies[i].u_volt_min,
					opp->supplies[i].u_volt_max)) {
			pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
				__func__, opp->supplies[i].u_volt_min,
				opp->supplies[i].u_volt_max);
			return false;
		}
	}

	return true;
}

int _opp_compare_key(struct dev_pm_opp *opp1, struct dev_pm_opp *opp2)
{
	if (opp1->rate != opp2->rate)
		return opp1->rate < opp2->rate ? -1 : 1;
	if (opp1->bandwidth && opp2->bandwidth &&
	    opp1->bandwidth[0].peak != opp2->bandwidth[0].peak)
		return opp1->bandwidth[0].peak < opp2->bandwidth[0].peak ? -1 : 1;
	if (opp1->level != opp2->level)
		return opp1->level < opp2->level ? -1 : 1;
	return 0;
}

static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
			     struct opp_table *opp_table,
			     struct list_head **head)
{
	struct dev_pm_opp *opp;
	int opp_cmp;

	/*
	 * Insert new OPP in order of increasing frequency and discard if
	 * already present.
	 *
	 * Need to use &opp_table->opp_list in the condition part of the 'for'
	 * loop, don't replace it with head otherwise it will become an infinite
	 * loop.
	 */
	list_for_each_entry(opp, &opp_table->opp_list, node) {
		opp_cmp = _opp_compare_key(new_opp, opp);
		if (opp_cmp > 0) {
			*head = &opp->node;
			continue;
		}

		if (opp_cmp < 0)
			return 0;

		/* Duplicate OPPs */
		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
			 __func__, opp->rate, opp->supplies[0].u_volt,
			 opp->available, new_opp->rate,
			 new_opp->supplies[0].u_volt, new_opp->available);

		/* Should we compare voltages for all regulators here ? */
		return opp->available &&
		       new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST;
	}

	return 0;
}

/*
 * Returns:
 * 0: On success. An appropriate error message is printed for duplicate OPPs.
 * -EBUSY: For OPP with same freq/volt and is available. The callers of
 *  _opp_add() must return 0 if they receive -EBUSY from it. This is to make
 *  sure we don't print error messages unnecessarily if different parts of
 *  kernel try to initialize the OPP table.
 * -EEXIST: For OPP with same freq but different volt or is unavailable. This
 *  should be considered an error by the callers of _opp_add().
 */
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
	     struct opp_table *opp_table, bool rate_not_available)
{
	struct list_head *head;
	int ret;

	mutex_lock(&opp_table->lock);
	head = &opp_table->opp_list;

	if (likely(!rate_not_available)) {
		ret = _opp_is_duplicate(dev, new_opp, opp_table, &head);
		if (ret) {
			mutex_unlock(&opp_table->lock);
			return ret;
		}
	}

	list_add(&new_opp->node, head);
	mutex_unlock(&opp_table->lock);

	new_opp->opp_table = opp_table;
	kref_init(&new_opp->kref);

	opp_debug_create_one(new_opp, opp_table);

	if (!_opp_supported_by_regulators(new_opp, opp_table)) {
		new_opp->available = false;
		dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
			 __func__, new_opp->rate);
	}

	return 0;
}

/**
 * _opp_add_v1() - Allocate an OPP based on v1 bindings.
 * @opp_table: OPP table
 * @dev: device for which we do this operation
 * @freq: Frequency in Hz for this OPP
 * @u_volt: Voltage in uVolts for this OPP
 * @dynamic: Dynamically added OPPs.
 *
 * This function adds an opp definition to the opp table and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
 *
 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
 * and freed by dev_pm_opp_of_remove_table.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
		unsigned long freq, long u_volt, bool dynamic)
{
	struct dev_pm_opp *new_opp;
	unsigned long tol;
	int ret;

	new_opp = _opp_allocate(opp_table);
	if (!new_opp)
		return -ENOMEM;

	/* populate the opp table */
	new_opp->rate = freq;
	tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
	new_opp->supplies[0].u_volt = u_volt;
	new_opp->supplies[0].u_volt_min = u_volt - tol;
	new_opp->supplies[0].u_volt_max = u_volt + tol;
	new_opp->available = true;
	new_opp->dynamic = dynamic;

	ret = _opp_add(dev, new_opp, opp_table, false);
	if (ret) {
		/* Don't return error for duplicate OPPs */
		if (ret == -EBUSY)
			ret = 0;
		goto free_opp;
	}

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	_opp_free(new_opp);

	return ret;
}

/**
 * dev_pm_opp_set_supported_hw() - Set supported platforms
 * @dev: Device for which supported-hw has to be set.
 * @versions: Array of hierarchy of versions to match.
 * @count: Number of elements in the array.
 *
 * This is required only for the V2 bindings, and it enables a platform to
 * specify the hierarchy of versions it supports. OPP layer will then enable
 * OPPs, which are available for those versions, based on its 'opp-supported-hw'
 * property.
 */
struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
					      const u32 *versions, unsigned int count)
{
	struct opp_table *opp_table;

	opp_table = _add_opp_table(dev);
	if (IS_ERR(opp_table))
		return opp_table;

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	/* Has another CPU that shares the OPP table already set the property? */
	if (opp_table->supported_hw)
		return opp_table;

	opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
					  GFP_KERNEL);
	if (!opp_table->supported_hw) {
		dev_pm_opp_put_opp_table(opp_table);
		return ERR_PTR(-ENOMEM);
	}

	opp_table->supported_hw_count = count;

	return opp_table;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);

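/*
 * Example (hypothetical platform code, for illustration): advertise the
 * hardware version hierarchy before the OPP table is parsed, so that
 * 'opp-supported-hw' filtering can be applied. The values are placeholders.
 *
 *	static const u32 hw_versions[] = { 0xF, 0x3 };
 *	struct opp_table *table;
 *
 *	table = dev_pm_opp_set_supported_hw(dev, hw_versions,
 *					    ARRAY_SIZE(hw_versions));
 *	if (IS_ERR(table))
 *		return PTR_ERR(table);
 *	...
 *	dev_pm_opp_put_supported_hw(table);
 */
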
/**
 * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
 * @opp_table: OPP table returned by dev_pm_opp_set_supported_hw().
 *
 * This is required only for the V2 bindings, and is called for a matching
 * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
 * will not be freed.
 */
void dev_pm_opp_put_supported_hw(struct opp_table *opp_table)
{
	if (unlikely(!opp_table))
		return;

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	kfree(opp_table->supported_hw);
	opp_table->supported_hw = NULL;
	opp_table->supported_hw_count = 0;

	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);

/**
 * dev_pm_opp_set_prop_name() - Set prop-extn name
 * @dev: Device for which the prop-name has to be set.
 * @name: name to postfix to properties.
 *
 * This is required only for the V2 bindings, and it enables a platform to
 * specify the extn to be used for certain property names. The properties to
 * which the extension will apply are opp-microvolt and opp-microamp. OPP core
 * should postfix the property name with -<name> while looking for them.
 */
struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
{
	struct opp_table *opp_table;

	opp_table = _add_opp_table(dev);
	if (IS_ERR(opp_table))
		return opp_table;

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	/* Has another CPU that shares the OPP table already set the property? */
	if (opp_table->prop_name)
		return opp_table;

	opp_table->prop_name = kstrdup(name, GFP_KERNEL);
	if (!opp_table->prop_name) {
		dev_pm_opp_put_opp_table(opp_table);
		return ERR_PTR(-ENOMEM);
	}

	return opp_table;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);

/**
 * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
 * @opp_table: OPP table returned by dev_pm_opp_set_prop_name().
 *
 * This is required only for the V2 bindings, and is called for a matching
 * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
 * will not be freed.
 */
void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
{
	if (unlikely(!opp_table))
		return;

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	kfree(opp_table->prop_name);
	opp_table->prop_name = NULL;

	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);

static int _allocate_set_opp_data(struct opp_table *opp_table)
{
	struct dev_pm_set_opp_data *data;
	int len, count = opp_table->regulator_count;

	if (WARN_ON(!opp_table->regulators))
		return -EINVAL;

	/* space for set_opp_data */
	len = sizeof(*data);

	/* space for old_opp.supplies and new_opp.supplies */
	len += 2 * sizeof(struct dev_pm_opp_supply) * count;

	data = kzalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->old_opp.supplies = (void *)(data + 1);
	data->new_opp.supplies = data->old_opp.supplies + count;

	opp_table->set_opp_data = data;

	return 0;
}

static void _free_set_opp_data(struct opp_table *opp_table)
{
	kfree(opp_table->set_opp_data);
	opp_table->set_opp_data = NULL;
}

/**
 * dev_pm_opp_set_regulators() - Set regulator names for the device
 * @dev: Device for which regulator name is being set.
 * @names: Array of pointers to the names of the regulators.
 * @count: Number of regulators.
 *
 * In order to support OPP switching, the OPP layer needs to know the names of
 * the device's regulators, as the core would be required to switch voltages as
 * well.
 *
 * This must be called before any OPPs are initialized for the device.
 */
struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
					    const char * const names[],
					    unsigned int count)
{
	struct opp_table *opp_table;
	struct regulator *reg;
	int ret, i;

	opp_table = _add_opp_table(dev);
	if (IS_ERR(opp_table))
		return opp_table;

	/* This should be called before OPPs are initialized */
	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
		ret = -EBUSY;
		goto err;
	}

	/* Has another CPU that shares the OPP table already set the regulators? */
	if (opp_table->regulators)
		return opp_table;

	opp_table->regulators = kmalloc_array(count,
					      sizeof(*opp_table->regulators),
					      GFP_KERNEL);
	if (!opp_table->regulators) {
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < count; i++) {
		reg = regulator_get_optional(dev, names[i]);
		if (IS_ERR(reg)) {
			ret = PTR_ERR(reg);
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "%s: no regulator (%s) found: %d\n",
					__func__, names[i], ret);
			goto free_regulators;
		}

		opp_table->regulators[i] = reg;
	}

	opp_table->regulator_count = count;

	/* Allocate block only once to pass to set_opp() routines */
	ret = _allocate_set_opp_data(opp_table);
	if (ret)
		goto free_regulators;

	return opp_table;

free_regulators:
	while (i != 0)
		regulator_put(opp_table->regulators[--i]);

	kfree(opp_table->regulators);
	opp_table->regulators = NULL;
	opp_table->regulator_count = -1;
err:
	dev_pm_opp_put_opp_table(opp_table);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulators);

/**
 * dev_pm_opp_put_regulators() - Releases resources blocked for regulator
 * @opp_table: OPP table returned from dev_pm_opp_set_regulators().
 */
void dev_pm_opp_put_regulators(struct opp_table *opp_table)
{
	int i;

	if (unlikely(!opp_table))
		return;

	if (!opp_table->regulators)
		goto put_opp_table;

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	if (opp_table->enabled) {
		for (i = opp_table->regulator_count - 1; i >= 0; i--)
			regulator_disable(opp_table->regulators[i]);
	}

	for (i = opp_table->regulator_count - 1; i >= 0; i--)
		regulator_put(opp_table->regulators[i]);

	_free_set_opp_data(opp_table);

	kfree(opp_table->regulators);
	opp_table->regulators = NULL;
	opp_table->regulator_count = -1;

put_opp_table:
	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators);

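/*
 * Example (hypothetical probe code, for illustration): tell the OPP core
 * which supplies it should scale along with the clock. The regulator names
 * below are placeholders matching DT "-supply" entries. Note that with more
 * than one regulator, a custom helper (see
 * dev_pm_opp_register_set_opp_helper() below) is also required, as the
 * generic path only handles a single regulator.
 *
 *	static const char * const reg_names[] = { "vdd", "vddio" };
 *	struct opp_table *table;
 *
 *	table = dev_pm_opp_set_regulators(dev, reg_names,
 *					  ARRAY_SIZE(reg_names));
 *	if (IS_ERR(table))
 *		return PTR_ERR(table);
 *	...
 *	dev_pm_opp_put_regulators(table);
 */
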
/**
 * dev_pm_opp_set_clkname() - Set clk name for the device
 * @dev: Device for which clk name is being set.
 * @name: Clk name.
 *
 * In order to support OPP switching, the OPP layer needs to get a pointer to
 * the clock for the device. Simple cases work fine without using this routine
 * (i.e. by passing connection-id as NULL), but for a device with multiple
 * clocks available, the OPP core needs to know the exact name of the clk to
 * use.
 *
 * This must be called before any OPPs are initialized for the device.
 */
struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = _add_opp_table(dev);
	if (IS_ERR(opp_table))
		return opp_table;

	/* This should be called before OPPs are initialized */
	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
		ret = -EBUSY;
		goto err;
	}

	/* Already have default clk set, free it */
	if (!IS_ERR(opp_table->clk))
		clk_put(opp_table->clk);

	/* Find clk for the device */
	opp_table->clk = clk_get(dev, name);
	if (IS_ERR(opp_table->clk)) {
		ret = PTR_ERR(opp_table->clk);
		if (ret != -EPROBE_DEFER) {
			dev_err(dev, "%s: Couldn't find clock: %d\n", __func__,
				ret);
		}
		goto err;
	}

	return opp_table;

err:
	dev_pm_opp_put_opp_table(opp_table);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_clkname);

/**
 * dev_pm_opp_put_clkname() - Releases resources blocked for clk.
 * @opp_table: OPP table returned from dev_pm_opp_set_clkname().
 */
void dev_pm_opp_put_clkname(struct opp_table *opp_table)
{
	if (unlikely(!opp_table))
		return;

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	clk_put(opp_table->clk);
	opp_table->clk = ERR_PTR(-EINVAL);

	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_clkname);

/**
 * dev_pm_opp_register_set_opp_helper() - Register custom set OPP helper
 * @dev: Device for which the helper is getting registered.
 * @set_opp: Custom set OPP helper.
 *
 * This is useful to support complex platforms (like platforms with multiple
 * regulators per device), instead of the generic OPP set rate helper.
 *
 * This must be called before any OPPs are initialized for the device.
 */
struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
			int (*set_opp)(struct dev_pm_set_opp_data *data))
{
	struct opp_table *opp_table;

	if (!set_opp)
		return ERR_PTR(-EINVAL);

	opp_table = _add_opp_table(dev);
	if (IS_ERR(opp_table))
		return opp_table;

	/* This should be called before OPPs are initialized */
	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
		dev_pm_opp_put_opp_table(opp_table);
		return ERR_PTR(-EBUSY);
	}

	/* Has another CPU that shares the OPP table already set the helper? */
	if (!opp_table->set_opp)
		opp_table->set_opp = set_opp;

	return opp_table;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);

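/*
 * Example (illustrative sketch of a custom helper; the callback body below
 * is a minimal placeholder, real helpers order the regulator and clock
 * updates to match their hardware):
 *
 *	static int my_set_opp(struct dev_pm_set_opp_data *data)
 *	{
 *		... program data->regulators[] to data->new_opp.supplies[],
 *		    set data->clk to data->new_opp.rate ...
 *		return 0;
 *	}
 *
 *	table = dev_pm_opp_register_set_opp_helper(dev, my_set_opp);
 *	if (IS_ERR(table))
 *		return PTR_ERR(table);
 */
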
/**
 * dev_pm_opp_register_set_opp_helper() - Register custom set OPP helper
 * @dev: Device for which the helper is getting registered.
 * @set_opp: Custom set OPP helper.
 *
 * This is useful for platforms with complex requirements (like multiple
 * regulators per device), for which the generic OPP set rate helper isn't
 * sufficient.
 *
 * This must be called before any OPPs are initialized for the device.
 */
struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
			int (*set_opp)(struct dev_pm_set_opp_data *data))
{
	struct opp_table *opp_table;

	if (!set_opp)
		return ERR_PTR(-EINVAL);

	opp_table = _add_opp_table(dev);
	if (IS_ERR(opp_table))
		return opp_table;

	/* This should be called before OPPs are initialized */
	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
		dev_pm_opp_put_opp_table(opp_table);
		return ERR_PTR(-EBUSY);
	}

	/* Has another CPU that shares the OPP table already set the helper? */
	if (!opp_table->set_opp)
		opp_table->set_opp = set_opp;

	return opp_table;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);

/**
 * dev_pm_opp_unregister_set_opp_helper() - Releases resources blocked for
 *					    set_opp helper
 * @opp_table: OPP table returned from dev_pm_opp_register_set_opp_helper().
 *
 * Release resources blocked for the platform specific set_opp helper.
 */
void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
{
	if (unlikely(!opp_table))
		return;

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	opp_table->set_opp = NULL;
	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper);
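/*
 * Illustrative sketch (not part of this interface): a platform specific
 * helper would be registered from the driver's probe path. The helper name
 * and body below are assumptions that only hint at the expected shape; real
 * helpers sequence their clock and regulator updates as the hardware
 * requires, using the old/new OPP information in the passed data.
 *
 *	static int my_set_opp(struct dev_pm_set_opp_data *data)
 *	{
 *		// Scale supplies/clock using data->old_opp, data->new_opp,
 *		// data->regulators and data->clk, in the platform's order.
 *		return 0;
 *	}
 *
 *	opp_table = dev_pm_opp_register_set_opp_helper(dev, my_set_opp);
 *	if (IS_ERR(opp_table))
 *		return PTR_ERR(opp_table);
 */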
static void _opp_detach_genpd(struct opp_table *opp_table)
{
	int index;

	if (!opp_table->genpd_virt_devs)
		return;

	for (index = 0; index < opp_table->required_opp_count; index++) {
		if (!opp_table->genpd_virt_devs[index])
			continue;

		dev_pm_domain_detach(opp_table->genpd_virt_devs[index], false);
		opp_table->genpd_virt_devs[index] = NULL;
	}

	kfree(opp_table->genpd_virt_devs);
	opp_table->genpd_virt_devs = NULL;
}

/**
 * dev_pm_opp_attach_genpd - Attach genpd(s) for the device and save virtual device pointer
 * @dev: Consumer device for which the genpd is getting attached.
 * @names: NULL terminated array of pointers containing names of genpd to attach.
 * @virt_devs: Pointer to return the array of virtual devices.
 *
 * Multiple generic power domains for a device are supported with the help of
 * virtual genpd devices, which are created for each consumer device - genpd
 * pair. These are the device structures which are attached to the power domain
 * and are required by the OPP core to set the performance state of the genpd.
 * The same API also works for the case where a single genpd is available, so
 * that case doesn't need separate support.
 *
 * This helper will normally be called by the consumer driver of the device
 * "dev", as only that driver knows the genpd names.
 *
 * This helper needs to be called once with a list of all genpds to attach.
 * Otherwise the original device structure will be used instead by the OPP core.
 *
 * The order of entries in the names array must match the order in which
 * "required-opps" are added in DT.
 */
struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
		const char **names, struct device ***virt_devs)
{
	struct opp_table *opp_table;
	struct device *virt_dev;
	int index = 0, ret = -EINVAL;
	const char **name = names;

	opp_table = _add_opp_table(dev);
	if (IS_ERR(opp_table))
		return opp_table;

	if (opp_table->genpd_virt_devs)
		return opp_table;

	/*
	 * If the genpd's OPP table isn't already initialized, parsing of the
	 * required-opps fails for dev. We should retry this after the genpd's
	 * OPP table is added.
	 */
	if (!opp_table->required_opp_count) {
		ret = -EPROBE_DEFER;
		goto put_table;
	}

	mutex_lock(&opp_table->genpd_virt_dev_lock);

	opp_table->genpd_virt_devs = kcalloc(opp_table->required_opp_count,
					     sizeof(*opp_table->genpd_virt_devs),
					     GFP_KERNEL);
	if (!opp_table->genpd_virt_devs)
		goto unlock;

	while (*name) {
		if (index >= opp_table->required_opp_count) {
			dev_err(dev, "Index can't be greater than required-opp-count - 1, %s (%d : %d)\n",
				*name, opp_table->required_opp_count, index);
			goto err;
		}

		virt_dev = dev_pm_domain_attach_by_name(dev, *name);
		if (IS_ERR(virt_dev)) {
			ret = PTR_ERR(virt_dev);
			dev_err(dev, "Couldn't attach to pm_domain: %d\n", ret);
			goto err;
		}

		opp_table->genpd_virt_devs[index] = virt_dev;
		index++;
		name++;
	}

	if (virt_devs)
		*virt_devs = opp_table->genpd_virt_devs;
	mutex_unlock(&opp_table->genpd_virt_dev_lock);

	return opp_table;

err:
	_opp_detach_genpd(opp_table);
unlock:
	mutex_unlock(&opp_table->genpd_virt_dev_lock);

put_table:
	dev_pm_opp_put_opp_table(opp_table);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_attach_genpd);
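/*
 * Illustrative sketch (not part of this interface): a consumer with two
 * power domains listed in its DT node might attach them like this. The
 * domain names "mx" and "cx" are assumptions made only for the example;
 * they must match the order of the "required-opps" entries in DT.
 *
 *	const char *genpd_names[] = { "mx", "cx", NULL };
 *	struct device **virt_devs;
 *	struct opp_table *opp_table;
 *
 *	opp_table = dev_pm_opp_attach_genpd(dev, genpd_names, &virt_devs);
 *	if (IS_ERR(opp_table))
 *		return PTR_ERR(opp_table);
 *
 *	... later, on the teardown path ...
 *
 *	dev_pm_opp_detach_genpd(opp_table);
 */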
/**
 * dev_pm_opp_detach_genpd() - Detach genpd(s) from the device.
 * @opp_table: OPP table returned by dev_pm_opp_attach_genpd().
 *
 * This detaches the genpd(s), resets the virtual device pointers, and puts the
 * OPP table.
 */
void dev_pm_opp_detach_genpd(struct opp_table *opp_table)
{
	if (unlikely(!opp_table))
		return;

	/*
	 * Acquire genpd_virt_dev_lock to make sure virt_dev isn't getting
	 * used in parallel.
	 */
	mutex_lock(&opp_table->genpd_virt_dev_lock);
	_opp_detach_genpd(opp_table);
	mutex_unlock(&opp_table->genpd_virt_dev_lock);

	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_detach_genpd);

/**
 * dev_pm_opp_xlate_performance_state() - Find required OPP's pstate for src_table.
 * @src_table: OPP table which has dst_table as one of its required OPP table.
 * @dst_table: Required OPP table of the src_table.
 * @pstate: Current performance state of the src_table.
 *
 * This returns the pstate of the OPP (present in @dst_table) pointed out by
 * the "required-opps" property of the OPP (present in @src_table) which has
 * performance state set to @pstate.
 *
 * Return: Zero or positive performance state on success, otherwise negative
 * value on errors.
 */
int dev_pm_opp_xlate_performance_state(struct opp_table *src_table,
				       struct opp_table *dst_table,
				       unsigned int pstate)
{
	struct dev_pm_opp *opp;
	int dest_pstate = -EINVAL;
	int i;

	/*
	 * Normally the src_table will have the "required-opps" property set to
	 * point to one of the OPPs in the dst_table, but in some cases the
	 * genpd and its master have a one-to-one mapping of performance states
	 * and so none of them have the "required-opps" property set. Return the
	 * pstate of the src_table as it is in such cases.
	 */
	if (!src_table->required_opp_count)
		return pstate;

	for (i = 0; i < src_table->required_opp_count; i++) {
		if (src_table->required_opp_tables[i]->np == dst_table->np)
			break;
	}

	if (unlikely(i == src_table->required_opp_count)) {
		pr_err("%s: Couldn't find matching OPP table (%p: %p)\n",
		       __func__, src_table, dst_table);
		return -EINVAL;
	}

	mutex_lock(&src_table->lock);

	list_for_each_entry(opp, &src_table->opp_list, node) {
		if (opp->pstate == pstate) {
			dest_pstate = opp->required_opps[i]->pstate;
			goto unlock;
		}
	}

	pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__, src_table,
	       dst_table);

unlock:
	mutex_unlock(&src_table->lock);

	return dest_pstate;
}

/**
 * dev_pm_opp_add() - Add an OPP to the device's OPP table
 * @dev: device for which we do this operation
 * @freq: Frequency in Hz for this OPP
 * @u_volt: Voltage in uVolts for this OPP
 *
 * This function adds an opp definition to the opp table and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = _add_opp_table(dev);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	/* Fix regulator count for dynamic OPPs */
	opp_table->regulator_count = 1;

	ret = _opp_add_v1(opp_table, dev, freq, u_volt, true);
	if (ret)
		dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_add);
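/*
 * Illustrative sketch (not part of this interface): a driver that knows its
 * frequency/voltage pairs at probe time can register them dynamically. The
 * values below are placeholders, not recommendations.
 *
 *	ret = dev_pm_opp_add(dev, 500000000, 1000000);	// 500 MHz @ 1.0 V
 *	if (ret)
 *		return ret;
 *
 *	ret = dev_pm_opp_add(dev, 1000000000, 1200000);	// 1 GHz @ 1.2 V
 *	if (ret)
 *		return ret;
 */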
/**
 * _opp_set_availability() - helper to set the availability of an opp
 * @dev: device for which we do this operation
 * @freq: OPP frequency to modify availability
 * @availability_req: availability status requested for this opp
 *
 * Set the availability of an OPP. dev_pm_opp_{enable,disable} share common
 * logic, which is isolated here.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
static int _opp_set_availability(struct device *dev, unsigned long freq,
				 bool availability_req)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/* Find the opp_table */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		r = PTR_ERR(opp_table);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		return r;
	}

	mutex_lock(&opp_table->lock);

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}

	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;

	opp->available = availability_req;

	dev_pm_opp_get(opp);
	mutex_unlock(&opp_table->lock);

	/* Notify the change of the OPP availability */
	if (availability_req)
		blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,
					     opp);
	else
		blocking_notifier_call_chain(&opp_table->head,
					     OPP_EVENT_DISABLE, opp);

	dev_pm_opp_put(opp);
	goto put_table;

unlock:
	mutex_unlock(&opp_table->lock);
put_table:
	dev_pm_opp_put_opp_table(opp_table);
	return r;
}

/**
 * dev_pm_opp_adjust_voltage() - helper to change the voltage of an OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to adjust voltage of
 * @u_volt: new OPP target voltage
 * @u_volt_min: new OPP min voltage
 * @u_volt_max: new OPP max voltage
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
			      unsigned long u_volt, unsigned long u_volt_min,
			      unsigned long u_volt_max)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/* Find the opp_table */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		r = PTR_ERR(opp_table);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		return r;
	}

	mutex_lock(&opp_table->lock);

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}

	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto adjust_unlock;
	}

	/* Is update really needed? */
	if (opp->supplies->u_volt == u_volt)
		goto adjust_unlock;

	opp->supplies->u_volt = u_volt;
	opp->supplies->u_volt_min = u_volt_min;
	opp->supplies->u_volt_max = u_volt_max;

	dev_pm_opp_get(opp);
	mutex_unlock(&opp_table->lock);

	/* Notify the voltage change of the OPP */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADJUST_VOLTAGE,
				     opp);

	dev_pm_opp_put(opp);
	goto adjust_put_table;

adjust_unlock:
	mutex_unlock(&opp_table->lock);
adjust_put_table:
	dev_pm_opp_put_opp_table(opp_table);
	return r;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage);
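/*
 * Illustrative sketch (not part of this interface): thermal or adaptive
 * voltage scaling code might re-target an OPP's voltage at runtime. The
 * frequency and voltages below are placeholders only.
 *
 *	// Re-target the 1 GHz OPP to 1.15 V (min 1.10 V, max 1.25 V).
 *	ret = dev_pm_opp_adjust_voltage(dev, 1000000000,
 *					1150000, 1100000, 1250000);
 *	if (ret)
 *		dev_warn(dev, "Failed to adjust OPP voltage: %d\n", ret);
 */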
/**
 * dev_pm_opp_enable() - Enable a specific OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used by callers to make an OPP
 * available again after it was temporarily made unavailable with
 * dev_pm_opp_disable.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_enable);

/**
 * dev_pm_opp_disable() - Disable a specific OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant to be a temporary
 * control by users to make this OPP not available until the circumstances are
 * right to make it available again (with a call to dev_pm_opp_enable).
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_disable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_disable);

/**
 * dev_pm_opp_register_notifier() - Register OPP notifier for the device
 * @dev: Device for which notifier needs to be registered
 * @nb: Notifier block to be registered
 *
 * Return: 0 on success or a negative error value.
 */
int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	ret = blocking_notifier_chain_register(&opp_table->head, nb);

	dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL(dev_pm_opp_register_notifier);

/**
 * dev_pm_opp_unregister_notifier() - Unregister OPP notifier for the device
 * @dev: Device for which notifier needs to be unregistered
 * @nb: Notifier block to be unregistered
 *
 * Return: 0 on success or a negative error value.
 */
int dev_pm_opp_unregister_notifier(struct device *dev,
				   struct notifier_block *nb)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	ret = blocking_notifier_chain_unregister(&opp_table->head, nb);

	dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL(dev_pm_opp_unregister_notifier);

/**
 * dev_pm_opp_remove_table() - Free all OPPs associated with the device
 * @dev: device pointer used to lookup OPP table.
 *
 * Free both OPPs created using static entries present in DT and the
 * dynamically added entries.
 */
void dev_pm_opp_remove_table(struct device *dev)
{
	struct opp_table *opp_table;

	/* Check for existing table for 'dev' */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int error = PTR_ERR(opp_table);

		if (error != -ENODEV)
			WARN(1, "%s: opp_table: %d\n",
			     IS_ERR_OR_NULL(dev) ?
					"Invalid device" : dev_name(dev),
			     error);
		return;
	}

	/*
	 * Drop the extra reference only if the OPP table was successfully added
	 * with dev_pm_opp_of_add_table() earlier.
	 */
	if (_opp_remove_all_static(opp_table))
		dev_pm_opp_put_opp_table(opp_table);

	/* Drop reference taken by _find_opp_table() */
	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
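/*
 * Illustrative sketch (not part of this interface): a driver that wants to
 * react to OPP availability or voltage events can register a notifier. The
 * callback name and the handling below are assumptions showing the expected
 * shape only; the notifier data is the affected struct dev_pm_opp.
 *
 *	static int my_opp_notify(struct notifier_block *nb,
 *				 unsigned long event, void *data)
 *	{
 *		struct dev_pm_opp *opp = data;
 *
 *		if (event == OPP_EVENT_DISABLE)
 *			; // e.g. re-evaluate the current frequency choice
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_opp_nb = {
 *		.notifier_call = my_opp_notify,
 *	};
 *
 *	ret = dev_pm_opp_register_notifier(dev, &my_opp_nb);
 */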