// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic OPP OF helpers
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/of_device.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/energy_model.h>

#include "opp.h"

/*
 * Returns opp descriptor node for a device node, caller must
 * do of_node_put().
 */
static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np,
						     int index)
{
	/* "operating-points-v2" can be an array for power domain providers */
	return of_parse_phandle(np, "operating-points-v2", index);
}

/* Returns opp descriptor node for a device, caller must do of_node_put() */
struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
{
	return _opp_of_get_opp_desc_node(dev->of_node, 0);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node);

struct opp_table *_managed_opp(struct device *dev, int index)
{
	struct opp_table *opp_table, *managed_table = NULL;
	struct device_node *np;

	np = _opp_of_get_opp_desc_node(dev->of_node, index);
	if (!np)
		return NULL;

	list_for_each_entry(opp_table, &opp_tables, node) {
		if (opp_table->np == np) {
			/*
			 * Multiple devices can point to the same OPP table and
			 * so will have the same node-pointer, np.
			 *
			 * But the OPPs will be considered as shared only if the
			 * OPP table contains the "opp-shared" property.
			 */
			if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
				_get_opp_table_kref(opp_table);
				managed_table = opp_table;
			}

			break;
		}
	}

	of_node_put(np);

	return managed_table;
}

/* The caller must call dev_pm_opp_put() after the OPP is used */
static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table,
					  struct device_node *opp_np)
{
	struct dev_pm_opp *opp;

	mutex_lock(&opp_table->lock);

	list_for_each_entry(opp, &opp_table->opp_list, node) {
		if (opp->np == opp_np) {
			dev_pm_opp_get(opp);
			mutex_unlock(&opp_table->lock);
			return opp;
		}
	}

	mutex_unlock(&opp_table->lock);

	return NULL;
}

static struct device_node *of_parse_required_opp(struct device_node *np,
						 int index)
{
	struct device_node *required_np;

	required_np = of_parse_phandle(np, "required-opps", index);
	if (unlikely(!required_np)) {
		pr_err("%s: Unable to parse required-opps: %pOF, index: %d\n",
		       __func__, np, index);
	}

	return required_np;
}

/* The caller must call dev_pm_opp_put_opp_table() after the table is used */
static struct opp_table *_find_table_of_opp_np(struct device_node *opp_np)
{
	struct opp_table *opp_table;
	struct device_node *opp_table_np;

	lockdep_assert_held(&opp_table_lock);

	opp_table_np = of_get_parent(opp_np);
	if (!opp_table_np)
		goto err;

	/* It is safe to put the node now as we only need its address */
	of_node_put(opp_table_np);

	list_for_each_entry(opp_table, &opp_tables, node) {
		if (opp_table_np == opp_table->np) {
			_get_opp_table_kref(opp_table);
			return opp_table;
		}
	}

err:
	return ERR_PTR(-ENODEV);
}

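/*
 * Illustrative sketch of the DT layout the "required-opps" helpers above and
 * below operate on. All node names and labels here are hypothetical:
 *
 *	rpmpd_opp_table: opp-table {
 *		rpmpd_opp_low: opp1 { opp-level = <1>; };
 *		rpmpd_opp_high: opp2 { opp-level = <2>; };
 *	};
 *
 *	cpu_opp_table: opp-table {
 *		compatible = "operating-points-v2";
 *
 *		opp-1000000000 {
 *			opp-hz = /bits/ 64 <1000000000>;
 *			required-opps = <&rpmpd_opp_high>;
 *		};
 *	};
 *
 * Each "required-opps" phandle points at an OPP node in another (power domain
 * provider's) table; _find_table_of_opp_np() maps that node back to its
 * struct opp_table and _find_opp_of_np() to the struct dev_pm_opp.
 */
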
/* Free resources previously acquired by _opp_table_alloc_required_tables() */
static void _opp_table_free_required_tables(struct opp_table *opp_table)
{
	struct opp_table **required_opp_tables = opp_table->required_opp_tables;
	int i;

	if (!required_opp_tables)
		return;

	for (i = 0; i < opp_table->required_opp_count; i++) {
		if (IS_ERR_OR_NULL(required_opp_tables[i]))
			break;

		dev_pm_opp_put_opp_table(required_opp_tables[i]);
	}

	kfree(required_opp_tables);

	opp_table->required_opp_count = 0;
	opp_table->required_opp_tables = NULL;
}

/*
 * Populate all devices and opp tables which are part of "required-opps" list.
 * Checking only the first OPP node should be enough.
 */
static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
					     struct device *dev,
					     struct device_node *opp_np)
{
	struct opp_table **required_opp_tables;
	struct device_node *required_np, *np;
	int count, i;

	/* Traversing the first OPP node is all we need */
	np = of_get_next_available_child(opp_np, NULL);
	if (!np) {
		dev_err(dev, "Empty OPP table\n");
		return;
	}

	count = of_count_phandle_with_args(np, "required-opps", NULL);
	if (!count)
		goto put_np;

	required_opp_tables = kcalloc(count, sizeof(*required_opp_tables),
				      GFP_KERNEL);
	if (!required_opp_tables)
		goto put_np;

	opp_table->required_opp_tables = required_opp_tables;
	opp_table->required_opp_count = count;

	for (i = 0; i < count; i++) {
		required_np = of_parse_required_opp(np, i);
		if (!required_np)
			goto free_required_tables;

		required_opp_tables[i] = _find_table_of_opp_np(required_np);
		of_node_put(required_np);

		if (IS_ERR(required_opp_tables[i]))
			goto free_required_tables;

		/*
		 * We only support genpd's OPPs in the "required-opps" for now,
		 * as we don't know much about other cases. Error out if the
		 * required OPP doesn't belong to a genpd.
		 */
		if (!required_opp_tables[i]->is_genpd) {
			dev_err(dev, "required-opp doesn't belong to genpd: %pOF\n",
				required_np);
			goto free_required_tables;
		}
	}

	goto put_np;

free_required_tables:
	_opp_table_free_required_tables(opp_table);
put_np:
	of_node_put(np);
}

void _of_init_opp_table(struct opp_table *opp_table, struct device *dev,
			int index)
{
	struct device_node *np, *opp_np;
	u32 val;

	/*
	 * Only required for backward compatibility with v1 bindings, but isn't
	 * harmful for other cases. And so we do it unconditionally.
	 */
	np = of_node_get(dev->of_node);
	if (!np)
		return;

	if (!of_property_read_u32(np, "clock-latency", &val))
		opp_table->clock_latency_ns_max = val;
	of_property_read_u32(np, "voltage-tolerance",
			     &opp_table->voltage_tolerance_v1);

	if (of_find_property(np, "#power-domain-cells", NULL))
		opp_table->is_genpd = true;

	/* Get OPP table node */
	opp_np = _opp_of_get_opp_desc_node(np, index);
	of_node_put(np);

	if (!opp_np)
		return;

	if (of_property_read_bool(opp_np, "opp-shared"))
		opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
	else
		opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;

	opp_table->np = opp_np;

	_opp_table_alloc_required_tables(opp_table, dev, opp_np);
	of_node_put(opp_np);
}

void _of_clear_opp_table(struct opp_table *opp_table)
{
	_opp_table_free_required_tables(opp_table);
}

/*
 * Release all resources previously acquired with a call to
 * _of_opp_alloc_required_opps().
 */
void _of_opp_free_required_opps(struct opp_table *opp_table,
				struct dev_pm_opp *opp)
{
	struct dev_pm_opp **required_opps = opp->required_opps;
	int i;

	if (!required_opps)
		return;

	for (i = 0; i < opp_table->required_opp_count; i++) {
		if (!required_opps[i])
			break;

		/* Put the reference back */
		dev_pm_opp_put(required_opps[i]);
	}

	kfree(required_opps);
	opp->required_opps = NULL;
}

/* Populate all required OPPs which are part of "required-opps" list */
static int _of_opp_alloc_required_opps(struct opp_table *opp_table,
				       struct dev_pm_opp *opp)
{
	struct dev_pm_opp **required_opps;
	struct opp_table *required_table;
	struct device_node *np;
	int i, ret, count = opp_table->required_opp_count;

	if (!count)
		return 0;

	required_opps = kcalloc(count, sizeof(*required_opps), GFP_KERNEL);
	if (!required_opps)
		return -ENOMEM;

	opp->required_opps = required_opps;

	for (i = 0; i < count; i++) {
		required_table = opp_table->required_opp_tables[i];

		np = of_parse_required_opp(opp->np, i);
		if (unlikely(!np)) {
			ret = -ENODEV;
			goto free_required_opps;
		}

		required_opps[i] = _find_opp_of_np(required_table, np);
		of_node_put(np);

		if (!required_opps[i]) {
			pr_err("%s: Unable to find required OPP node: %pOF (%d)\n",
			       __func__, opp->np, i);
			ret = -ENODEV;
			goto free_required_opps;
		}
	}

	return 0;

free_required_opps:
	_of_opp_free_required_opps(opp_table, opp);

	return ret;
}

static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table)
{
	struct device_node *np, *opp_np;
	struct property *prop;

	if (!opp_table) {
		np = of_node_get(dev->of_node);
		if (!np)
			return -ENODEV;

		opp_np = _opp_of_get_opp_desc_node(np, 0);
		of_node_put(np);
	} else {
		opp_np = of_node_get(opp_table->np);
	}

	/* Let's not fail in case we are parsing opp-v1 bindings */
	if (!opp_np)
		return 0;

	/* Checking only the first OPP is sufficient */
	np = of_get_next_available_child(opp_np, NULL);
	of_node_put(opp_np);
	if (!np) {
		dev_err(dev, "OPP table empty\n");
		return -EINVAL;
	}

	prop = of_find_property(np, "opp-peak-kBps", NULL);
	of_node_put(np);

	if (!prop || !prop->length)
		return 0;

	return 1;
}

int dev_pm_opp_of_find_icc_paths(struct device *dev,
				 struct opp_table *opp_table)
{
	struct device_node *np;
	int ret, i, count, num_paths;
	struct icc_path **paths;

	ret = _bandwidth_supported(dev, opp_table);
	if (ret <= 0)
		return ret;

	ret = 0;

	np = of_node_get(dev->of_node);
	if (!np)
		return 0;

	count = of_count_phandle_with_args(np, "interconnects",
					   "#interconnect-cells");
	of_node_put(np);
	if (count < 0)
		return 0;

	/* two phandles when #interconnect-cells = <1> */
	if (count % 2) {
		dev_err(dev, "%s: Invalid interconnects values\n", __func__);
		return -EINVAL;
	}

	num_paths = count / 2;
	paths = kcalloc(num_paths, sizeof(*paths), GFP_KERNEL);
	if (!paths)
		return -ENOMEM;

	for (i = 0; i < num_paths; i++) {
		paths[i] = of_icc_get_by_index(dev, i);
		if (IS_ERR(paths[i])) {
			ret = PTR_ERR(paths[i]);
			if (ret != -EPROBE_DEFER) {
				dev_err(dev, "%s: Unable to get path%d: %d\n",
					__func__, i, ret);
			}
			goto err;
		}
	}

	if (opp_table) {
		opp_table->paths = paths;
		opp_table->path_count = num_paths;
		return 0;
	}

err:
	while (i--)
		icc_put(paths[i]);

	kfree(paths);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_find_icc_paths);

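/*
 * Illustrative usage sketch. A consumer with two interconnect paths, and
 * therefore four phandle+specifier entries when #interconnect-cells = <1>,
 * might describe and probe them as below; the labels are hypothetical:
 *
 *	interconnects = <&noc MASTER_GPU &noc SLAVE_DDR>,
 *			<&noc MASTER_GPU &noc SLAVE_LLCC>;
 *
 *	ret = dev_pm_opp_of_find_icc_paths(dev, NULL);
 *	if (ret)
 *		return ret;
 *
 * Passing a NULL opp_table only validates and releases the paths (useful for
 * probe deferral); passing the device's opp_table stores them for later
 * bandwidth voting.
 */
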
static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
			      struct device_node *np)
{
	unsigned int levels = opp_table->supported_hw_count;
	int count, versions, ret, i, j;
	u32 val;

	if (!opp_table->supported_hw) {
		/*
		 * In the case that no supported_hw has been set by the
		 * platform but there is an opp-supported-hw value set for
		 * an OPP then the OPP should not be enabled as there is
		 * no way to see if the hardware supports it.
		 */
		if (of_find_property(np, "opp-supported-hw", NULL))
			return false;
		else
			return true;
	}

	count = of_property_count_u32_elems(np, "opp-supported-hw");
	if (count <= 0 || count % levels) {
		dev_err(dev, "%s: Invalid opp-supported-hw property (%d)\n",
			__func__, count);
		return false;
	}

	versions = count / levels;

	/* All levels in at least one of the versions should match */
	for (i = 0; i < versions; i++) {
		bool supported = true;

		for (j = 0; j < levels; j++) {
			ret = of_property_read_u32_index(np, "opp-supported-hw",
							 i * levels + j, &val);
			if (ret) {
				dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
					 __func__, i * levels + j, ret);
				return false;
			}

			/* Check if the level is supported */
			if (!(val & opp_table->supported_hw[j])) {
				supported = false;
				break;
			}
		}

		if (supported)
			return true;
	}

	return false;
}

static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
			      struct opp_table *opp_table)
{
	u32 *microvolt, *microamp = NULL;
	int supplies = opp_table->regulator_count, vcount, icount, ret, i, j;
	struct property *prop = NULL;
	char name[NAME_MAX];

	/* Search for "opp-microvolt-<name>" */
	if (opp_table->prop_name) {
		snprintf(name, sizeof(name), "opp-microvolt-%s",
			 opp_table->prop_name);
		prop = of_find_property(opp->np, name, NULL);
	}

	if (!prop) {
		/* Search for "opp-microvolt" */
		sprintf(name, "opp-microvolt");
		prop = of_find_property(opp->np, name, NULL);

		/* Missing property isn't a problem, but an invalid entry is */
		if (!prop) {
			if (unlikely(supplies == -1)) {
				/* Initialize regulator_count */
				opp_table->regulator_count = 0;
				return 0;
			}

			if (!supplies)
				return 0;

			dev_err(dev, "%s: opp-microvolt missing although OPP managing regulators\n",
				__func__);
			return -EINVAL;
		}
	}

	if (unlikely(supplies == -1)) {
		/* Initialize regulator_count */
		supplies = opp_table->regulator_count = 1;
	} else if (unlikely(!supplies)) {
		dev_err(dev, "%s: opp-microvolt wasn't expected\n", __func__);
		return -EINVAL;
	}

	vcount = of_property_count_u32_elems(opp->np, name);
	if (vcount < 0) {
		dev_err(dev, "%s: Invalid %s property (%d)\n",
			__func__, name, vcount);
		return vcount;
	}

	/* There can be one or three elements per supply */
	if (vcount != supplies && vcount != supplies * 3) {
		dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
			__func__, name, vcount, supplies);
		return -EINVAL;
	}

	microvolt = kmalloc_array(vcount, sizeof(*microvolt), GFP_KERNEL);
	if (!microvolt)
		return -ENOMEM;

	ret = of_property_read_u32_array(opp->np, name, microvolt, vcount);
	if (ret) {
		dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
		ret = -EINVAL;
		goto free_microvolt;
	}

	/* Search for "opp-microamp-<name>" */
	prop = NULL;
	if (opp_table->prop_name) {
		snprintf(name, sizeof(name), "opp-microamp-%s",
			 opp_table->prop_name);
		prop = of_find_property(opp->np, name, NULL);
	}

	if (!prop) {
		/* Search for "opp-microamp" */
		sprintf(name, "opp-microamp");
		prop = of_find_property(opp->np, name, NULL);
	}

	if (prop) {
		icount = of_property_count_u32_elems(opp->np, name);
		if (icount < 0) {
			dev_err(dev, "%s: Invalid %s property (%d)\n", __func__,
				name, icount);
			ret = icount;
			goto free_microvolt;
		}

		if (icount != supplies) {
			dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
				__func__, name, icount, supplies);
			ret = -EINVAL;
			goto free_microvolt;
		}

		microamp = kmalloc_array(icount, sizeof(*microamp), GFP_KERNEL);
		if (!microamp) {
			ret = -ENOMEM;
			goto free_microvolt;
		}

		ret = of_property_read_u32_array(opp->np, name, microamp,
						 icount);
		if (ret) {
			dev_err(dev, "%s: error parsing %s: %d\n", __func__,
				name, ret);
			ret = -EINVAL;
			goto free_microamp;
		}
	}

	for (i = 0, j = 0; i < supplies; i++) {
		opp->supplies[i].u_volt = microvolt[j++];

		if (vcount == supplies) {
			opp->supplies[i].u_volt_min = opp->supplies[i].u_volt;
			opp->supplies[i].u_volt_max = opp->supplies[i].u_volt;
		} else {
			opp->supplies[i].u_volt_min = microvolt[j++];
			opp->supplies[i].u_volt_max = microvolt[j++];
		}

		if (microamp)
			opp->supplies[i].u_amp = microamp[i];
	}

free_microamp:
	kfree(microamp);
free_microvolt:
	kfree(microvolt);

	return ret;
}

/**
 * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
 *				  entries
 * @dev:	device pointer used to lookup OPP table.
 *
 * Free OPPs created using static entries present in DT.
 */
void dev_pm_opp_of_remove_table(struct device *dev)
{
	dev_pm_opp_remove_table(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);

static int _read_bw(struct dev_pm_opp *new_opp, struct opp_table *table,
		    struct device_node *np, bool peak)
{
	const char *name = peak ? "opp-peak-kBps" : "opp-avg-kBps";
	struct property *prop;
	int i, count, ret;
	u32 *bw;

	prop = of_find_property(np, name, NULL);
	if (!prop)
		return -ENODEV;

	count = prop->length / sizeof(u32);
	if (table->path_count != count) {
		pr_err("%s: Mismatch between %s and paths (%d %d)\n",
		       __func__, name, count, table->path_count);
		return -EINVAL;
	}

	bw = kmalloc_array(count, sizeof(*bw), GFP_KERNEL);
	if (!bw)
		return -ENOMEM;

	ret = of_property_read_u32_array(np, name, bw, count);
	if (ret) {
		pr_err("%s: Error parsing %s: %d\n", __func__, name, ret);
		goto out;
	}

	for (i = 0; i < count; i++) {
		if (peak)
			new_opp->bandwidth[i].peak = kBps_to_icc(bw[i]);
		else
			new_opp->bandwidth[i].avg = kBps_to_icc(bw[i]);
	}

out:
	kfree(bw);
	return ret;
}

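/*
 * Illustrative sketch of a single v2 OPP node containing the keys and
 * properties parsed by _read_opp_key(), opp_parse_supplies() and
 * _opp_is_supported(); all values below are made up:
 *
 *	opp-800000000 {
 *		opp-hz = /bits/ 64 <800000000>;
 *		opp-microvolt = <900000 850000 950000>;
 *		opp-peak-kBps = <1525000>;
 *		opp-supported-hw = <0x1>;
 *		clock-latency-ns = <200000>;
 *	};
 *
 * At least one key (opp-hz, the bandwidth properties, or opp-level) must be
 * present for a non-genpd table; everything else is optional.
 */
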
static int _read_opp_key(struct dev_pm_opp *new_opp, struct opp_table *table,
			 struct device_node *np, bool *rate_not_available)
{
	bool found = false;
	u64 rate;
	int ret;

	ret = of_property_read_u64(np, "opp-hz", &rate);
	if (!ret) {
		/*
		 * The rate is defined as an unsigned long in the clk API, so
		 * cast explicitly to its type. This must be revisited once the
		 * clk API guarantees a 64-bit rate.
		 */
		new_opp->rate = (unsigned long)rate;
		found = true;
	}
	*rate_not_available = !!ret;

	/*
	 * Bandwidth consists of peak and average (optional) values:
	 *	opp-peak-kBps = <path1_value path2_value>;
	 *	opp-avg-kBps = <path1_value path2_value>;
	 */
	ret = _read_bw(new_opp, table, np, true);
	if (!ret) {
		found = true;
		ret = _read_bw(new_opp, table, np, false);
	}

	/* The properties were found but we failed to parse them */
	if (ret && ret != -ENODEV)
		return ret;

	if (!of_property_read_u32(np, "opp-level", &new_opp->level))
		found = true;

	if (found)
		return 0;

	return ret;
}

/**
 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
 * @opp_table:	OPP table
 * @dev:	device for which we do this operation
 * @np:		device node
 *
 * This function adds an opp definition to the opp table and returns status. The
 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
 * removed by dev_pm_opp_remove.
 *
 * Return:
 * Valid OPP pointer:
 *		On success
 * NULL:
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 *		OR if the OPP is not supported by hardware.
 * ERR_PTR(-EEXIST):
 *		Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * ERR_PTR(-ENOMEM):
 *		Memory allocation failure
 * ERR_PTR(-EINVAL):
 *		Failed parsing the OPP node
 */
static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
		struct device *dev, struct device_node *np)
{
	struct dev_pm_opp *new_opp;
	u32 val;
	int ret;
	bool rate_not_available = false;

	new_opp = _opp_allocate(opp_table);
	if (!new_opp)
		return ERR_PTR(-ENOMEM);

	ret = _read_opp_key(new_opp, opp_table, np, &rate_not_available);
	if (ret < 0 && !opp_table->is_genpd) {
		dev_err(dev, "%s: opp key field not found\n", __func__);
		goto free_opp;
	}

	/* Check if the OPP supports hardware's hierarchy of versions or not */
	if (!_opp_is_supported(dev, opp_table, np)) {
		dev_dbg(dev, "OPP not supported by hardware: %lu\n",
			new_opp->rate);
		goto free_opp;
	}

	new_opp->turbo = of_property_read_bool(np, "turbo-mode");

	new_opp->np = np;
	new_opp->dynamic = false;
	new_opp->available = true;

	ret = _of_opp_alloc_required_opps(opp_table, new_opp);
	if (ret)
		goto free_opp;

	if (!of_property_read_u32(np, "clock-latency-ns", &val))
		new_opp->clock_latency_ns = val;

	ret = opp_parse_supplies(new_opp, dev, opp_table);
	if (ret)
		goto free_required_opps;

	if (opp_table->is_genpd)
		new_opp->pstate = pm_genpd_opp_to_performance_state(dev, new_opp);

	ret = _opp_add(dev, new_opp, opp_table, rate_not_available);
	if (ret) {
		/* Don't return error for duplicate OPPs */
		if (ret == -EBUSY)
			ret = 0;
		goto free_required_opps;
	}

	/* OPP to select on device suspend */
	if (of_property_read_bool(np, "opp-suspend")) {
		if (opp_table->suspend_opp) {
			/* Pick the OPP with higher rate as suspend OPP */
			if (new_opp->rate > opp_table->suspend_opp->rate) {
				opp_table->suspend_opp->suspend = false;
				new_opp->suspend = true;
				opp_table->suspend_opp = new_opp;
			}
		} else {
			new_opp->suspend = true;
			opp_table->suspend_opp = new_opp;
		}
	}

	if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
		opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;

	pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
		 __func__, new_opp->turbo, new_opp->rate,
		 new_opp->supplies[0].u_volt, new_opp->supplies[0].u_volt_min,
		 new_opp->supplies[0].u_volt_max, new_opp->clock_latency_ns);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
	return new_opp;

free_required_opps:
	_of_opp_free_required_opps(opp_table, new_opp);
free_opp:
	_opp_free(new_opp);

	return ERR_PTR(ret);
}

/* Initializes OPP tables based on new bindings */
static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
{
	struct device_node *np;
	int ret, count = 0;
	struct dev_pm_opp *opp;

	/* OPP table is already initialized for the device */
	mutex_lock(&opp_table->lock);
	if (opp_table->parsed_static_opps) {
		opp_table->parsed_static_opps++;
		mutex_unlock(&opp_table->lock);
		return 0;
	}

	opp_table->parsed_static_opps = 1;
	mutex_unlock(&opp_table->lock);

	/* We have opp-table node now, iterate over it and add OPPs */
	for_each_available_child_of_node(opp_table->np, np) {
		opp = _opp_add_static_v2(opp_table, dev, np);
		if (IS_ERR(opp)) {
			ret = PTR_ERR(opp);
			dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
				ret);
			of_node_put(np);
			goto remove_static_opp;
		} else if (opp) {
			count++;
		}
	}

	/* There should be one or more OPPs defined */
	if (WARN_ON(!count)) {
		ret = -ENOENT;
		goto remove_static_opp;
	}

	list_for_each_entry(opp, &opp_table->opp_list, node) {
		/* Any non-zero performance state would enable the feature */
		if (opp->pstate) {
			opp_table->genpd_performance_state = true;
			break;
		}
	}

	return 0;

remove_static_opp:
	_opp_remove_all_static(opp_table);

	return ret;
}

/* Initializes OPP tables based on old-deprecated bindings */
static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
{
	const struct property *prop;
	const __be32 *val;
	int nr, ret = 0;

	mutex_lock(&opp_table->lock);
	if (opp_table->parsed_static_opps) {
		opp_table->parsed_static_opps++;
		mutex_unlock(&opp_table->lock);
		return 0;
	}

	opp_table->parsed_static_opps = 1;
	mutex_unlock(&opp_table->lock);

	prop = of_find_property(dev->of_node, "operating-points", NULL);
	if (!prop) {
		ret = -ENODEV;
		goto remove_static_opp;
	}
	if (!prop->value) {
		ret = -ENODATA;
		goto remove_static_opp;
	}

	/*
	 * Each OPP is a tuple consisting of frequency and voltage, like
	 * <freq-kHz vol-uV>.
	 */
	nr = prop->length / sizeof(u32);
	if (nr % 2) {
		dev_err(dev, "%s: Invalid OPP table\n", __func__);
		ret = -EINVAL;
		goto remove_static_opp;
	}

	val = prop->value;
	while (nr) {
		unsigned long freq = be32_to_cpup(val++) * 1000;
		unsigned long volt = be32_to_cpup(val++);

		ret = _opp_add_v1(opp_table, dev, freq, volt, false);
		if (ret) {
			dev_err(dev, "%s: Failed to add OPP %ld (%d)\n",
				__func__, freq, ret);
			goto remove_static_opp;
		}
		nr -= 2;
	}

	return 0;

remove_static_opp:
	_opp_remove_all_static(opp_table);

	return ret;
}

/**
 * dev_pm_opp_of_add_table() - Initialize opp table from device tree
 * @dev:	device pointer used to lookup OPP table.
 *
 * Register the initial OPP table with the OPP library for given device.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -ENODEV	when 'operating-points' property is not found or is invalid data
 *		in device node.
 * -ENODATA	when empty 'operating-points' property is found
 * -EINVAL	when invalid entries are found in opp-v2 table
 */
int dev_pm_opp_of_add_table(struct device *dev)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = dev_pm_opp_get_opp_table_indexed(dev, 0);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	/*
	 * OPPs have two versions of bindings now. Also try the old (v1)
	 * bindings for backward compatibility with older dtbs.
	 */
	if (opp_table->np)
		ret = _of_add_opp_table_v2(dev, opp_table);
	else
		ret = _of_add_opp_table_v1(dev, opp_table);

	if (ret)
		dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);

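/*
 * Illustrative usage sketch: a platform driver would typically pair the two
 * calls in its probe and remove paths; the error handling shown is the
 * caller's, not this file's:
 *
 *	ret = dev_pm_opp_of_add_table(dev);
 *	if (ret)
 *		return ret;
 *	...
 *	dev_pm_opp_of_remove_table(dev);
 */
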
/**
 * dev_pm_opp_of_add_table_indexed() - Initialize indexed opp table from device tree
 * @dev:	device pointer used to lookup OPP table.
 * @index:	Index number.
 *
 * Register the initial OPP table with the OPP library for given device only
 * using the "operating-points-v2" property.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -ENODEV	when 'operating-points' property is not found or is invalid data
 *		in device node.
 * -ENODATA	when empty 'operating-points' property is found
 * -EINVAL	when invalid entries are found in opp-v2 table
 */
int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
{
	struct opp_table *opp_table;
	int ret, count;

	if (index) {
		/*
		 * If only one phandle is present, then the same OPP table
		 * applies for all index requests.
		 */
		count = of_count_phandle_with_args(dev->of_node,
						   "operating-points-v2", NULL);
		if (count == 1)
			index = 0;
	}

	opp_table = dev_pm_opp_get_opp_table_indexed(dev, index);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	ret = _of_add_opp_table_v2(dev, opp_table);
	if (ret)
		dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_indexed);

/* CPU device specific helpers */

/**
 * dev_pm_opp_of_cpumask_remove_table() - Removes OPP table for @cpumask
 * @cpumask:	cpumask for which OPP table needs to be removed
 *
 * This removes the OPP tables for CPUs present in the @cpumask.
 * This should be used only to remove static entries created from DT.
 */
void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
{
	_dev_pm_opp_cpumask_remove_table(cpumask, -1);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);

/**
 * dev_pm_opp_of_cpumask_add_table() - Adds OPP table for @cpumask
 * @cpumask:	cpumask for which OPP table needs to be added.
 *
 * This adds the OPP tables for CPUs present in the @cpumask.
 */
int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
{
	struct device *cpu_dev;
	int cpu, ret;

	if (WARN_ON(cpumask_empty(cpumask)))
		return -ENODEV;

	for_each_cpu(cpu, cpumask) {
		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			pr_err("%s: failed to get cpu%d device\n", __func__,
			       cpu);
			ret = -ENODEV;
			goto remove_table;
		}

		ret = dev_pm_opp_of_add_table(cpu_dev);
		if (ret) {
			/*
			 * OPP may get registered dynamically, don't print error
			 * message here.
			 */
			pr_debug("%s: couldn't find opp table for cpu:%d, %d\n",
				 __func__, cpu, ret);

			goto remove_table;
		}
	}

	return 0;

remove_table:
	/* Free all other OPPs */
	_dev_pm_opp_cpumask_remove_table(cpumask, cpu);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);

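/*
 * Illustrative usage sketch: a cpufreq driver's ->init() might populate the
 * tables for all CPUs in a policy and tear them down again in ->exit();
 * "policy" stands for a struct cpufreq_policy pointer here:
 *
 *	ret = dev_pm_opp_of_cpumask_add_table(policy->cpus);
 *	if (ret)
 *		return ret;
 *	...
 *	dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
 */
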
/*
 * Works only for OPP v2 bindings.
 *
 * Returns -ENOENT if operating-points-v2 bindings aren't supported.
 */
/**
 * dev_pm_opp_of_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with
 *				      @cpu_dev using operating-points-v2
 *				      bindings.
 *
 * @cpu_dev:	CPU device for which we do this operation
 * @cpumask:	cpumask to update with information of sharing CPUs
 *
 * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
 *
 * Returns -ENOENT if operating-points-v2 isn't present for @cpu_dev.
 */
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
				   struct cpumask *cpumask)
{
	struct device_node *np, *tmp_np, *cpu_np;
	int cpu, ret = 0;

	/* Get OPP descriptor node */
	np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
	if (!np) {
		dev_dbg(cpu_dev, "%s: Couldn't find opp node.\n", __func__);
		return -ENOENT;
	}

	cpumask_set_cpu(cpu_dev->id, cpumask);

	/* OPPs are shared ? */
	if (!of_property_read_bool(np, "opp-shared"))
		goto put_cpu_node;

	for_each_possible_cpu(cpu) {
		if (cpu == cpu_dev->id)
			continue;

		cpu_np = of_cpu_device_node_get(cpu);
		if (!cpu_np) {
			dev_err(cpu_dev, "%s: failed to get cpu%d node\n",
				__func__, cpu);
			ret = -ENOENT;
			goto put_cpu_node;
		}

		/* Get OPP descriptor node */
		tmp_np = _opp_of_get_opp_desc_node(cpu_np, 0);
		of_node_put(cpu_np);
		if (!tmp_np) {
			pr_err("%pOF: Couldn't find opp node\n", cpu_np);
			ret = -ENOENT;
			goto put_cpu_node;
		}

		/* CPUs are sharing opp node */
		if (np == tmp_np)
			cpumask_set_cpu(cpu, cpumask);

		of_node_put(tmp_np);
	}

put_cpu_node:
	of_node_put(np);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);

/**
 * of_get_required_opp_performance_state() - Search for required OPP and return its performance state.
 * @np:		Node that contains the "required-opps" property.
 * @index:	Index of the phandle to parse.
 *
 * Returns the performance state of the OPP pointed out by the "required-opps"
 * property at @index in @np.
 *
 * Return: Zero or positive performance state on success, otherwise negative
 * value on errors.
 */
int of_get_required_opp_performance_state(struct device_node *np, int index)
{
	struct dev_pm_opp *opp;
	struct device_node *required_np;
	struct opp_table *opp_table;
	int pstate = -EINVAL;

	required_np = of_parse_required_opp(np, index);
	if (!required_np)
		return -EINVAL;

	opp_table = _find_table_of_opp_np(required_np);
	if (IS_ERR(opp_table)) {
		pr_err("%s: Failed to find required OPP table %pOF: %ld\n",
		       __func__, np, PTR_ERR(opp_table));
		goto put_required_np;
	}

	opp = _find_opp_of_np(opp_table, required_np);
	if (opp) {
		pstate = opp->pstate;
		dev_pm_opp_put(opp);
	}

	dev_pm_opp_put_opp_table(opp_table);

put_required_np:
	of_node_put(required_np);

	return pstate;
}
EXPORT_SYMBOL_GPL(of_get_required_opp_performance_state);

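/*
 * Illustrative usage sketch: a driver may translate one of its
 * "required-opps" phandles into a power domain performance state and vote
 * for it; "consumer_np" stands for a hypothetical device node:
 *
 *	pstate = of_get_required_opp_performance_state(consumer_np, 0);
 *	if (pstate < 0)
 *		return pstate;
 *	ret = dev_pm_genpd_set_performance_state(dev, pstate);
 */
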
/**
 * dev_pm_opp_get_of_node() - Gets the DT node corresponding to an opp
 * @opp:	opp for which the DT node has to be returned
 *
 * Return: DT node corresponding to the opp, else NULL on failure.
 *
 * The caller needs to put the node with of_node_put() after using it.
 */
struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return NULL;
	}

	return of_node_get(opp->np);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node);

/*
 * Callback function provided to the Energy Model framework upon registration.
 * This computes the power estimated by @dev at @kHz if it is the frequency
 * of an existing OPP, or at the frequency of the first OPP above @kHz otherwise
 * (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled
 * frequency and @mW to the associated power. The power is estimated as
 * P = C * V^2 * f with C being the device's capacitance and V and f
 * respectively the voltage and frequency of the OPP.
 *
 * Returns -EINVAL if the power calculation failed because of missing
 * parameters, 0 otherwise.
 */
static int __maybe_unused _get_power(unsigned long *mW, unsigned long *kHz,
				     struct device *dev)
{
	struct dev_pm_opp *opp;
	struct device_node *np;
	unsigned long mV, Hz;
	u32 cap;
	u64 tmp;
	int ret;

	np = of_node_get(dev->of_node);
	if (!np)
		return -EINVAL;

	ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
	of_node_put(np);
	if (ret)
		return -EINVAL;

	Hz = *kHz * 1000;
	opp = dev_pm_opp_find_freq_ceil(dev, &Hz);
	if (IS_ERR(opp))
		return -EINVAL;

	mV = dev_pm_opp_get_voltage(opp) / 1000;
	dev_pm_opp_put(opp);
	if (!mV)
		return -EINVAL;

	tmp = (u64)cap * mV * mV * (Hz / 1000000);
	do_div(tmp, 1000000000);

	*mW = (unsigned long)tmp;
	*kHz = Hz / 1000;

	return 0;
}

/**
 * dev_pm_opp_of_register_em() - Attempt to register an Energy Model
 * @dev:	Device for which an Energy Model has to be registered
 * @cpus:	CPUs for which an Energy Model has to be registered. For
 *		other types of devices it should be set to NULL.
 *
 * This checks whether the "dynamic-power-coefficient" devicetree property has
 * been specified, and tries to register an Energy Model with it if it has.
 * Having this property means the voltages are known for the OPPs and the EM
 * can be calculated.
 */
int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus)
{
	struct em_data_callback em_cb = EM_DATA_CB(_get_power);
	struct device_node *np;
	int ret, nr_opp;
	u32 cap;

	if (IS_ERR_OR_NULL(dev)) {
		ret = -EINVAL;
		goto failed;
	}

	nr_opp = dev_pm_opp_get_opp_count(dev);
	if (nr_opp <= 0) {
		ret = -EINVAL;
		goto failed;
	}

	np = of_node_get(dev->of_node);
	if (!np) {
		ret = -EINVAL;
		goto failed;
	}

	/*
	 * Register an EM only if the 'dynamic-power-coefficient' property is
	 * set in devicetree. It is assumed the voltage values are known if that
	 * property is set since it is useless otherwise. If voltages are not
	 * known, just let the EM registration fail with an error to alert the
	 * user about the inconsistent configuration.
	 */
	ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
	of_node_put(np);
	if (ret || !cap) {
		dev_dbg(dev, "Couldn't find proper 'dynamic-power-coefficient' in DT\n");
		ret = -EINVAL;
		goto failed;
	}

	ret = em_dev_register_perf_domain(dev, nr_opp, &em_cb, cpus);
	if (ret)
		goto failed;

	return 0;

failed:
	dev_dbg(dev, "Couldn't register Energy Model %d\n", ret);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_register_em);
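
/*
 * Illustrative usage sketch: a cpufreq driver commonly registers the Energy
 * Model from its ->ready() callback, once the OPP table is in place;
 * "policy" stands for a struct cpufreq_policy pointer:
 *
 *	struct device *cpu_dev = get_cpu_device(policy->cpu);
 *
 *	dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
 *
 * For non-CPU devices (e.g. a GPU), @cpus is NULL and the power estimate
 * still comes from _get_power() above, driven by the device's
 * "dynamic-power-coefficient" property.
 */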