// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * phy-core.c -- Generic Phy framework.
 *
 * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
 *
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/idr.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>

/* The class every PHY device created by phy_create() belongs to. */
static struct class *phy_class;
/* debugfs root; each PHY gets its own directory underneath (see phy_create()). */
static struct dentry *phy_debugfs_root;
/* Protects both phy_provider_list and the phy_lookup list (phys) below. */
static DEFINE_MUTEX(phy_provider_mutex);
static LIST_HEAD(phy_provider_list);
/* List of struct phy_lookup entries used for non-DT consumer lookup. */
static LIST_HEAD(phys);
/* Allocator for the unique per-PHY id used in the device name ("phy-%s.%d"). */
static DEFINE_IDA(phy_ida);

/* devres release callback paired with devm_phy_get()/devm_of_phy_get(). */
static void devm_phy_release(struct device *dev, void *res)
{
	struct phy *phy = *(struct phy **)res;

	phy_put(dev, phy);
}

/* devres release callback paired with __devm_of_phy_provider_register(). */
static void devm_phy_provider_release(struct device *dev, void *res)
{
	struct phy_provider *phy_provider = *(struct phy_provider **)res;

	of_phy_provider_unregister(phy_provider);
}

/* devres release callback paired with devm_phy_create(). */
static void devm_phy_consume(struct device *dev, void *res)
{
	struct phy *phy = *(struct phy **)res;

	phy_destroy(phy);
}

/* devres match callback: true when the devres entry wraps @match_data. */
static int devm_phy_match(struct device *dev, void *res, void *match_data)
{
	struct phy **phy = res;

	return *phy == match_data;
}

/**
 * phy_create_lookup() - allocate and register PHY/device association
 * @phy: the phy of the association
 * @con_id: connection ID string on device
 * @dev_id: the device of the association
 *
 * Creates and registers phy_lookup entry.
 */
int phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id)
{
	struct phy_lookup *pl;

	if (!phy || !dev_id || !con_id)
		return -EINVAL;

	pl = kzalloc(sizeof(*pl), GFP_KERNEL);
	if (!pl)
		return -ENOMEM;

	/*
	 * NOTE(review): the strings are stored by reference, not copied —
	 * the caller must keep @con_id/@dev_id alive until
	 * phy_remove_lookup() is called.
	 */
	pl->dev_id = dev_id;
	pl->con_id = con_id;
	pl->phy = phy;

	mutex_lock(&phy_provider_mutex);
	list_add_tail(&pl->node, &phys);
	mutex_unlock(&phy_provider_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(phy_create_lookup);

/**
 * phy_remove_lookup() - find and remove PHY/device association
 * @phy: the phy of the association
 * @con_id: connection ID string on device
 * @dev_id: the device of the association
 *
 * Finds and unregisters phy_lookup entry that was created with
 * phy_create_lookup().
 */
void phy_remove_lookup(struct phy *phy, const char *con_id, const char *dev_id)
{
	struct phy_lookup *pl;

	if (!phy || !dev_id || !con_id)
		return;

	mutex_lock(&phy_provider_mutex);
	list_for_each_entry(pl, &phys, node)
		if (pl->phy == phy && !strcmp(pl->dev_id, dev_id) &&
		    !strcmp(pl->con_id, con_id)) {
			/* Only the first matching entry is removed. */
			list_del(&pl->node);
			kfree(pl);
			break;
		}
	mutex_unlock(&phy_provider_mutex);
}
EXPORT_SYMBOL_GPL(phy_remove_lookup);

/*
 * Look up a phy_lookup entry matching @dev's name and @con_id (non-DT path
 * of phy_get()).  Returns the phy, or ERR_PTR(-ENODEV) when nothing matches.
 */
static struct phy *phy_find(struct device *dev, const char *con_id)
{
	const char *dev_id = dev_name(dev);
	struct phy_lookup *p, *pl = NULL;

	mutex_lock(&phy_provider_mutex);
	list_for_each_entry(p, &phys, node)
		if (!strcmp(p->dev_id, dev_id) && !strcmp(p->con_id, con_id)) {
			pl = p;
			break;
		}
	mutex_unlock(&phy_provider_mutex);

	return pl ?
		pl->phy : ERR_PTR(-ENODEV);
}

/*
 * Find the registered phy_provider whose device node — or one of the child
 * nodes it declared at registration time — is @node.
 * Returns ERR_PTR(-EPROBE_DEFER) when no matching provider has registered
 * yet, so consumers retry after the provider probes.
 */
static struct phy_provider *of_phy_provider_lookup(struct device_node *node)
{
	struct phy_provider *phy_provider;
	struct device_node *child;

	list_for_each_entry(phy_provider, &phy_provider_list, list) {
		if (phy_provider->dev->of_node == node)
			return phy_provider;

		for_each_child_of_node(phy_provider->children, child)
			if (child == node) {
				/* drop the iterator's reference before returning */
				of_node_put(child);
				return phy_provider;
			}
	}

	return ERR_PTR(-EPROBE_DEFER);
}

/*
 * Runtime-PM wrappers below: a NULL phy is a no-op returning 0, and all of
 * them return -ENOTSUPP when runtime PM is not enabled on the phy device.
 */
int phy_pm_runtime_get(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	if (!pm_runtime_enabled(&phy->dev))
		return -ENOTSUPP;

	ret = pm_runtime_get(&phy->dev);
	/* -EINPROGRESS means the async resume was queued; keep the usage count. */
	if (ret < 0 && ret != -EINPROGRESS)
		pm_runtime_put_noidle(&phy->dev);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_get);

int phy_pm_runtime_get_sync(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	if (!pm_runtime_enabled(&phy->dev))
		return -ENOTSUPP;

	ret = pm_runtime_get_sync(&phy->dev);
	/* drop the usage count taken by pm_runtime_get_sync() on failure */
	if (ret < 0)
		pm_runtime_put_sync(&phy->dev);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_get_sync);

int phy_pm_runtime_put(struct phy *phy)
{
	if (!phy)
		return 0;

	if (!pm_runtime_enabled(&phy->dev))
		return -ENOTSUPP;

	return pm_runtime_put(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_put);

int phy_pm_runtime_put_sync(struct phy *phy)
{
	if (!phy)
		return 0;

	if (!pm_runtime_enabled(&phy->dev))
		return -ENOTSUPP;

	return pm_runtime_put_sync(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_put_sync);

void phy_pm_runtime_allow(struct phy *phy)
{
	if (!phy)
		return;

	if (!pm_runtime_enabled(&phy->dev))
		return;

	pm_runtime_allow(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_allow);

void phy_pm_runtime_forbid(struct phy *phy)
{
	if (!phy)
		return;

	if (!pm_runtime_enabled(&phy->dev))
		return;

	pm_runtime_forbid(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_forbid);

/**
 * phy_init - phy internal initialization before phy operation
 * @phy: the phy returned by phy_get()
 *
 * Used to allow phy's driver to perform phy internal initialization,
 * such as PLL block powering, clock initialization or anything that
 * is required by the phy to perform the start of operation.
 * Must be called before phy_power_on().
 *
 * Return: %0 if successful, a negative error code otherwise
 */
int phy_init(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		return ret;
	ret = 0; /* Override possible ret == -ENOTSUPP */

	mutex_lock(&phy->mutex);
	if (phy->power_count > phy->init_count)
		dev_warn(&phy->dev, "phy_power_on was called before phy_init\n");

	/*
	 * init_count is a reference count: the provider's ->init() runs only
	 * on the first caller; later calls just bump the count, to be
	 * balanced by phy_exit().
	 */
	if (phy->init_count == 0 && phy->ops->init) {
		ret = phy->ops->init(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy init failed --> %d\n", ret);
			goto out;
		}
	}
	++phy->init_count;

out:
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put(phy);
	return ret;
}
EXPORT_SYMBOL_GPL(phy_init);

/**
 * phy_exit - Phy internal un-initialization
 * @phy: the phy returned by phy_get()
 *
 * Must be called after phy_power_off().
 *
 * Return: %0 if successful, a negative error code otherwise
 */
int phy_exit(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		return ret;
	ret = 0; /* Override possible ret == -ENOTSUPP */

	mutex_lock(&phy->mutex);
	/* Provider's ->exit() runs only when the last phy_init() is undone. */
	if (phy->init_count == 1 && phy->ops->exit) {
		ret = phy->ops->exit(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy exit failed --> %d\n", ret);
			goto out;
		}
	}
	--phy->init_count;

out:
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put(phy);
	return ret;
}
EXPORT_SYMBOL_GPL(phy_exit);

/**
 * phy_power_on - Enable the phy and enter proper operation
 * @phy: the phy returned by phy_get()
 *
 * Must be called after phy_init().
 *
 * Return: %0 if successful, a negative error code otherwise
 */
int phy_power_on(struct phy *phy)
{
	int ret = 0;

	if (!phy)
		goto out;

	/* optional "phy" supply; acquired in phy_create() when present */
	if (phy->pwr) {
		ret = regulator_enable(phy->pwr);
		if (ret)
			goto out;
	}

	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		goto err_pm_sync;

	ret = 0; /* Override possible ret == -ENOTSUPP */

	mutex_lock(&phy->mutex);
	/* power_count mirrors init_count: ->power_on() only for the first user */
	if (phy->power_count == 0 && phy->ops->power_on) {
		ret = phy->ops->power_on(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
			goto err_pwr_on;
		}
	}
	++phy->power_count;
	mutex_unlock(&phy->mutex);
	return 0;

err_pwr_on:
	/* unwind in reverse order of acquisition */
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put_sync(phy);
err_pm_sync:
	if (phy->pwr)
		regulator_disable(phy->pwr);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(phy_power_on);

/**
 * phy_power_off - Disable the phy.
 * @phy: the phy returned by phy_get()
 *
 * Must be called before phy_exit().
 *
 * Return: %0 if successful, a negative error code otherwise
 */
int phy_power_off(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	mutex_lock(&phy->mutex);
	/* Provider's ->power_off() runs when the last phy_power_on() is undone. */
	if (phy->power_count == 1 && phy->ops->power_off) {
		ret = phy->ops->power_off(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy poweroff failed --> %d\n", ret);
			mutex_unlock(&phy->mutex);
			return ret;
		}
	}
	--phy->power_count;
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put(phy);

	if (phy->pwr)
		regulator_disable(phy->pwr);

	return 0;
}
EXPORT_SYMBOL_GPL(phy_power_off);

int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, int submode)
{
	int ret;

	if (!phy || !phy->ops->set_mode)
		return 0;

	mutex_lock(&phy->mutex);
	ret = phy->ops->set_mode(phy, mode, submode);
	/* cache the active mode only when the provider accepted it */
	if (!ret)
		phy->attrs.mode = mode;
	mutex_unlock(&phy->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_set_mode_ext);

int phy_set_media(struct phy *phy, enum phy_media media)
{
	int ret;

	if (!phy || !phy->ops->set_media)
		return 0;

	mutex_lock(&phy->mutex);
	ret = phy->ops->set_media(phy, media);
	mutex_unlock(&phy->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_set_media);

int phy_set_speed(struct phy *phy, int speed)
{
	int ret;

	if (!phy || !phy->ops->set_speed)
		return 0;

	mutex_lock(&phy->mutex);
	ret = phy->ops->set_speed(phy, speed);
	mutex_unlock(&phy->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_set_speed);

int phy_reset(struct phy *phy)
{
	int ret;

	if (!phy || !phy->ops->reset)
		return 0;

	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		return ret;

	mutex_lock(&phy->mutex);
	ret = phy->ops->reset(phy);
	mutex_unlock(&phy->mutex);

	phy_pm_runtime_put(phy);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_reset);

/**
 * phy_calibrate() - Tunes the phy hw parameters for current configuration
 * @phy: the phy returned by phy_get()
 *
 * Used to calibrate phy hardware, typically by adjusting some parameters in
 * runtime, which are otherwise lost after host controller reset and cannot
 * be applied in phy_init() or phy_power_on().
 *
 * Return: %0 if successful, a negative error code otherwise
 */
int phy_calibrate(struct phy *phy)
{
	int ret;

	/* optional op: silently succeed when the provider has none */
	if (!phy || !phy->ops->calibrate)
		return 0;

	mutex_lock(&phy->mutex);
	ret = phy->ops->calibrate(phy);
	mutex_unlock(&phy->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_calibrate);

/**
 * phy_configure() - Changes the phy parameters
 * @phy: the phy returned by phy_get()
 * @opts: New configuration to apply
 *
 * Used to change the PHY parameters. phy_init() must have been called
 * on the phy. The configuration will be applied on the current phy
 * mode, that can be changed using phy_set_mode().
 *
 * Return: %0 if successful, a negative error code otherwise
 */
int phy_configure(struct phy *phy, union phy_configure_opts *opts)
{
	int ret;

	if (!phy)
		return -EINVAL;

	/* unlike calibrate/set_mode, a missing op here is a hard error */
	if (!phy->ops->configure)
		return -EOPNOTSUPP;

	mutex_lock(&phy->mutex);
	ret = phy->ops->configure(phy, opts);
	mutex_unlock(&phy->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_configure);

/**
 * phy_validate() - Checks the phy parameters
 * @phy: the phy returned by phy_get()
 * @mode: phy_mode the configuration is applicable to.
 * @submode: PHY submode the configuration is applicable to.
 * @opts: Configuration to check
 *
 * Used to check that the current set of parameters can be handled by
 * the phy. Implementations are free to tune the parameters passed as
 * arguments if needed by some implementation detail or
 * constraints. It will not change any actual configuration of the
 * PHY, so calling it as many times as deemed fit will have no side
 * effect.
 *
 * Return: %0 if successful, a negative error code otherwise
 */
int phy_validate(struct phy *phy, enum phy_mode mode, int submode,
		 union phy_configure_opts *opts)
{
	int ret;

	if (!phy)
		return -EINVAL;

	if (!phy->ops->validate)
		return -EOPNOTSUPP;

	mutex_lock(&phy->mutex);
	ret = phy->ops->validate(phy, mode, submode, opts);
	mutex_unlock(&phy->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_validate);

/**
 * _of_phy_get() - lookup and obtain a reference to a phy by phandle
 * @np: device_node for which to get the phy
 * @index: the index of the phy
 *
 * Returns the phy associated with the given phandle value,
 * after getting a refcount to it or -ENODEV if there is no such phy or
 * -EPROBE_DEFER if there is a phandle to the phy, but the device is
 * not yet loaded. This function uses of_xlate call back function provided
 * while registering the phy_provider to find the phy instance.
 */
static struct phy *_of_phy_get(struct device_node *np, int index)
{
	int ret;
	struct phy_provider *phy_provider;
	struct phy *phy = NULL;
	struct of_phandle_args args;

	ret = of_parse_phandle_with_args(np, "phys", "#phy-cells",
					 index, &args);
	if (ret)
		return ERR_PTR(-ENODEV);

	/* This phy type handled by the usb-phy subsystem for now */
	if (of_device_is_compatible(args.np, "usb-nop-xceiv")) {
		phy = ERR_PTR(-ENODEV);
		goto out_put_node;
	}

	mutex_lock(&phy_provider_mutex);
	phy_provider = of_phy_provider_lookup(args.np);
	/* hold the provider module only while calling its of_xlate() */
	if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) {
		phy = ERR_PTR(-EPROBE_DEFER);
		goto out_unlock;
	}

	if (!of_device_is_available(args.np)) {
		dev_warn(phy_provider->dev, "Requested PHY is disabled\n");
		phy = ERR_PTR(-ENODEV);
		goto out_put_module;
	}

	phy = phy_provider->of_xlate(phy_provider->dev, &args);

out_put_module:
	module_put(phy_provider->owner);

out_unlock:
	mutex_unlock(&phy_provider_mutex);
out_put_node:
	of_node_put(args.np);

	return phy;
}

/**
 * of_phy_get() - lookup and obtain a reference to a phy using a device_node.
 * @np: device_node for which to get the phy
 * @con_id: name of the phy from device's point of view
 *
 * Returns the phy driver, after getting a refcount to it; or
 * -ENODEV if there is no such phy. The caller is responsible for
 * calling phy_put() to release that count.
 */
struct phy *of_phy_get(struct device_node *np, const char *con_id)
{
	struct phy *phy = NULL;
	int index = 0;

	if (con_id)
		index = of_property_match_string(np, "phy-names", con_id);

	phy = _of_phy_get(np, index);
	if (IS_ERR(phy))
		return phy;

	/* pin the phy driver's module, then the phy device itself */
	if (!try_module_get(phy->ops->owner))
		return ERR_PTR(-EPROBE_DEFER);

	get_device(&phy->dev);

	return phy;
}
EXPORT_SYMBOL_GPL(of_phy_get);

/**
 * of_phy_put() - release the PHY
 * @phy: the phy returned by of_phy_get()
 *
 * Releases a refcount the caller received from of_phy_get().
 */
void of_phy_put(struct phy *phy)
{
	if (!phy || IS_ERR(phy))
		return;

	mutex_lock(&phy->mutex);
	if (phy->ops->release)
		phy->ops->release(phy);
	mutex_unlock(&phy->mutex);

	/* drop references in reverse order of of_phy_get() */
	module_put(phy->ops->owner);
	put_device(&phy->dev);
}
EXPORT_SYMBOL_GPL(of_phy_put);

/**
 * phy_put() - release the PHY
 * @dev: device that wants to release this phy
 * @phy: the phy returned by phy_get()
 *
 * Releases a refcount the caller received from phy_get().
 */
void phy_put(struct device *dev, struct phy *phy)
{
	/* drop the consumer/supplier link created by phy_get() */
	device_link_remove(dev, &phy->dev);
	of_phy_put(phy);
}
EXPORT_SYMBOL_GPL(phy_put);

/**
 * devm_phy_put() - release the PHY
 * @dev: device that wants to release this phy
 * @phy: the phy returned by devm_phy_get()
 *
 * destroys the devres associated with this phy and invokes phy_put
 * to release the phy.
685 */ 686 void devm_phy_put(struct device *dev, struct phy *phy) 687 { 688 int r; 689 690 if (!phy) 691 return; 692 693 r = devres_release(dev, devm_phy_release, devm_phy_match, phy); 694 dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n"); 695 } 696 EXPORT_SYMBOL_GPL(devm_phy_put); 697 698 /** 699 * of_phy_simple_xlate() - returns the phy instance from phy provider 700 * @dev: the PHY provider device 701 * @args: of_phandle_args (not used here) 702 * 703 * Intended to be used by phy provider for the common case where #phy-cells is 704 * 0. For other cases where #phy-cells is greater than '0', the phy provider 705 * should provide a custom of_xlate function that reads the *args* and returns 706 * the appropriate phy. 707 */ 708 struct phy *of_phy_simple_xlate(struct device *dev, struct of_phandle_args 709 *args) 710 { 711 struct phy *phy; 712 struct class_dev_iter iter; 713 714 class_dev_iter_init(&iter, phy_class, NULL, NULL); 715 while ((dev = class_dev_iter_next(&iter))) { 716 phy = to_phy(dev); 717 if (args->np != phy->dev.of_node) 718 continue; 719 720 class_dev_iter_exit(&iter); 721 return phy; 722 } 723 724 class_dev_iter_exit(&iter); 725 return ERR_PTR(-ENODEV); 726 } 727 EXPORT_SYMBOL_GPL(of_phy_simple_xlate); 728 729 /** 730 * phy_get() - lookup and obtain a reference to a phy. 731 * @dev: device that requests this phy 732 * @string: the phy name as given in the dt data or the name of the controller 733 * port for non-dt case 734 * 735 * Returns the phy driver, after getting a refcount to it; or 736 * -ENODEV if there is no such phy. The caller is responsible for 737 * calling phy_put() to release that count. 
738 */ 739 struct phy *phy_get(struct device *dev, const char *string) 740 { 741 int index = 0; 742 struct phy *phy; 743 struct device_link *link; 744 745 if (dev->of_node) { 746 if (string) 747 index = of_property_match_string(dev->of_node, "phy-names", 748 string); 749 else 750 index = 0; 751 phy = _of_phy_get(dev->of_node, index); 752 } else { 753 if (string == NULL) { 754 dev_WARN(dev, "missing string\n"); 755 return ERR_PTR(-EINVAL); 756 } 757 phy = phy_find(dev, string); 758 } 759 if (IS_ERR(phy)) 760 return phy; 761 762 if (!try_module_get(phy->ops->owner)) 763 return ERR_PTR(-EPROBE_DEFER); 764 765 get_device(&phy->dev); 766 767 link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS); 768 if (!link) 769 dev_dbg(dev, "failed to create device link to %s\n", 770 dev_name(phy->dev.parent)); 771 772 return phy; 773 } 774 EXPORT_SYMBOL_GPL(phy_get); 775 776 /** 777 * devm_phy_get() - lookup and obtain a reference to a phy. 778 * @dev: device that requests this phy 779 * @string: the phy name as given in the dt data or phy device name 780 * for non-dt case 781 * 782 * Gets the phy using phy_get(), and associates a device with it using 783 * devres. On driver detach, release function is invoked on the devres data, 784 * then, devres data is freed. 785 */ 786 struct phy *devm_phy_get(struct device *dev, const char *string) 787 { 788 struct phy **ptr, *phy; 789 790 ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL); 791 if (!ptr) 792 return ERR_PTR(-ENOMEM); 793 794 phy = phy_get(dev, string); 795 if (!IS_ERR(phy)) { 796 *ptr = phy; 797 devres_add(dev, ptr); 798 } else { 799 devres_free(ptr); 800 } 801 802 return phy; 803 } 804 EXPORT_SYMBOL_GPL(devm_phy_get); 805 806 /** 807 * devm_phy_optional_get() - lookup and obtain a reference to an optional phy. 
 * @dev: device that requests this phy
 * @string: the phy name as given in the dt data or phy device name
 * for non-dt case
 *
 * Gets the phy using phy_get(), and associates a device with it using
 * devres. On driver detach, release function is invoked on the devres
 * data, then, devres data is freed. This differs to devm_phy_get() in
 * that if the phy does not exist, it is not considered an error and
 * -ENODEV will not be returned. Instead the NULL phy is returned,
 * which can be passed to all other phy consumer calls.
 */
struct phy *devm_phy_optional_get(struct device *dev, const char *string)
{
	struct phy *phy = devm_phy_get(dev, string);

	/* absence of the phy is fine for "optional": map -ENODEV to NULL */
	if (PTR_ERR(phy) == -ENODEV)
		phy = NULL;

	return phy;
}
EXPORT_SYMBOL_GPL(devm_phy_optional_get);

/**
 * devm_of_phy_get() - lookup and obtain a reference to a phy.
 * @dev: device that requests this phy
 * @np: node containing the phy
 * @con_id: name of the phy from device's point of view
 *
 * Gets the phy using of_phy_get(), and associates a device with it using
 * devres. On driver detach, release function is invoked on the devres data,
 * then, devres data is freed.
 */
struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
			    const char *con_id)
{
	struct phy **ptr, *phy;
	struct device_link *link;

	ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	phy = of_phy_get(np, con_id);
	if (!IS_ERR(phy)) {
		*ptr = phy;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
		return phy;
	}

	/* link consumer to supplier; failure is non-fatal, only logged */
	link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
	if (!link)
		dev_dbg(dev, "failed to create device link to %s\n",
			dev_name(phy->dev.parent));

	return phy;
}
EXPORT_SYMBOL_GPL(devm_of_phy_get);

/**
 * devm_of_phy_optional_get() - lookup and obtain a reference to an optional
 * phy.
 * @dev: device that requests this phy
 * @np: node containing the phy
 * @con_id: name of the phy from device's point of view
 *
 * Gets the phy using of_phy_get(), and associates a device with it using
 * devres. On driver detach, release function is invoked on the devres data,
 * then, devres data is freed. This differs to devm_of_phy_get() in
 * that if the phy does not exist, it is not considered an error and
 * -ENODEV will not be returned. Instead the NULL phy is returned,
 * which can be passed to all other phy consumer calls.
 */
struct phy *devm_of_phy_optional_get(struct device *dev, struct device_node *np,
				     const char *con_id)
{
	struct phy *phy = devm_of_phy_get(dev, np, con_id);

	if (PTR_ERR(phy) == -ENODEV)
		phy = NULL;

	/* any remaining error (not -ENODEV) is real — log it with probe context */
	if (IS_ERR(phy))
		dev_err_probe(dev, PTR_ERR(phy), "failed to get PHY %pOF:%s",
			      np, con_id);

	return phy;
}
EXPORT_SYMBOL_GPL(devm_of_phy_optional_get);

/**
 * devm_of_phy_get_by_index() - lookup and obtain a reference to a phy by index.
 * @dev: device that requests this phy
 * @np: node containing the phy
 * @index: index of the phy
 *
 * Gets the phy using _of_phy_get(), then gets a refcount to it,
 * and associates a device with it using devres. On driver detach,
 * release function is invoked on the devres data,
 * then, devres data is freed.
 *
 */
struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
				     int index)
{
	struct phy **ptr, *phy;
	struct device_link *link;

	ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	phy = _of_phy_get(np, index);
	if (IS_ERR(phy)) {
		devres_free(ptr);
		return phy;
	}

	/* _of_phy_get() takes no module/device refs; take them here */
	if (!try_module_get(phy->ops->owner)) {
		devres_free(ptr);
		return ERR_PTR(-EPROBE_DEFER);
	}

	get_device(&phy->dev);

	*ptr = phy;
	devres_add(dev, ptr);

	/* link consumer to supplier; failure is non-fatal, only logged */
	link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
	if (!link)
		dev_dbg(dev, "failed to create device link to %s\n",
			dev_name(phy->dev.parent));

	return phy;
}
EXPORT_SYMBOL_GPL(devm_of_phy_get_by_index);

/**
 * phy_create() - create a new phy
 * @dev: device that is creating the new phy
 * @node: device node of the phy
 * @ops: function pointers for performing phy operations
 *
 * Called to create a phy using phy framework.
 */
struct phy *phy_create(struct device *dev, struct device_node *node,
		       const struct phy_ops *ops)
{
	int ret;
	int id;
	struct phy *phy;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	phy = kzalloc(sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return ERR_PTR(-ENOMEM);

	id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		dev_err(dev, "unable to get id\n");
		ret = id;
		goto free_phy;
	}

	device_initialize(&phy->dev);
	mutex_init(&phy->mutex);

	phy->dev.class = phy_class;
	phy->dev.parent = dev;
	phy->dev.of_node = node ?: dev->of_node;
	phy->id = id;
	phy->ops = ops;

	ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id);
	if (ret)
		goto put_dev;

	/* phy-supply */
	phy->pwr = regulator_get_optional(&phy->dev, "phy");
	if (IS_ERR(phy->pwr)) {
		ret = PTR_ERR(phy->pwr);
		if (ret == -EPROBE_DEFER)
			goto put_dev;

		/* any other error means "no supply": proceed without one */
		phy->pwr = NULL;
	}

	ret = device_add(&phy->dev);
	if (ret)
		goto put_dev;

	if (pm_runtime_enabled(dev)) {
		pm_runtime_enable(&phy->dev);
		pm_runtime_no_callbacks(&phy->dev);
	}

	phy->debugfs = debugfs_create_dir(dev_name(&phy->dev), phy_debugfs_root);

	return phy;

put_dev:
	/* after device_initialize(), cleanup must go through the release hook */
	put_device(&phy->dev);  /* calls phy_release() which frees resources */
	return ERR_PTR(ret);

free_phy:
	/* before device_initialize(), a plain kfree() is correct */
	kfree(phy);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(phy_create);

/**
 * devm_phy_create() - create a new phy
 * @dev: device that is creating the new phy
 * @node: device node of the phy
 * @ops: function pointers for performing phy operations
 *
 * Creates a new PHY device adding it to the PHY class.
 * While at that, it also associates the device with the phy using devres.
 * On driver detach, release function is invoked on the devres data,
 * then, devres data is freed.
 */
struct phy *devm_phy_create(struct device *dev, struct device_node *node,
			    const struct phy_ops *ops)
{
	struct phy **ptr, *phy;

	ptr = devres_alloc(devm_phy_consume, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	phy = phy_create(dev, node, ops);
	if (!IS_ERR(phy)) {
		*ptr = phy;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return phy;
}
EXPORT_SYMBOL_GPL(devm_phy_create);

/**
 * phy_destroy() - destroy the phy
 * @phy: the phy to be destroyed
 *
 * Called to destroy the phy.
 */
void phy_destroy(struct phy *phy)
{
	pm_runtime_disable(&phy->dev);
	/* final put triggers phy_release(), which frees the struct */
	device_unregister(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_destroy);

/**
 * devm_phy_destroy() - destroy the PHY
 * @dev: device that wants to release this phy
 * @phy: the phy returned by devm_phy_get()
 *
 * destroys the devres associated with this phy and invokes phy_destroy
 * to destroy the phy.
 */
void devm_phy_destroy(struct device *dev, struct phy *phy)
{
	int r;

	r = devres_destroy(dev, devm_phy_consume, devm_phy_match, phy);
	dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
}
EXPORT_SYMBOL_GPL(devm_phy_destroy);

/**
 * __of_phy_provider_register() - create/register phy provider with the framework
 * @dev: struct device of the phy provider
 * @children: device node containing children (if different from dev->of_node)
 * @owner: the module owner containing of_xlate
 * @of_xlate: function pointer to obtain phy instance from phy provider
 *
 * Creates struct phy_provider from dev and of_xlate function pointer.
 * This is used in the case of dt boot for finding the phy instance from
 * phy provider.
 *
 * If the PHY provider doesn't nest children directly but uses a separate
 * child node to contain the individual children, the @children parameter
 * can be used to override the default. If NULL, the default (dev->of_node)
 * will be used. If non-NULL, the device node must be a child (or further
 * descendant) of dev->of_node. Otherwise an ERR_PTR()-encoded -EINVAL
 * error code is returned.
 */
struct phy_provider *__of_phy_provider_register(struct device *dev,
	struct device_node *children, struct module *owner,
	struct phy * (*of_xlate)(struct device *dev,
				 struct of_phandle_args *args))
{
	struct phy_provider *phy_provider;

	/*
	 * If specified, the device node containing the children must itself
	 * be the provider's device node or a child (or further descendant)
	 * thereof.
	 */
	if (children) {
		struct device_node *parent = of_node_get(children), *next;

		/* walk up the tree until we hit dev->of_node or the root */
		while (parent) {
			if (parent == dev->of_node)
				break;

			next = of_get_parent(parent);
			of_node_put(parent);
			parent = next;
		}

		if (!parent)
			return ERR_PTR(-EINVAL);

		of_node_put(parent);
	} else {
		children = dev->of_node;
	}

	phy_provider = kzalloc(sizeof(*phy_provider), GFP_KERNEL);
	if (!phy_provider)
		return ERR_PTR(-ENOMEM);

	phy_provider->dev = dev;
	/* hold a reference on the children node for the provider's lifetime */
	phy_provider->children = of_node_get(children);
	phy_provider->owner = owner;
	phy_provider->of_xlate = of_xlate;

	mutex_lock(&phy_provider_mutex);
	list_add_tail(&phy_provider->list, &phy_provider_list);
	mutex_unlock(&phy_provider_mutex);

	return phy_provider;
}
EXPORT_SYMBOL_GPL(__of_phy_provider_register);

/**
 * __devm_of_phy_provider_register() - create/register phy provider with the
 * framework
 * @dev: struct device of the phy provider
 * @children: device node containing children (if
   different from dev->of_node)
 * @owner: the module owner containing of_xlate
 * @of_xlate: function pointer to obtain phy instance from phy provider
 *
 * Creates struct phy_provider from dev and of_xlate function pointer.
 * This is used in the case of dt boot for finding the phy instance from
 * phy provider. While at that, it also associates the device with the
 * phy provider using devres. On driver detach, release function is invoked
 * on the devres data, then, devres data is freed.
 */
struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
	struct device_node *children, struct module *owner,
	struct phy * (*of_xlate)(struct device *dev,
				 struct of_phandle_args *args))
{
	struct phy_provider **ptr, *phy_provider;

	ptr = devres_alloc(devm_phy_provider_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	phy_provider = __of_phy_provider_register(dev, children, owner,
						  of_xlate);
	if (!IS_ERR(phy_provider)) {
		*ptr = phy_provider;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return phy_provider;
}
EXPORT_SYMBOL_GPL(__devm_of_phy_provider_register);

/**
 * of_phy_provider_unregister() - unregister phy provider from the framework
 * @phy_provider: phy provider returned by of_phy_provider_register()
 *
 * Removes the phy_provider created using of_phy_provider_register().
 */
void of_phy_provider_unregister(struct phy_provider *phy_provider)
{
	if (IS_ERR(phy_provider))
		return;

	mutex_lock(&phy_provider_mutex);
	list_del(&phy_provider->list);
	/* drop the children-node reference taken at registration */
	of_node_put(phy_provider->children);
	kfree(phy_provider);
	mutex_unlock(&phy_provider_mutex);
}
EXPORT_SYMBOL_GPL(of_phy_provider_unregister);

/**
 * devm_of_phy_provider_unregister() - remove phy provider from the framework
 * @dev: struct device of the phy provider
 * @phy_provider: phy provider returned by of_phy_provider_register()
 *
 * destroys the devres associated with this phy provider and invokes
 * of_phy_provider_unregister to unregister the phy provider.
 */
void devm_of_phy_provider_unregister(struct device *dev,
				     struct phy_provider *phy_provider)
{
	int r;

	r = devres_destroy(dev, devm_phy_provider_release, devm_phy_match,
			   phy_provider);
	dev_WARN_ONCE(dev, r, "couldn't find PHY provider device resource\n");
}
EXPORT_SYMBOL_GPL(devm_of_phy_provider_unregister);

/**
 * phy_release() - release the phy
 * @dev: the dev member within phy
 *
 * When the last reference to the device is removed, it is called
 * from the embedded kobject as release method.
 */
static void phy_release(struct device *dev)
{
	struct phy *phy;

	phy = to_phy(dev);
	dev_vdbg(dev, "releasing '%s'\n", dev_name(dev));
	/* undo phy_create() in reverse order: debugfs, regulator, id, memory */
	debugfs_remove_recursive(phy->debugfs);
	regulator_put(phy->pwr);
	ida_simple_remove(&phy_ida, phy->id);
	kfree(phy);
}

/* Module init: create the PHY class and the debugfs root directory. */
static int __init phy_core_init(void)
{
	phy_class = class_create("phy");
	if (IS_ERR(phy_class)) {
		pr_err("failed to create phy class --> %ld\n",
		       PTR_ERR(phy_class));
		return PTR_ERR(phy_class);
	}

	phy_class->dev_release = phy_release;

	phy_debugfs_root = debugfs_create_dir("phy", NULL);

	return 0;
}
device_initcall(phy_core_init);

/* Module exit: tear down the debugfs tree and the PHY class. */
static void __exit phy_core_exit(void)
{
	debugfs_remove_recursive(phy_debugfs_root);
	class_destroy(phy_class);
}
module_exit(phy_core_exit);