1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * phy-core.c -- Generic Phy framework. 4 * 5 * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com 6 * 7 * Author: Kishon Vijay Abraham I <kishon@ti.com> 8 */ 9 10 #include <linux/kernel.h> 11 #include <linux/export.h> 12 #include <linux/module.h> 13 #include <linux/err.h> 14 #include <linux/debugfs.h> 15 #include <linux/device.h> 16 #include <linux/slab.h> 17 #include <linux/of.h> 18 #include <linux/phy/phy.h> 19 #include <linux/idr.h> 20 #include <linux/pm_runtime.h> 21 #include <linux/regulator/consumer.h> 22 23 static struct class *phy_class; 24 static struct dentry *phy_debugfs_root; 25 static DEFINE_MUTEX(phy_provider_mutex); 26 static LIST_HEAD(phy_provider_list); 27 static LIST_HEAD(phys); 28 static DEFINE_IDA(phy_ida); 29 30 static void devm_phy_release(struct device *dev, void *res) 31 { 32 struct phy *phy = *(struct phy **)res; 33 34 phy_put(dev, phy); 35 } 36 37 static void devm_phy_provider_release(struct device *dev, void *res) 38 { 39 struct phy_provider *phy_provider = *(struct phy_provider **)res; 40 41 of_phy_provider_unregister(phy_provider); 42 } 43 44 static void devm_phy_consume(struct device *dev, void *res) 45 { 46 struct phy *phy = *(struct phy **)res; 47 48 phy_destroy(phy); 49 } 50 51 static int devm_phy_match(struct device *dev, void *res, void *match_data) 52 { 53 struct phy **phy = res; 54 55 return *phy == match_data; 56 } 57 58 /** 59 * phy_create_lookup() - allocate and register PHY/device association 60 * @phy: the phy of the association 61 * @con_id: connection ID string on device 62 * @dev_id: the device of the association 63 * 64 * Creates and registers phy_lookup entry. 
65 */ 66 int phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id) 67 { 68 struct phy_lookup *pl; 69 70 if (!phy || !dev_id || !con_id) 71 return -EINVAL; 72 73 pl = kzalloc(sizeof(*pl), GFP_KERNEL); 74 if (!pl) 75 return -ENOMEM; 76 77 pl->dev_id = dev_id; 78 pl->con_id = con_id; 79 pl->phy = phy; 80 81 mutex_lock(&phy_provider_mutex); 82 list_add_tail(&pl->node, &phys); 83 mutex_unlock(&phy_provider_mutex); 84 85 return 0; 86 } 87 EXPORT_SYMBOL_GPL(phy_create_lookup); 88 89 /** 90 * phy_remove_lookup() - find and remove PHY/device association 91 * @phy: the phy of the association 92 * @con_id: connection ID string on device 93 * @dev_id: the device of the association 94 * 95 * Finds and unregisters phy_lookup entry that was created with 96 * phy_create_lookup(). 97 */ 98 void phy_remove_lookup(struct phy *phy, const char *con_id, const char *dev_id) 99 { 100 struct phy_lookup *pl; 101 102 if (!phy || !dev_id || !con_id) 103 return; 104 105 mutex_lock(&phy_provider_mutex); 106 list_for_each_entry(pl, &phys, node) 107 if (pl->phy == phy && !strcmp(pl->dev_id, dev_id) && 108 !strcmp(pl->con_id, con_id)) { 109 list_del(&pl->node); 110 kfree(pl); 111 break; 112 } 113 mutex_unlock(&phy_provider_mutex); 114 } 115 EXPORT_SYMBOL_GPL(phy_remove_lookup); 116 117 static struct phy *phy_find(struct device *dev, const char *con_id) 118 { 119 const char *dev_id = dev_name(dev); 120 struct phy_lookup *p, *pl = NULL; 121 122 mutex_lock(&phy_provider_mutex); 123 list_for_each_entry(p, &phys, node) 124 if (!strcmp(p->dev_id, dev_id) && !strcmp(p->con_id, con_id)) { 125 pl = p; 126 break; 127 } 128 mutex_unlock(&phy_provider_mutex); 129 130 return pl ? 
pl->phy : ERR_PTR(-ENODEV); 131 } 132 133 static struct phy_provider *of_phy_provider_lookup(struct device_node *node) 134 { 135 struct phy_provider *phy_provider; 136 struct device_node *child; 137 138 list_for_each_entry(phy_provider, &phy_provider_list, list) { 139 if (phy_provider->dev->of_node == node) 140 return phy_provider; 141 142 for_each_child_of_node(phy_provider->children, child) 143 if (child == node) 144 return phy_provider; 145 } 146 147 return ERR_PTR(-EPROBE_DEFER); 148 } 149 150 int phy_pm_runtime_get(struct phy *phy) 151 { 152 int ret; 153 154 if (!phy) 155 return 0; 156 157 if (!pm_runtime_enabled(&phy->dev)) 158 return -ENOTSUPP; 159 160 ret = pm_runtime_get(&phy->dev); 161 if (ret < 0 && ret != -EINPROGRESS) 162 pm_runtime_put_noidle(&phy->dev); 163 164 return ret; 165 } 166 EXPORT_SYMBOL_GPL(phy_pm_runtime_get); 167 168 int phy_pm_runtime_get_sync(struct phy *phy) 169 { 170 int ret; 171 172 if (!phy) 173 return 0; 174 175 if (!pm_runtime_enabled(&phy->dev)) 176 return -ENOTSUPP; 177 178 ret = pm_runtime_get_sync(&phy->dev); 179 if (ret < 0) 180 pm_runtime_put_sync(&phy->dev); 181 182 return ret; 183 } 184 EXPORT_SYMBOL_GPL(phy_pm_runtime_get_sync); 185 186 int phy_pm_runtime_put(struct phy *phy) 187 { 188 if (!phy) 189 return 0; 190 191 if (!pm_runtime_enabled(&phy->dev)) 192 return -ENOTSUPP; 193 194 return pm_runtime_put(&phy->dev); 195 } 196 EXPORT_SYMBOL_GPL(phy_pm_runtime_put); 197 198 int phy_pm_runtime_put_sync(struct phy *phy) 199 { 200 if (!phy) 201 return 0; 202 203 if (!pm_runtime_enabled(&phy->dev)) 204 return -ENOTSUPP; 205 206 return pm_runtime_put_sync(&phy->dev); 207 } 208 EXPORT_SYMBOL_GPL(phy_pm_runtime_put_sync); 209 210 void phy_pm_runtime_allow(struct phy *phy) 211 { 212 if (!phy) 213 return; 214 215 if (!pm_runtime_enabled(&phy->dev)) 216 return; 217 218 pm_runtime_allow(&phy->dev); 219 } 220 EXPORT_SYMBOL_GPL(phy_pm_runtime_allow); 221 222 void phy_pm_runtime_forbid(struct phy *phy) 223 { 224 if (!phy) 225 return; 226 
227 if (!pm_runtime_enabled(&phy->dev)) 228 return; 229 230 pm_runtime_forbid(&phy->dev); 231 } 232 EXPORT_SYMBOL_GPL(phy_pm_runtime_forbid); 233 234 /** 235 * phy_init - phy internal initialization before phy operation 236 * @phy: the phy returned by phy_get() 237 * 238 * Used to allow phy's driver to perform phy internal initialization, 239 * such as PLL block powering, clock initialization or anything that's 240 * is required by the phy to perform the start of operation. 241 * Must be called before phy_power_on(). 242 * 243 * Return: %0 if successful, a negative error code otherwise 244 */ 245 int phy_init(struct phy *phy) 246 { 247 int ret; 248 249 if (!phy) 250 return 0; 251 252 ret = phy_pm_runtime_get_sync(phy); 253 if (ret < 0 && ret != -ENOTSUPP) 254 return ret; 255 ret = 0; /* Override possible ret == -ENOTSUPP */ 256 257 mutex_lock(&phy->mutex); 258 if (phy->power_count > phy->init_count) 259 dev_warn(&phy->dev, "phy_power_on was called before phy_init\n"); 260 261 if (phy->init_count == 0 && phy->ops->init) { 262 ret = phy->ops->init(phy); 263 if (ret < 0) { 264 dev_err(&phy->dev, "phy init failed --> %d\n", ret); 265 goto out; 266 } 267 } 268 ++phy->init_count; 269 270 out: 271 mutex_unlock(&phy->mutex); 272 phy_pm_runtime_put(phy); 273 return ret; 274 } 275 EXPORT_SYMBOL_GPL(phy_init); 276 277 /** 278 * phy_exit - Phy internal un-initialization 279 * @phy: the phy returned by phy_get() 280 * 281 * Must be called after phy_power_off(). 
282 * 283 * Return: %0 if successful, a negative error code otherwise 284 */ 285 int phy_exit(struct phy *phy) 286 { 287 int ret; 288 289 if (!phy) 290 return 0; 291 292 ret = phy_pm_runtime_get_sync(phy); 293 if (ret < 0 && ret != -ENOTSUPP) 294 return ret; 295 ret = 0; /* Override possible ret == -ENOTSUPP */ 296 297 mutex_lock(&phy->mutex); 298 if (phy->init_count == 1 && phy->ops->exit) { 299 ret = phy->ops->exit(phy); 300 if (ret < 0) { 301 dev_err(&phy->dev, "phy exit failed --> %d\n", ret); 302 goto out; 303 } 304 } 305 --phy->init_count; 306 307 out: 308 mutex_unlock(&phy->mutex); 309 phy_pm_runtime_put(phy); 310 return ret; 311 } 312 EXPORT_SYMBOL_GPL(phy_exit); 313 314 /** 315 * phy_power_on - Enable the phy and enter proper operation 316 * @phy: the phy returned by phy_get() 317 * 318 * Must be called after phy_init(). 319 * 320 * Return: %0 if successful, a negative error code otherwise 321 */ 322 int phy_power_on(struct phy *phy) 323 { 324 int ret = 0; 325 326 if (!phy) 327 goto out; 328 329 if (phy->pwr) { 330 ret = regulator_enable(phy->pwr); 331 if (ret) 332 goto out; 333 } 334 335 ret = phy_pm_runtime_get_sync(phy); 336 if (ret < 0 && ret != -ENOTSUPP) 337 goto err_pm_sync; 338 339 ret = 0; /* Override possible ret == -ENOTSUPP */ 340 341 mutex_lock(&phy->mutex); 342 if (phy->power_count == 0 && phy->ops->power_on) { 343 ret = phy->ops->power_on(phy); 344 if (ret < 0) { 345 dev_err(&phy->dev, "phy poweron failed --> %d\n", ret); 346 goto err_pwr_on; 347 } 348 } 349 ++phy->power_count; 350 mutex_unlock(&phy->mutex); 351 return 0; 352 353 err_pwr_on: 354 mutex_unlock(&phy->mutex); 355 phy_pm_runtime_put_sync(phy); 356 err_pm_sync: 357 if (phy->pwr) 358 regulator_disable(phy->pwr); 359 out: 360 return ret; 361 } 362 EXPORT_SYMBOL_GPL(phy_power_on); 363 364 /** 365 * phy_power_off - Disable the phy. 366 * @phy: the phy returned by phy_get() 367 * 368 * Must be called before phy_exit(). 
369 * 370 * Return: %0 if successful, a negative error code otherwise 371 */ 372 int phy_power_off(struct phy *phy) 373 { 374 int ret; 375 376 if (!phy) 377 return 0; 378 379 mutex_lock(&phy->mutex); 380 if (phy->power_count == 1 && phy->ops->power_off) { 381 ret = phy->ops->power_off(phy); 382 if (ret < 0) { 383 dev_err(&phy->dev, "phy poweroff failed --> %d\n", ret); 384 mutex_unlock(&phy->mutex); 385 return ret; 386 } 387 } 388 --phy->power_count; 389 mutex_unlock(&phy->mutex); 390 phy_pm_runtime_put(phy); 391 392 if (phy->pwr) 393 regulator_disable(phy->pwr); 394 395 return 0; 396 } 397 EXPORT_SYMBOL_GPL(phy_power_off); 398 399 int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, int submode) 400 { 401 int ret; 402 403 if (!phy || !phy->ops->set_mode) 404 return 0; 405 406 mutex_lock(&phy->mutex); 407 ret = phy->ops->set_mode(phy, mode, submode); 408 if (!ret) 409 phy->attrs.mode = mode; 410 mutex_unlock(&phy->mutex); 411 412 return ret; 413 } 414 EXPORT_SYMBOL_GPL(phy_set_mode_ext); 415 416 int phy_set_media(struct phy *phy, enum phy_media media) 417 { 418 int ret; 419 420 if (!phy || !phy->ops->set_media) 421 return 0; 422 423 mutex_lock(&phy->mutex); 424 ret = phy->ops->set_media(phy, media); 425 mutex_unlock(&phy->mutex); 426 427 return ret; 428 } 429 EXPORT_SYMBOL_GPL(phy_set_media); 430 431 int phy_set_speed(struct phy *phy, int speed) 432 { 433 int ret; 434 435 if (!phy || !phy->ops->set_speed) 436 return 0; 437 438 mutex_lock(&phy->mutex); 439 ret = phy->ops->set_speed(phy, speed); 440 mutex_unlock(&phy->mutex); 441 442 return ret; 443 } 444 EXPORT_SYMBOL_GPL(phy_set_speed); 445 446 int phy_reset(struct phy *phy) 447 { 448 int ret; 449 450 if (!phy || !phy->ops->reset) 451 return 0; 452 453 ret = phy_pm_runtime_get_sync(phy); 454 if (ret < 0 && ret != -ENOTSUPP) 455 return ret; 456 457 mutex_lock(&phy->mutex); 458 ret = phy->ops->reset(phy); 459 mutex_unlock(&phy->mutex); 460 461 phy_pm_runtime_put(phy); 462 463 return ret; 464 } 465 
EXPORT_SYMBOL_GPL(phy_reset);

/**
 * phy_calibrate() - Tunes the phy hw parameters for current configuration
 * @phy: the phy returned by phy_get()
 *
 * Used to calibrate phy hardware, typically by adjusting some parameters in
 * runtime, which are otherwise lost after host controller reset and cannot
 * be applied in phy_init() or phy_power_on().
 *
 * Return: %0 if successful, a negative error code otherwise
 */
int phy_calibrate(struct phy *phy)
{
	int ret;

	if (!phy || !phy->ops->calibrate)
		return 0;

	mutex_lock(&phy->mutex);
	ret = phy->ops->calibrate(phy);
	mutex_unlock(&phy->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_calibrate);

/**
 * phy_configure() - Changes the phy parameters
 * @phy: the phy returned by phy_get()
 * @opts: New configuration to apply
 *
 * Used to change the PHY parameters. phy_init() must have been called
 * on the phy. The configuration will be applied on the current phy
 * mode, that can be changed using phy_set_mode().
 *
 * Return: %0 if successful, a negative error code otherwise
 */
int phy_configure(struct phy *phy, union phy_configure_opts *opts)
{
	int ret;

	if (!phy)
		return -EINVAL;

	if (!phy->ops->configure)
		return -EOPNOTSUPP;

	mutex_lock(&phy->mutex);
	ret = phy->ops->configure(phy, opts);
	mutex_unlock(&phy->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_configure);

/**
 * phy_validate() - Checks the phy parameters
 * @phy: the phy returned by phy_get()
 * @mode: phy_mode the configuration is applicable to.
 * @submode: PHY submode the configuration is applicable to.
 * @opts: Configuration to check
 *
 * Used to check that the current set of parameters can be handled by
 * the phy. Implementations are free to tune the parameters passed as
 * arguments if needed by some implementation detail or
 * constraints. It will not change any actual configuration of the
 * PHY, so calling it as many times as deemed fit will have no side
 * effect.
 *
 * Return: %0 if successful, a negative error code otherwise
 */
int phy_validate(struct phy *phy, enum phy_mode mode, int submode,
		 union phy_configure_opts *opts)
{
	int ret;

	if (!phy)
		return -EINVAL;

	if (!phy->ops->validate)
		return -EOPNOTSUPP;

	mutex_lock(&phy->mutex);
	ret = phy->ops->validate(phy, mode, submode, opts);
	mutex_unlock(&phy->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_validate);

/**
 * _of_phy_get() - lookup and obtain a reference to a phy by phandle
 * @np: device_node for which to get the phy
 * @index: the index of the phy
 *
 * Returns the phy associated with the given phandle value,
 * after getting a refcount to it or -ENODEV if there is no such phy or
 * -EPROBE_DEFER if there is a phandle to the phy, but the device is
 * not yet loaded. This function uses of_xlate call back function provided
 * while registering the phy_provider to find the phy instance.
 */
static struct phy *_of_phy_get(struct device_node *np, int index)
{
	int ret;
	struct phy_provider *phy_provider;
	struct phy *phy = NULL;
	struct of_phandle_args args;

	ret = of_parse_phandle_with_args(np, "phys", "#phy-cells",
					 index, &args);
	if (ret)
		return ERR_PTR(-ENODEV);

	/* This phy type handled by the usb-phy subsystem for now */
	if (of_device_is_compatible(args.np, "usb-nop-xceiv")) {
		phy = ERR_PTR(-ENODEV);
		goto out_put_node;
	}

	/*
	 * phy_provider_mutex is held across the provider lookup and the
	 * of_xlate() call; a module reference on the provider keeps its
	 * of_xlate implementation resident meanwhile.
	 */
	mutex_lock(&phy_provider_mutex);
	phy_provider = of_phy_provider_lookup(args.np);
	if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) {
		phy = ERR_PTR(-EPROBE_DEFER);
		goto out_unlock;
	}

	if (!of_device_is_available(args.np)) {
		dev_warn(phy_provider->dev, "Requested PHY is disabled\n");
		phy = ERR_PTR(-ENODEV);
		goto out_put_module;
	}

	phy = phy_provider->of_xlate(phy_provider->dev, &args);

out_put_module:
	module_put(phy_provider->owner);

out_unlock:
	mutex_unlock(&phy_provider_mutex);
out_put_node:
	/* balances the node reference taken by of_parse_phandle_with_args() */
	of_node_put(args.np);

	return phy;
}

/**
 * of_phy_get() - lookup and obtain a reference to a phy using a device_node.
 * @np: device_node for which to get the phy
 * @con_id: name of the phy from device's point of view
 *
 * Returns the phy driver, after getting a refcount to it; or
 * -ENODEV if there is no such phy. The caller is responsible for
 * calling phy_put() to release that count.
 */
struct phy *of_phy_get(struct device_node *np, const char *con_id)
{
	struct phy *phy = NULL;
	int index = 0;

	if (con_id)
		index = of_property_match_string(np, "phy-names", con_id);

	phy = _of_phy_get(np, index);
	if (IS_ERR(phy))
		return phy;

	/* pin the phy driver's module and device for the caller */
	if (!try_module_get(phy->ops->owner))
		return ERR_PTR(-EPROBE_DEFER);

	get_device(&phy->dev);

	return phy;
}
EXPORT_SYMBOL_GPL(of_phy_get);

/**
 * of_phy_put() - release the PHY
 * @phy: the phy returned by of_phy_get()
 *
 * Releases a refcount the caller received from of_phy_get().
 */
void of_phy_put(struct phy *phy)
{
	if (!phy || IS_ERR(phy))
		return;

	mutex_lock(&phy->mutex);
	if (phy->ops->release)
		phy->ops->release(phy);
	mutex_unlock(&phy->mutex);

	/* drop the module and device references taken in of_phy_get() */
	module_put(phy->ops->owner);
	put_device(&phy->dev);
}
EXPORT_SYMBOL_GPL(of_phy_put);

/**
 * phy_put() - release the PHY
 * @dev: device that wants to release this phy
 * @phy: the phy returned by phy_get()
 *
 * Releases a refcount the caller received from phy_get().
 */
void phy_put(struct device *dev, struct phy *phy)
{
	device_link_remove(dev, &phy->dev);
	of_phy_put(phy);
}
EXPORT_SYMBOL_GPL(phy_put);

/**
 * devm_phy_put() - release the PHY
 * @dev: device that wants to release this phy
 * @phy: the phy returned by devm_phy_get()
 *
 * destroys the devres associated with this phy and invokes phy_put
 * to release the phy.
 */
void devm_phy_put(struct device *dev, struct phy *phy)
{
	int r;

	if (!phy)
		return;

	r = devres_destroy(dev, devm_phy_release, devm_phy_match, phy);
	dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
}
EXPORT_SYMBOL_GPL(devm_phy_put);

/**
 * of_phy_simple_xlate() - returns the phy instance from phy provider
 * @dev: the PHY provider device (reused below as the class iteration cursor)
 * @args: of_phandle_args; only args->np is used, to match the phy's OF node
 *
 * Intended to be used by phy provider for the common case where #phy-cells is
 * 0. For other cases where #phy-cells is greater than '0', the phy provider
 * should provide a custom of_xlate function that reads the *args* and returns
 * the appropriate phy.
 */
struct phy *of_phy_simple_xlate(struct device *dev, struct of_phandle_args
				*args)
{
	struct phy *phy;
	struct class_dev_iter iter;

	class_dev_iter_init(&iter, phy_class, NULL, NULL);
	while ((dev = class_dev_iter_next(&iter))) {
		phy = to_phy(dev);
		if (args->np != phy->dev.of_node)
			continue;

		class_dev_iter_exit(&iter);
		return phy;
	}

	class_dev_iter_exit(&iter);
	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(of_phy_simple_xlate);

/**
 * phy_get() - lookup and obtain a reference to a phy.
 * @dev: device that requests this phy
 * @string: the phy name as given in the dt data or the name of the controller
 * port for non-dt case
 *
 * Returns the phy driver, after getting a refcount to it; or
 * -ENODEV if there is no such phy. The caller is responsible for
 * calling phy_put() to release that count.
 */
struct phy *phy_get(struct device *dev, const char *string)
{
	int index = 0;
	struct phy *phy;
	struct device_link *link;

	if (dev->of_node) {
		/* DT case: resolve via the "phys"/"phy-names" properties */
		if (string)
			index = of_property_match_string(dev->of_node, "phy-names",
							 string);
		else
			index = 0;
		phy = _of_phy_get(dev->of_node, index);
	} else {
		/* non-DT case: resolve via the phy_create_lookup() table */
		if (string == NULL) {
			dev_WARN(dev, "missing string\n");
			return ERR_PTR(-EINVAL);
		}
		phy = phy_find(dev, string);
	}
	if (IS_ERR(phy))
		return phy;

	if (!try_module_get(phy->ops->owner))
		return ERR_PTR(-EPROBE_DEFER);

	get_device(&phy->dev);

	/* link failure is non-fatal: the phy is still usable without it */
	link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
	if (!link)
		dev_dbg(dev, "failed to create device link to %s\n",
			dev_name(phy->dev.parent));

	return phy;
}
EXPORT_SYMBOL_GPL(phy_get);

/**
 * devm_phy_get() - lookup and obtain a reference to a phy.
 * @dev: device that requests this phy
 * @string: the phy name as given in the dt data or phy device name
 * for non-dt case
 *
 * Gets the phy using phy_get(), and associates a device with it using
 * devres. On driver detach, release function is invoked on the devres data,
 * then, devres data is freed.
 */
struct phy *devm_phy_get(struct device *dev, const char *string)
{
	struct phy **ptr, *phy;

	ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	phy = phy_get(dev, string);
	if (!IS_ERR(phy)) {
		*ptr = phy;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return phy;
}
EXPORT_SYMBOL_GPL(devm_phy_get);

/**
 * devm_phy_optional_get() - lookup and obtain a reference to an optional phy.
 * @dev: device that requests this phy
 * @string: the phy name as given in the dt data or phy device name
 * for non-dt case
 *
 * Gets the phy using phy_get(), and associates a device with it using
 * devres. On driver detach, release function is invoked on the devres
 * data, then, devres data is freed. This differs to devm_phy_get() in
 * that if the phy does not exist, it is not considered an error and
 * -ENODEV will not be returned. Instead the NULL phy is returned,
 * which can be passed to all other phy consumer calls.
 */
struct phy *devm_phy_optional_get(struct device *dev, const char *string)
{
	struct phy *phy = devm_phy_get(dev, string);

	if (PTR_ERR(phy) == -ENODEV)
		phy = NULL;

	return phy;
}
EXPORT_SYMBOL_GPL(devm_phy_optional_get);

/**
 * devm_of_phy_get() - lookup and obtain a reference to a phy.
 * @dev: device that requests this phy
 * @np: node containing the phy
 * @con_id: name of the phy from device's point of view
 *
 * Gets the phy using of_phy_get(), and associates a device with it using
 * devres. On driver detach, release function is invoked on the devres data,
 * then, devres data is freed.
 */
struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
			    const char *con_id)
{
	struct phy **ptr, *phy;
	struct device_link *link;

	ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	phy = of_phy_get(np, con_id);
	if (!IS_ERR(phy)) {
		*ptr = phy;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
		return phy;
	}

	/* link failure is non-fatal: the phy is still usable without it */
	link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
	if (!link)
		dev_dbg(dev, "failed to create device link to %s\n",
			dev_name(phy->dev.parent));

	return phy;
}
EXPORT_SYMBOL_GPL(devm_of_phy_get);

/**
 * devm_of_phy_optional_get() - lookup and obtain a reference to an optional
 * phy.
 * @dev: device that requests this phy
 * @np: node containing the phy
 * @con_id: name of the phy from device's point of view
 *
 * Gets the phy using of_phy_get(), and associates a device with it using
 * devres. On driver detach, release function is invoked on the devres data,
 * then, devres data is freed. This differs to devm_of_phy_get() in
 * that if the phy does not exist, it is not considered an error and
 * -ENODEV will not be returned. Instead the NULL phy is returned,
 * which can be passed to all other phy consumer calls.
 */
struct phy *devm_of_phy_optional_get(struct device *dev, struct device_node *np,
				     const char *con_id)
{
	struct phy *phy = devm_of_phy_get(dev, np, con_id);

	if (PTR_ERR(phy) == -ENODEV)
		phy = NULL;

	/* any remaining error (other than -ENODEV, mapped above) is real */
	if (IS_ERR(phy))
		dev_err_probe(dev, PTR_ERR(phy), "failed to get PHY %pOF:%s",
			      np, con_id);

	return phy;
}
EXPORT_SYMBOL_GPL(devm_of_phy_optional_get);

/**
 * devm_of_phy_get_by_index() - lookup and obtain a reference to a phy by index.
 * @dev: device that requests this phy
 * @np: node containing the phy
 * @index: index of the phy
 *
 * Gets the phy using _of_phy_get(), then gets a refcount to it,
 * and associates a device with it using devres. On driver detach,
 * release function is invoked on the devres data,
 * then, devres data is freed.
 *
 */
struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
				     int index)
{
	struct phy **ptr, *phy;
	struct device_link *link;

	ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	phy = _of_phy_get(np, index);
	if (IS_ERR(phy)) {
		devres_free(ptr);
		return phy;
	}

	if (!try_module_get(phy->ops->owner)) {
		devres_free(ptr);
		return ERR_PTR(-EPROBE_DEFER);
	}

	get_device(&phy->dev);

	*ptr = phy;
	devres_add(dev, ptr);

	/* link failure is non-fatal: the phy is still usable without it */
	link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
	if (!link)
		dev_dbg(dev, "failed to create device link to %s\n",
			dev_name(phy->dev.parent));

	return phy;
}
EXPORT_SYMBOL_GPL(devm_of_phy_get_by_index);

/**
 * phy_create() - create a new phy
 * @dev: device that is creating the new phy
 * @node: device node of the phy
 * @ops: function pointers for performing phy operations
 *
 * Called to create a phy using phy framework.
 */
struct phy *phy_create(struct device *dev, struct device_node *node,
		       const struct phy_ops *ops)
{
	int ret;
	int id;
	struct phy *phy;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	phy = kzalloc(sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return ERR_PTR(-ENOMEM);

	/* id is released by phy_release() via ida_simple_remove() */
	id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		dev_err(dev, "unable to get id\n");
		ret = id;
		goto free_phy;
	}

	device_initialize(&phy->dev);
	mutex_init(&phy->mutex);

	phy->dev.class = phy_class;
	phy->dev.parent = dev;
	phy->dev.of_node = node ?: dev->of_node;
	phy->id = id;
	phy->ops = ops;

	ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id);
	if (ret)
		goto put_dev;

	/* phy-supply */
	phy->pwr = regulator_get_optional(&phy->dev, "phy");
	if (IS_ERR(phy->pwr)) {
		ret = PTR_ERR(phy->pwr);
		if (ret == -EPROBE_DEFER)
			goto put_dev;

		/* the supply is genuinely optional: absence is not an error */
		phy->pwr = NULL;
	}

	ret = device_add(&phy->dev);
	if (ret)
		goto put_dev;

	if (pm_runtime_enabled(dev)) {
		pm_runtime_enable(&phy->dev);
		pm_runtime_no_callbacks(&phy->dev);
	}

	phy->debugfs = debugfs_create_dir(dev_name(&phy->dev), phy_debugfs_root);

	return phy;

put_dev:
	put_device(&phy->dev);  /* calls phy_release() which frees resources */
	return ERR_PTR(ret);

free_phy:
	/* device not yet initialized: free directly, no phy_release() yet */
	kfree(phy);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(phy_create);

/**
 * devm_phy_create() - create a new phy
 * @dev: device that is creating the new phy
 * @node: device node of the phy
 * @ops: function pointers for performing phy operations
 *
 * Creates a new PHY device adding it to the PHY class.
 * While at that, it also associates the device with the phy using devres.
 * On driver detach, release function is invoked on the devres data,
 * then, devres data is freed.
 */
struct phy *devm_phy_create(struct device *dev, struct device_node *node,
			    const struct phy_ops *ops)
{
	struct phy **ptr, *phy;

	ptr = devres_alloc(devm_phy_consume, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	phy = phy_create(dev, node, ops);
	if (!IS_ERR(phy)) {
		*ptr = phy;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return phy;
}
EXPORT_SYMBOL_GPL(devm_phy_create);

/**
 * phy_destroy() - destroy the phy
 * @phy: the phy to be destroyed
 *
 * Called to destroy the phy.
 */
void phy_destroy(struct phy *phy)
{
	pm_runtime_disable(&phy->dev);
	device_unregister(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_destroy);

/**
 * devm_phy_destroy() - destroy the PHY
 * @dev: device that wants to release this phy
 * @phy: the phy returned by devm_phy_get()
 *
 * destroys the devres associated with this phy and invokes phy_destroy
 * to destroy the phy.
 */
void devm_phy_destroy(struct device *dev, struct phy *phy)
{
	int r;

	r = devres_destroy(dev, devm_phy_consume, devm_phy_match, phy);
	dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
}
EXPORT_SYMBOL_GPL(devm_phy_destroy);

/**
 * __of_phy_provider_register() - create/register phy provider with the framework
 * @dev: struct device of the phy provider
 * @children: device node containing children (if different from dev->of_node)
 * @owner: the module owner containing of_xlate
 * @of_xlate: function pointer to obtain phy instance from phy provider
 *
 * Creates struct phy_provider from dev and of_xlate function pointer.
 * This is used in the case of dt boot for finding the phy instance from
 * phy provider.
 *
 * If the PHY provider doesn't nest children directly but uses a separate
 * child node to contain the individual children, the @children parameter
 * can be used to override the default. If NULL, the default (dev->of_node)
 * will be used. If non-NULL, the device node must be a child (or further
 * descendant) of dev->of_node. Otherwise an ERR_PTR()-encoded -EINVAL
 * error code is returned.
 */
struct phy_provider *__of_phy_provider_register(struct device *dev,
	struct device_node *children, struct module *owner,
	struct phy * (*of_xlate)(struct device *dev,
				 struct of_phandle_args *args))
{
	struct phy_provider *phy_provider;

	/*
	 * If specified, the device node containing the children must itself
	 * be the provider's device node or a child (or further descendant)
	 * thereof.
	 */
	if (children) {
		/* walk up from @children toward the root, looking for dev->of_node */
		struct device_node *parent = of_node_get(children), *next;

		while (parent) {
			if (parent == dev->of_node)
				break;

			next = of_get_parent(parent);
			of_node_put(parent);
			parent = next;
		}

		if (!parent)
			return ERR_PTR(-EINVAL);

		of_node_put(parent);
	} else {
		children = dev->of_node;
	}

	phy_provider = kzalloc(sizeof(*phy_provider), GFP_KERNEL);
	if (!phy_provider)
		return ERR_PTR(-ENOMEM);

	phy_provider->dev = dev;
	/* reference dropped in of_phy_provider_unregister() */
	phy_provider->children = of_node_get(children);
	phy_provider->owner = owner;
	phy_provider->of_xlate = of_xlate;

	mutex_lock(&phy_provider_mutex);
	list_add_tail(&phy_provider->list, &phy_provider_list);
	mutex_unlock(&phy_provider_mutex);

	return phy_provider;
}
EXPORT_SYMBOL_GPL(__of_phy_provider_register);

/**
 * __devm_of_phy_provider_register() - create/register phy provider with the
 * framework
 * @dev: struct device of the phy provider
 * @children: device node containing children (if different from dev->of_node)
 * @owner: the module owner containing of_xlate
 * @of_xlate: function pointer to obtain phy instance from phy provider
 *
 * Creates struct phy_provider from dev and of_xlate function pointer.
 * This is used in the case of dt boot for finding the phy instance from
 * phy provider. While at that, it also associates the device with the
 * phy provider using devres. On driver detach, release function is invoked
 * on the devres data, then, devres data is freed.
 */
struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
	struct device_node *children, struct module *owner,
	struct phy * (*of_xlate)(struct device *dev,
				 struct of_phandle_args *args))
{
	struct phy_provider **ptr, *phy_provider;

	ptr = devres_alloc(devm_phy_provider_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	phy_provider = __of_phy_provider_register(dev, children, owner,
						  of_xlate);
	if (!IS_ERR(phy_provider)) {
		*ptr = phy_provider;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return phy_provider;
}
EXPORT_SYMBOL_GPL(__devm_of_phy_provider_register);

/**
 * of_phy_provider_unregister() - unregister phy provider from the framework
 * @phy_provider: phy provider returned by of_phy_provider_register()
 *
 * Removes the phy_provider created using of_phy_provider_register().
 */
void of_phy_provider_unregister(struct phy_provider *phy_provider)
{
	if (IS_ERR(phy_provider))
		return;

	mutex_lock(&phy_provider_mutex);
	list_del(&phy_provider->list);
	of_node_put(phy_provider->children);
	kfree(phy_provider);
	mutex_unlock(&phy_provider_mutex);
}
EXPORT_SYMBOL_GPL(of_phy_provider_unregister);

/**
 * devm_of_phy_provider_unregister() - remove phy provider from the framework
 * @dev: struct device of the phy provider
 * @phy_provider: phy provider returned by of_phy_provider_register()
 *
 * destroys the devres associated with this phy provider and invokes
 * of_phy_provider_unregister to unregister the phy provider.
 */
void devm_of_phy_provider_unregister(struct device *dev,
				     struct phy_provider *phy_provider)
{
	int r;

	r = devres_destroy(dev, devm_phy_provider_release, devm_phy_match,
			   phy_provider);
	dev_WARN_ONCE(dev, r, "couldn't find PHY provider device resource\n");
}
EXPORT_SYMBOL_GPL(devm_of_phy_provider_unregister);

/**
 * phy_release() - release the phy
 * @dev: the dev member within phy
 *
 * When the last reference to the device is removed, it is called
 * from the embedded kobject as release method.
 */
static void phy_release(struct device *dev)
{
	struct phy *phy;

	phy = to_phy(dev);
	dev_vdbg(dev, "releasing '%s'\n", dev_name(dev));
	debugfs_remove_recursive(phy->debugfs);
	regulator_put(phy->pwr);
	/* returns the id allocated in phy_create() */
	ida_simple_remove(&phy_ida, phy->id);
	kfree(phy);
}

/* Framework init: create the "phy" device class and the debugfs root dir. */
static int __init phy_core_init(void)
{
	phy_class = class_create("phy");
	if (IS_ERR(phy_class)) {
		pr_err("failed to create phy class --> %ld\n",
		       PTR_ERR(phy_class));
		return PTR_ERR(phy_class);
	}

	phy_class->dev_release = phy_release;

	phy_debugfs_root = debugfs_create_dir("phy", NULL);

	return 0;
}
device_initcall(phy_core_init);

/* Framework teardown: remove the debugfs tree and destroy the class. */
static void __exit phy_core_exit(void)
{
	debugfs_remove_recursive(phy_debugfs_root);
	class_destroy(phy_class);
}
module_exit(phy_core_exit);