/*
 * phy-core.c -- Generic Phy framework.
 *
 * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
 *
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/idr.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>

static struct class *phy_class;
static DEFINE_MUTEX(phy_provider_mutex);
static LIST_HEAD(phy_provider_list);
static LIST_HEAD(phys);
static DEFINE_IDA(phy_ida);

static void devm_phy_release(struct device *dev, void *res)
{
        struct phy *phy = *(struct phy **)res;

        phy_put(phy);
}

static void devm_phy_provider_release(struct device *dev, void *res)
{
        struct phy_provider *phy_provider = *(struct phy_provider **)res;

        of_phy_provider_unregister(phy_provider);
}

static void devm_phy_consume(struct device *dev, void *res)
{
        struct phy *phy = *(struct phy **)res;

        phy_destroy(phy);
}

static int devm_phy_match(struct device *dev, void *res, void *match_data)
{
        return res == match_data;
}

/**
 * phy_create_lookup() - allocate and register PHY/device association
 * @phy: the phy of the association
 * @con_id: connection ID string on device
 * @dev_id: the device of the association
 *
 * Creates and registers phy_lookup entry.
 */
int phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id)
{
        struct phy_lookup *pl;

        if (!phy || !dev_id || !con_id)
                return -EINVAL;

        pl = kzalloc(sizeof(*pl), GFP_KERNEL);
        if (!pl)
                return -ENOMEM;

        pl->dev_id = dev_id;
        pl->con_id = con_id;
        pl->phy = phy;

        mutex_lock(&phy_provider_mutex);
        list_add_tail(&pl->node, &phys);
        mutex_unlock(&phy_provider_mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(phy_create_lookup);

/**
 * phy_remove_lookup() - find and remove PHY/device association
 * @phy: the phy of the association
 * @con_id: connection ID string on device
 * @dev_id: the device of the association
 *
 * Finds and unregisters phy_lookup entry that was created with
 * phy_create_lookup().
 */
void phy_remove_lookup(struct phy *phy, const char *con_id, const char *dev_id)
{
        struct phy_lookup *pl;

        if (!phy || !dev_id || !con_id)
                return;

        mutex_lock(&phy_provider_mutex);
        list_for_each_entry(pl, &phys, node)
                if (pl->phy == phy && !strcmp(pl->dev_id, dev_id) &&
                    !strcmp(pl->con_id, con_id)) {
                        list_del(&pl->node);
                        kfree(pl);
                        break;
                }
        mutex_unlock(&phy_provider_mutex);
}
EXPORT_SYMBOL_GPL(phy_remove_lookup);

static struct phy *phy_find(struct device *dev, const char *con_id)
{
        const char *dev_id = dev_name(dev);
        struct phy_lookup *p, *pl = NULL;

        mutex_lock(&phy_provider_mutex);
        list_for_each_entry(p, &phys, node)
                if (!strcmp(p->dev_id, dev_id) && !strcmp(p->con_id, con_id)) {
                        pl = p;
                        break;
                }
        mutex_unlock(&phy_provider_mutex);

        return pl ? pl->phy : ERR_PTR(-ENODEV);
}
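
/*
 * Example (illustrative sketch, not part of this file): on a non-DT
 * platform, a board file or provider driver can associate an already
 * created PHY with a consumer device so that phy_get()/phy_find() can
 * resolve it by name later.  The "usb" connection ID and the "dwc3.0"
 * device name below are hypothetical.
 *
 *      if (!IS_ERR(phy))
 *              phy_create_lookup(phy, "usb", "dwc3.0");
 *
 * The association should be removed again with phy_remove_lookup(),
 * using the same arguments, before the PHY is destroyed.
 */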

static struct phy_provider *of_phy_provider_lookup(struct device_node *node)
{
        struct phy_provider *phy_provider;
        struct device_node *child;

        list_for_each_entry(phy_provider, &phy_provider_list, list) {
                if (phy_provider->dev->of_node == node)
                        return phy_provider;

                for_each_child_of_node(phy_provider->dev->of_node, child)
                        if (child == node)
                                return phy_provider;
        }

        return ERR_PTR(-EPROBE_DEFER);
}

int phy_pm_runtime_get(struct phy *phy)
{
        int ret;

        if (!pm_runtime_enabled(&phy->dev))
                return -ENOTSUPP;

        ret = pm_runtime_get(&phy->dev);
        if (ret < 0 && ret != -EINPROGRESS)
                pm_runtime_put_noidle(&phy->dev);

        return ret;
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_get);

int phy_pm_runtime_get_sync(struct phy *phy)
{
        int ret;

        if (!pm_runtime_enabled(&phy->dev))
                return -ENOTSUPP;

        ret = pm_runtime_get_sync(&phy->dev);
        if (ret < 0)
                pm_runtime_put_sync(&phy->dev);

        return ret;
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_get_sync);

int phy_pm_runtime_put(struct phy *phy)
{
        if (!pm_runtime_enabled(&phy->dev))
                return -ENOTSUPP;

        return pm_runtime_put(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_put);

int phy_pm_runtime_put_sync(struct phy *phy)
{
        if (!pm_runtime_enabled(&phy->dev))
                return -ENOTSUPP;

        return pm_runtime_put_sync(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_put_sync);

void phy_pm_runtime_allow(struct phy *phy)
{
        if (!pm_runtime_enabled(&phy->dev))
                return;

        pm_runtime_allow(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_allow);

void phy_pm_runtime_forbid(struct phy *phy)
{
        if (!pm_runtime_enabled(&phy->dev))
                return;

        pm_runtime_forbid(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_forbid);

int phy_init(struct phy *phy)
{
        int ret;

        if (!phy)
                return 0;

        ret = phy_pm_runtime_get_sync(phy);
        if (ret < 0 && ret != -ENOTSUPP)
                return ret;

        mutex_lock(&phy->mutex);
        if (phy->init_count == 0 && phy->ops->init) {
                ret = phy->ops->init(phy);
                if (ret < 0) {
                        dev_err(&phy->dev, "phy init failed --> %d\n", ret);
                        goto out;
                }
        } else {
                ret = 0; /* Override possible ret == -ENOTSUPP */
        }
        ++phy->init_count;

out:
        mutex_unlock(&phy->mutex);
        phy_pm_runtime_put(phy);
        return ret;
}
EXPORT_SYMBOL_GPL(phy_init);

int phy_exit(struct phy *phy)
{
        int ret;

        if (!phy)
                return 0;

        ret = phy_pm_runtime_get_sync(phy);
        if (ret < 0 && ret != -ENOTSUPP)
                return ret;

        mutex_lock(&phy->mutex);
        if (phy->init_count == 1 && phy->ops->exit) {
                ret = phy->ops->exit(phy);
                if (ret < 0) {
                        dev_err(&phy->dev, "phy exit failed --> %d\n", ret);
                        goto out;
                }
        }
        --phy->init_count;

out:
        mutex_unlock(&phy->mutex);
        phy_pm_runtime_put(phy);
        return ret;
}
EXPORT_SYMBOL_GPL(phy_exit);
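
/*
 * Example (illustrative sketch): the usual consumer bring-up order is
 * phy_init() followed by phy_power_on(); teardown is the mirror image.
 * Both pairs are reference counted, so several consumers may share one
 * PHY.  Error handling is abbreviated and "priv->phy" is hypothetical.
 *
 *      ret = phy_init(priv->phy);
 *      if (ret)
 *              return ret;
 *
 *      ret = phy_power_on(priv->phy);
 *      if (ret) {
 *              phy_exit(priv->phy);
 *              return ret;
 *      }
 *
 *      ...
 *
 *      phy_power_off(priv->phy);
 *      phy_exit(priv->phy);
 */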

int phy_power_on(struct phy *phy)
{
        int ret;

        if (!phy)
                return 0;

        if (phy->pwr) {
                ret = regulator_enable(phy->pwr);
                if (ret)
                        return ret;
        }

        ret = phy_pm_runtime_get_sync(phy);
        if (ret < 0 && ret != -ENOTSUPP)
                return ret;

        mutex_lock(&phy->mutex);
        if (phy->power_count == 0 && phy->ops->power_on) {
                ret = phy->ops->power_on(phy);
                if (ret < 0) {
                        dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
                        goto out;
                }
        } else {
                ret = 0; /* Override possible ret == -ENOTSUPP */
        }
        ++phy->power_count;
        mutex_unlock(&phy->mutex);
        return 0;

out:
        mutex_unlock(&phy->mutex);
        phy_pm_runtime_put_sync(phy);
        if (phy->pwr)
                regulator_disable(phy->pwr);

        return ret;
}
EXPORT_SYMBOL_GPL(phy_power_on);

int phy_power_off(struct phy *phy)
{
        int ret;

        if (!phy)
                return 0;

        mutex_lock(&phy->mutex);
        if (phy->power_count == 1 && phy->ops->power_off) {
                ret = phy->ops->power_off(phy);
                if (ret < 0) {
                        dev_err(&phy->dev, "phy poweroff failed --> %d\n", ret);
                        mutex_unlock(&phy->mutex);
                        return ret;
                }
        }
        --phy->power_count;
        mutex_unlock(&phy->mutex);
        phy_pm_runtime_put(phy);

        if (phy->pwr)
                regulator_disable(phy->pwr);

        return 0;
}
EXPORT_SYMBOL_GPL(phy_power_off);

/**
 * _of_phy_get() - lookup and obtain a reference to a phy by phandle
 * @np: device_node for which to get the phy
 * @index: the index of the phy
 *
 * Returns the phy associated with the given phandle value,
 * after getting a refcount to it or -ENODEV if there is no such phy or
 * -EPROBE_DEFER if there is a phandle to the phy, but the device is
 * not yet loaded. This function uses the of_xlate callback provided
 * while registering the phy_provider to find the phy instance.
 */
static struct phy *_of_phy_get(struct device_node *np, int index)
{
        int ret;
        struct phy_provider *phy_provider;
        struct phy *phy = NULL;
        struct of_phandle_args args;

        ret = of_parse_phandle_with_args(np, "phys", "#phy-cells",
                                         index, &args);
        if (ret)
                return ERR_PTR(-ENODEV);

        mutex_lock(&phy_provider_mutex);
        phy_provider = of_phy_provider_lookup(args.np);
        if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) {
                phy = ERR_PTR(-EPROBE_DEFER);
                goto err0;
        }

        phy = phy_provider->of_xlate(phy_provider->dev, &args);
        module_put(phy_provider->owner);

err0:
        mutex_unlock(&phy_provider_mutex);
        of_node_put(args.np);

        return phy;
}

/**
 * of_phy_get() - lookup and obtain a reference to a phy using a device_node.
 * @np: device_node for which to get the phy
 * @con_id: name of the phy from device's point of view
 *
 * Returns the phy driver, after getting a refcount to it; or
 * -ENODEV if there is no such phy. The caller is responsible for
 * calling phy_put() to release that count.
 */
struct phy *of_phy_get(struct device_node *np, const char *con_id)
{
        struct phy *phy = NULL;
        int index = 0;

        if (con_id)
                index = of_property_match_string(np, "phy-names", con_id);

        phy = _of_phy_get(np, index);
        if (IS_ERR(phy))
                return phy;

        if (!try_module_get(phy->ops->owner))
                return ERR_PTR(-EPROBE_DEFER);

        get_device(&phy->dev);

        return phy;
}
EXPORT_SYMBOL_GPL(of_phy_get);
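
/*
 * Example (illustrative sketch): _of_phy_get() resolves the "phys"
 * phandle using "#phy-cells" from the provider node, while of_phy_get()
 * and phy_get() pick the index by matching "phy-names".  A hypothetical
 * binding could look like:
 *
 *      usb_phy: phy@4a0ad080 {
 *              compatible = "vendor,usb-phy";
 *              reg = <0x4a0ad080 0x58>;
 *              #phy-cells = <0>;
 *      };
 *
 *      usb@48890000 {
 *              compatible = "vendor,usb-controller";
 *              phys = <&usb_phy>;
 *              phy-names = "usb2-phy";
 *      };
 *
 * The controller driver may then call of_phy_get(np, "usb2-phy") or
 * phy_get(dev, "usb2-phy") and must balance it with phy_put().
 */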

/**
 * phy_put() - release the PHY
 * @phy: the phy returned by phy_get()
 *
 * Releases a refcount the caller received from phy_get().
 */
void phy_put(struct phy *phy)
{
        if (!phy || IS_ERR(phy))
                return;

        module_put(phy->ops->owner);
        put_device(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_put);

/**
 * devm_phy_put() - release the PHY
 * @dev: device that wants to release this phy
 * @phy: the phy returned by devm_phy_get()
 *
 * destroys the devres associated with this phy and invokes phy_put
 * to release the phy.
 */
void devm_phy_put(struct device *dev, struct phy *phy)
{
        int r;

        if (!phy)
                return;

        r = devres_destroy(dev, devm_phy_release, devm_phy_match, phy);
        dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
}
EXPORT_SYMBOL_GPL(devm_phy_put);

/**
 * of_phy_simple_xlate() - returns the phy instance from phy provider
 * @dev: the PHY provider device
 * @args: of_phandle_args (not used here)
 *
 * Intended to be used by phy provider for the common case where #phy-cells is
 * 0. For other cases where #phy-cells is greater than '0', the phy provider
 * should provide a custom of_xlate function that reads the *args* and returns
 * the appropriate phy.
 */
struct phy *of_phy_simple_xlate(struct device *dev, struct of_phandle_args
                                *args)
{
        struct phy *phy;
        struct class_dev_iter iter;

        class_dev_iter_init(&iter, phy_class, NULL, NULL);
        while ((dev = class_dev_iter_next(&iter))) {
                phy = to_phy(dev);
                if (args->np != phy->dev.of_node)
                        continue;

                class_dev_iter_exit(&iter);
                return phy;
        }

        class_dev_iter_exit(&iter);
        return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(of_phy_simple_xlate);

/**
 * phy_get() - lookup and obtain a reference to a phy.
 * @dev: device that requests this phy
 * @string: the phy name as given in the dt data or the name of the controller
 * port for non-dt case
 *
 * Returns the phy driver, after getting a refcount to it; or
 * -ENODEV if there is no such phy. The caller is responsible for
 * calling phy_put() to release that count.
 */
struct phy *phy_get(struct device *dev, const char *string)
{
        int index = 0;
        struct phy *phy;

        if (string == NULL) {
                dev_WARN(dev, "missing string\n");
                return ERR_PTR(-EINVAL);
        }

        if (dev->of_node) {
                index = of_property_match_string(dev->of_node, "phy-names",
                                                 string);
                phy = _of_phy_get(dev->of_node, index);
        } else {
                phy = phy_find(dev, string);
        }
        if (IS_ERR(phy))
                return phy;

        if (!try_module_get(phy->ops->owner))
                return ERR_PTR(-EPROBE_DEFER);

        get_device(&phy->dev);

        return phy;
}
EXPORT_SYMBOL_GPL(phy_get);

/**
 * phy_optional_get() - lookup and obtain a reference to an optional phy.
 * @dev: device that requests this phy
 * @string: the phy name as given in the dt data or the name of the controller
 * port for non-dt case
 *
 * Returns the phy driver, after getting a refcount to it; or
 * NULL if there is no such phy. The caller is responsible for
 * calling phy_put() to release that count.
 */
struct phy *phy_optional_get(struct device *dev, const char *string)
{
        struct phy *phy = phy_get(dev, string);

        if (PTR_ERR(phy) == -ENODEV)
                phy = NULL;

        return phy;
}
EXPORT_SYMBOL_GPL(phy_optional_get);
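
/*
 * Example (illustrative sketch): an optional PHY can be requested with
 * phy_optional_get() (or devm_phy_optional_get() below).  A missing PHY
 * yields NULL instead of -ENODEV, and NULL is accepted by phy_init(),
 * phy_power_on(), phy_put() and friends, so no special casing is needed.
 * "priv" and "sata-phy" are hypothetical.
 *
 *      priv->phy = phy_optional_get(dev, "sata-phy");
 *      if (IS_ERR(priv->phy))
 *              return PTR_ERR(priv->phy);
 *
 *      phy_init(priv->phy);
 *
 * The error branch still triggers for real failures such as
 * -EPROBE_DEFER; only the "no such phy" case is mapped to NULL.
 */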

/**
 * devm_phy_get() - lookup and obtain a reference to a phy.
 * @dev: device that requests this phy
 * @string: the phy name as given in the dt data or phy device name
 * for non-dt case
 *
 * Gets the phy using phy_get(), and associates a device with it using
 * devres. On driver detach, release function is invoked on the devres data,
 * then, devres data is freed.
 */
struct phy *devm_phy_get(struct device *dev, const char *string)
{
        struct phy **ptr, *phy;

        ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        phy = phy_get(dev, string);
        if (!IS_ERR(phy)) {
                *ptr = phy;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return phy;
}
EXPORT_SYMBOL_GPL(devm_phy_get);

/**
 * devm_phy_optional_get() - lookup and obtain a reference to an optional phy.
 * @dev: device that requests this phy
 * @string: the phy name as given in the dt data or phy device name
 * for non-dt case
 *
 * Gets the phy using phy_get(), and associates a device with it using
 * devres. On driver detach, release function is invoked on the devres
 * data, then, devres data is freed. This differs from devm_phy_get() in
 * that if the phy does not exist, it is not considered an error and
 * -ENODEV will not be returned. Instead the NULL phy is returned,
 * which can be passed to all other phy consumer calls.
 */
struct phy *devm_phy_optional_get(struct device *dev, const char *string)
{
        struct phy *phy = devm_phy_get(dev, string);

        if (PTR_ERR(phy) == -ENODEV)
                phy = NULL;

        return phy;
}
EXPORT_SYMBOL_GPL(devm_phy_optional_get);

/**
 * devm_of_phy_get() - lookup and obtain a reference to a phy.
 * @dev: device that requests this phy
 * @np: node containing the phy
 * @con_id: name of the phy from device's point of view
 *
 * Gets the phy using of_phy_get(), and associates a device with it using
 * devres. On driver detach, release function is invoked on the devres data,
 * then, devres data is freed.
 */
struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
                            const char *con_id)
{
        struct phy **ptr, *phy;

        ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        phy = of_phy_get(np, con_id);
        if (!IS_ERR(phy)) {
                *ptr = phy;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return phy;
}
EXPORT_SYMBOL_GPL(devm_of_phy_get);
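
/*
 * Example (illustrative sketch): a consumer probe() using the managed
 * variant.  The devres core drops the reference automatically on driver
 * detach, so no explicit phy_put() is required.  The "foo_probe" name
 * and the "usb2-phy" connection ID are hypothetical.
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              struct phy *phy;
 *
 *              phy = devm_phy_get(&pdev->dev, "usb2-phy");
 *              if (IS_ERR(phy))
 *                      return PTR_ERR(phy);
 *
 *              return phy_init(phy);
 *      }
 */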

/**
 * phy_create() - create a new phy
 * @dev: device that is creating the new phy
 * @node: device node of the phy
 * @ops: function pointers for performing phy operations
 *
 * Called to create a phy using phy framework.
 */
struct phy *phy_create(struct device *dev, struct device_node *node,
                       const struct phy_ops *ops)
{
        int ret;
        int id;
        struct phy *phy;

        if (WARN_ON(!dev))
                return ERR_PTR(-EINVAL);

        phy = kzalloc(sizeof(*phy), GFP_KERNEL);
        if (!phy)
                return ERR_PTR(-ENOMEM);

        id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL);
        if (id < 0) {
                dev_err(dev, "unable to get id\n");
                ret = id;
                goto free_phy;
        }

        /* phy-supply */
        phy->pwr = regulator_get_optional(dev, "phy");
        if (IS_ERR(phy->pwr)) {
                if (PTR_ERR(phy->pwr) == -EPROBE_DEFER) {
                        ret = -EPROBE_DEFER;
                        goto free_ida;
                }
                phy->pwr = NULL;
        }

        device_initialize(&phy->dev);
        mutex_init(&phy->mutex);

        phy->dev.class = phy_class;
        phy->dev.parent = dev;
        phy->dev.of_node = node ?: dev->of_node;
        phy->id = id;
        phy->ops = ops;

        ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id);
        if (ret)
                goto put_dev;

        ret = device_add(&phy->dev);
        if (ret)
                goto put_dev;

        if (pm_runtime_enabled(dev)) {
                pm_runtime_enable(&phy->dev);
                pm_runtime_no_callbacks(&phy->dev);
        }

        return phy;

put_dev:
        put_device(&phy->dev);  /* calls phy_release() which frees resources */
        return ERR_PTR(ret);

free_ida:
        ida_simple_remove(&phy_ida, phy->id);

free_phy:
        kfree(phy);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(phy_create);

/**
 * devm_phy_create() - create a new phy
 * @dev: device that is creating the new phy
 * @node: device node of the phy
 * @ops: function pointers for performing phy operations
 *
 * Creates a new PHY device adding it to the PHY class.
 * While at that, it also associates the device with the phy using devres.
 * On driver detach, release function is invoked on the devres data,
 * then, devres data is freed.
 */
struct phy *devm_phy_create(struct device *dev, struct device_node *node,
                            const struct phy_ops *ops)
{
        struct phy **ptr, *phy;

        ptr = devres_alloc(devm_phy_consume, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        phy = phy_create(dev, node, ops);
        if (!IS_ERR(phy)) {
                *ptr = phy;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return phy;
}
EXPORT_SYMBOL_GPL(devm_phy_create);

/**
 * phy_destroy() - destroy the phy
 * @phy: the phy to be destroyed
 *
 * Called to destroy the phy.
 */
void phy_destroy(struct phy *phy)
{
        pm_runtime_disable(&phy->dev);
        device_unregister(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_destroy);
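
/*
 * Example (illustrative sketch): a provider fills in struct phy_ops and
 * creates the PHY from its probe(), typically with the managed variant
 * so phy_destroy() runs automatically on driver detach.  All "foo_"
 * names below are hypothetical.
 *
 *      static const struct phy_ops foo_phy_ops = {
 *              .init           = foo_phy_init,
 *              .power_on       = foo_phy_power_on,
 *              .power_off      = foo_phy_power_off,
 *              .owner          = THIS_MODULE,
 *      };
 *
 *      phy = devm_phy_create(dev, NULL, &foo_phy_ops);
 *      if (IS_ERR(phy))
 *              return PTR_ERR(phy);
 */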

/**
 * devm_phy_destroy() - destroy the PHY
 * @dev: device that wants to release this phy
 * @phy: the phy returned by devm_phy_get()
 *
 * destroys the devres associated with this phy and invokes phy_destroy
 * to destroy the phy.
 */
void devm_phy_destroy(struct device *dev, struct phy *phy)
{
        int r;

        r = devres_destroy(dev, devm_phy_consume, devm_phy_match, phy);
        dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
}
EXPORT_SYMBOL_GPL(devm_phy_destroy);

/**
 * __of_phy_provider_register() - create/register phy provider with the framework
 * @dev: struct device of the phy provider
 * @owner: the module owner containing of_xlate
 * @of_xlate: function pointer to obtain phy instance from phy provider
 *
 * Creates struct phy_provider from dev and of_xlate function pointer.
 * This is used in the case of dt boot for finding the phy instance from
 * phy provider.
 */
struct phy_provider *__of_phy_provider_register(struct device *dev,
        struct module *owner, struct phy * (*of_xlate)(struct device *dev,
        struct of_phandle_args *args))
{
        struct phy_provider *phy_provider;

        phy_provider = kzalloc(sizeof(*phy_provider), GFP_KERNEL);
        if (!phy_provider)
                return ERR_PTR(-ENOMEM);

        phy_provider->dev = dev;
        phy_provider->owner = owner;
        phy_provider->of_xlate = of_xlate;

        mutex_lock(&phy_provider_mutex);
        list_add_tail(&phy_provider->list, &phy_provider_list);
        mutex_unlock(&phy_provider_mutex);

        return phy_provider;
}
EXPORT_SYMBOL_GPL(__of_phy_provider_register);

/**
 * __devm_of_phy_provider_register() - create/register phy provider with the
 * framework
 * @dev: struct device of the phy provider
 * @owner: the module owner containing of_xlate
 * @of_xlate: function pointer to obtain phy instance from phy provider
 *
 * Creates struct phy_provider from dev and of_xlate function pointer.
 * This is used in the case of dt boot for finding the phy instance from
 * phy provider. While at that, it also associates the device with the
 * phy provider using devres. On driver detach, release function is invoked
 * on the devres data, then, devres data is freed.
 */
struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
        struct module *owner, struct phy * (*of_xlate)(struct device *dev,
        struct of_phandle_args *args))
{
        struct phy_provider **ptr, *phy_provider;

        ptr = devres_alloc(devm_phy_provider_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        phy_provider = __of_phy_provider_register(dev, owner, of_xlate);
        if (!IS_ERR(phy_provider)) {
                *ptr = phy_provider;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return phy_provider;
}
EXPORT_SYMBOL_GPL(__devm_of_phy_provider_register);

/**
 * of_phy_provider_unregister() - unregister phy provider from the framework
 * @phy_provider: phy provider returned by of_phy_provider_register()
 *
 * Removes the phy_provider created using of_phy_provider_register().
 */
void of_phy_provider_unregister(struct phy_provider *phy_provider)
{
        if (IS_ERR(phy_provider))
                return;

        mutex_lock(&phy_provider_mutex);
        list_del(&phy_provider->list);
        kfree(phy_provider);
        mutex_unlock(&phy_provider_mutex);
}
EXPORT_SYMBOL_GPL(of_phy_provider_unregister);
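
/*
 * Example (illustrative sketch): a DT based provider registers itself so
 * that _of_phy_get() can translate consumer phandles into PHY instances.
 * With #phy-cells = <0> the generic of_phy_simple_xlate() is sufficient;
 * providers with #phy-cells > 0 pass their own of_xlate callback instead.
 *
 *      phy_provider = devm_of_phy_provider_register(dev,
 *                                                   of_phy_simple_xlate);
 *      if (IS_ERR(phy_provider))
 *              return PTR_ERR(phy_provider);
 */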

/**
 * devm_of_phy_provider_unregister() - remove phy provider from the framework
 * @dev: struct device of the phy provider
 * @phy_provider: phy provider returned by of_phy_provider_register()
 *
 * destroys the devres associated with this phy provider and invokes
 * of_phy_provider_unregister to unregister the phy provider.
 */
void devm_of_phy_provider_unregister(struct device *dev,
        struct phy_provider *phy_provider)
{
        int r;

        r = devres_destroy(dev, devm_phy_provider_release, devm_phy_match,
                           phy_provider);
        dev_WARN_ONCE(dev, r, "couldn't find PHY provider device resource\n");
}
EXPORT_SYMBOL_GPL(devm_of_phy_provider_unregister);

/**
 * phy_release() - release the phy
 * @dev: the dev member within phy
 *
 * When the last reference to the device is removed, it is called
 * from the embedded kobject as release method.
 */
static void phy_release(struct device *dev)
{
        struct phy *phy;

        phy = to_phy(dev);
        dev_vdbg(dev, "releasing '%s'\n", dev_name(dev));
        regulator_put(phy->pwr);
        ida_simple_remove(&phy_ida, phy->id);
        kfree(phy);
}

static int __init phy_core_init(void)
{
        phy_class = class_create(THIS_MODULE, "phy");
        if (IS_ERR(phy_class)) {
                pr_err("failed to create phy class --> %ld\n",
                       PTR_ERR(phy_class));
                return PTR_ERR(phy_class);
        }

        phy_class->dev_release = phy_release;

        return 0;
}
module_init(phy_core_init);

static void __exit phy_core_exit(void)
{
        class_destroy(phy_class);
}
module_exit(phy_core_exit);

MODULE_DESCRIPTION("Generic PHY Framework");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");