/*
 * phy-core.c  --  Generic Phy framework.
 *
 * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
 *
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/idr.h>
#include <linux/pm_runtime.h>

static struct class *phy_class;
static DEFINE_MUTEX(phy_provider_mutex);
static LIST_HEAD(phy_provider_list);
static DEFINE_IDA(phy_ida);

static void devm_phy_release(struct device *dev, void *res)
{
	struct phy *phy = *(struct phy **)res;

	phy_put(phy);
}

static void devm_phy_provider_release(struct device *dev, void *res)
{
	struct phy_provider *phy_provider = *(struct phy_provider **)res;

	of_phy_provider_unregister(phy_provider);
}

static void devm_phy_consume(struct device *dev, void *res)
{
	struct phy *phy = *(struct phy **)res;

	phy_destroy(phy);
}

static int devm_phy_match(struct device *dev, void *res, void *match_data)
{
	return res == match_data;
}

static struct phy *phy_lookup(struct device *device, const char *port)
{
	unsigned int count;
	struct phy *phy;
	struct device *dev;
	struct phy_consumer *consumers;
	struct class_dev_iter iter;

	class_dev_iter_init(&iter, phy_class, NULL, NULL);
	while ((dev = class_dev_iter_next(&iter))) {
		phy = to_phy(dev);
		count = phy->init_data->num_consumers;
		consumers = phy->init_data->consumers;
		while (count--) {
			if (!strcmp(consumers->dev_name, dev_name(device)) &&
			    !strcmp(consumers->port, port)) {
				class_dev_iter_exit(&iter);
				return phy;
			}
			consumers++;
		}
	}

	class_dev_iter_exit(&iter);
	return ERR_PTR(-ENODEV);
}

static struct phy_provider *of_phy_provider_lookup(struct device_node *node)
{
	struct phy_provider *phy_provider;

	list_for_each_entry(phy_provider, &phy_provider_list, list) {
		if (phy_provider->dev->of_node == node)
			return phy_provider;
	}

	return ERR_PTR(-EPROBE_DEFER);
}

int phy_pm_runtime_get(struct phy *phy)
{
	int ret;

	if (!pm_runtime_enabled(&phy->dev))
		return -ENOTSUPP;

	ret = pm_runtime_get(&phy->dev);
	if (ret < 0 && ret != -EINPROGRESS)
		pm_runtime_put_noidle(&phy->dev);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_get);

int phy_pm_runtime_get_sync(struct phy *phy)
{
	int ret;

	if (!pm_runtime_enabled(&phy->dev))
		return -ENOTSUPP;

	ret = pm_runtime_get_sync(&phy->dev);
	if (ret < 0)
		pm_runtime_put_sync(&phy->dev);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_get_sync);

int phy_pm_runtime_put(struct phy *phy)
{
	if (!pm_runtime_enabled(&phy->dev))
		return -ENOTSUPP;

	return pm_runtime_put(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_put);

int phy_pm_runtime_put_sync(struct phy *phy)
{
	if (!pm_runtime_enabled(&phy->dev))
		return -ENOTSUPP;

	return pm_runtime_put_sync(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_put_sync);

void phy_pm_runtime_allow(struct phy *phy)
{
	if (!pm_runtime_enabled(&phy->dev))
		return;

	pm_runtime_allow(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_allow);

void phy_pm_runtime_forbid(struct phy *phy)
{
	if (!pm_runtime_enabled(&phy->dev))
		return;

	pm_runtime_forbid(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_forbid);

int phy_init(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		return ret;

	mutex_lock(&phy->mutex);
	if (phy->init_count == 0 && phy->ops->init) {
		ret = phy->ops->init(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy init failed --> %d\n", ret);
			goto out;
		}
	} else {
		ret = 0; /* Override possible ret == -ENOTSUPP */
	}
	++phy->init_count;

out:
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put(phy);
	return ret;
}
EXPORT_SYMBOL_GPL(phy_init);

int phy_exit(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		return ret;

	mutex_lock(&phy->mutex);
	if (phy->init_count == 1 && phy->ops->exit) {
		ret = phy->ops->exit(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy exit failed --> %d\n", ret);
			goto out;
		}
	}
	--phy->init_count;

out:
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put(phy);
	return ret;
}
EXPORT_SYMBOL_GPL(phy_exit);

int phy_power_on(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		return ret;

	mutex_lock(&phy->mutex);
	if (phy->power_count == 0 && phy->ops->power_on) {
		ret = phy->ops->power_on(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
			goto out;
		}
	} else {
		ret = 0; /* Override possible ret == -ENOTSUPP */
	}
	++phy->power_count;
	mutex_unlock(&phy->mutex);
	return 0;

out:
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put_sync(phy);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_power_on);

int phy_power_off(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	mutex_lock(&phy->mutex);
	if (phy->power_count == 1 && phy->ops->power_off) {
		ret = phy->ops->power_off(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy poweroff failed --> %d\n", ret);
			mutex_unlock(&phy->mutex);
			return ret;
		}
	}
	--phy->power_count;
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put(phy);

	return 0;
}
EXPORT_SYMBOL_GPL(phy_power_off);
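
/*
 * Illustrative consumer-side usage of the ops above (a minimal sketch, not
 * part of the framework; the "usb2-phy" name and the error paths are
 * hypothetical). phy_init() and phy_power_on() are reference counted, so a
 * consumer must issue the matching phy_power_off()/phy_exit() on teardown:
 *
 *	phy = devm_phy_get(dev, "usb2-phy");
 *	if (IS_ERR(phy))
 *		return PTR_ERR(phy);
 *
 *	ret = phy_init(phy);
 *	if (ret)
 *		return ret;
 *
 *	ret = phy_power_on(phy);
 *	if (ret) {
 *		phy_exit(phy);
 *		return ret;
 *	}
 *
 *	...
 *
 *	phy_power_off(phy);
 *	phy_exit(phy);
 */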

/**
 * _of_phy_get() - lookup and obtain a reference to a phy by phandle
 * @np: device_node for which to get the phy
 * @index: the index of the phy
 *
 * Returns the phy associated with the given phandle value,
 * after getting a refcount to it or -ENODEV if there is no such phy or
 * -EPROBE_DEFER if there is a phandle to the phy, but the device is
 * not yet loaded. This function uses the of_xlate callback function provided
 * while registering the phy_provider to find the phy instance.
 */
static struct phy *_of_phy_get(struct device_node *np, int index)
{
	int ret;
	struct phy_provider *phy_provider;
	struct phy *phy = NULL;
	struct of_phandle_args args;

	ret = of_parse_phandle_with_args(np, "phys", "#phy-cells",
		index, &args);
	if (ret)
		return ERR_PTR(-ENODEV);

	mutex_lock(&phy_provider_mutex);
	phy_provider = of_phy_provider_lookup(args.np);
	if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) {
		phy = ERR_PTR(-EPROBE_DEFER);
		goto err0;
	}

	phy = phy_provider->of_xlate(phy_provider->dev, &args);
	module_put(phy_provider->owner);

err0:
	mutex_unlock(&phy_provider_mutex);
	of_node_put(args.np);

	return phy;
}

/**
 * of_phy_get() - lookup and obtain a reference to a phy using a device_node.
 * @np: device_node for which to get the phy
 * @con_id: name of the phy from device's point of view
 *
 * Returns the phy driver, after getting a refcount to it; or
 * -ENODEV if there is no such phy. The caller is responsible for
 * calling phy_put() to release that count.
 */
struct phy *of_phy_get(struct device_node *np, const char *con_id)
{
	struct phy *phy = NULL;
	int index = 0;

	if (con_id)
		index = of_property_match_string(np, "phy-names", con_id);

	phy = _of_phy_get(np, index);
	if (IS_ERR(phy))
		return phy;

	if (!try_module_get(phy->ops->owner))
		return ERR_PTR(-EPROBE_DEFER);

	get_device(&phy->dev);

	return phy;
}
EXPORT_SYMBOL_GPL(of_phy_get);

/**
 * phy_put() - release the PHY
 * @phy: the phy returned by phy_get()
 *
 * Releases a refcount the caller received from phy_get().
 */
void phy_put(struct phy *phy)
{
	if (!phy || IS_ERR(phy))
		return;

	module_put(phy->ops->owner);
	put_device(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_put);

/**
 * devm_phy_put() - release the PHY
 * @dev: device that wants to release this phy
 * @phy: the phy returned by devm_phy_get()
 *
 * destroys the devres associated with this phy and invokes phy_put
 * to release the phy.
 */
void devm_phy_put(struct device *dev, struct phy *phy)
{
	int r;

	if (!phy)
		return;

	r = devres_destroy(dev, devm_phy_release, devm_phy_match, phy);
	dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
}
EXPORT_SYMBOL_GPL(devm_phy_put);

/**
 * of_phy_simple_xlate() - returns the phy instance from phy provider
 * @dev: the PHY provider device
 * @args: of_phandle_args (not used here)
 *
 * Intended to be used by phy provider for the common case where #phy-cells is
 * 0. For other cases where #phy-cells is greater than '0', the phy provider
 * should provide a custom of_xlate function that reads the *args* and returns
 * the appropriate phy.
 */
struct phy *of_phy_simple_xlate(struct device *dev, struct of_phandle_args
	*args)
{
	struct phy *phy;
	struct class_dev_iter iter;
	struct device_node *node = dev->of_node;

	class_dev_iter_init(&iter, phy_class, NULL, NULL);
	while ((dev = class_dev_iter_next(&iter))) {
		phy = to_phy(dev);
		if (node != phy->dev.of_node)
			continue;

		class_dev_iter_exit(&iter);
		return phy;
	}

	class_dev_iter_exit(&iter);
	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(of_phy_simple_xlate);
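
/*
 * A minimal sketch of the custom of_xlate described above, for a provider
 * whose #phy-cells is 1 and whose single cell selects one of several PHYs.
 * The my_priv structure, MY_NUM_PHYS and the drvdata lookup are hypothetical,
 * only the of_xlate signature and of_phandle_args usage come from this file:
 *
 *	static struct phy *my_phy_xlate(struct device *dev,
 *					struct of_phandle_args *args)
 *	{
 *		struct my_priv *priv = dev_get_drvdata(dev);
 *
 *		if (args->args[0] >= MY_NUM_PHYS)
 *			return ERR_PTR(-ENODEV);
 *
 *		return priv->phys[args->args[0]];
 *	}
 */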

/**
 * phy_get() - lookup and obtain a reference to a phy.
 * @dev: device that requests this phy
 * @string: the phy name as given in the dt data or the name of the controller
 * port for non-dt case
 *
 * Returns the phy driver, after getting a refcount to it; or
 * -ENODEV if there is no such phy. The caller is responsible for
 * calling phy_put() to release that count.
 */
struct phy *phy_get(struct device *dev, const char *string)
{
	int index = 0;
	struct phy *phy;

	if (string == NULL) {
		dev_WARN(dev, "missing string\n");
		return ERR_PTR(-EINVAL);
	}

	if (dev->of_node) {
		index = of_property_match_string(dev->of_node, "phy-names",
			string);
		phy = _of_phy_get(dev->of_node, index);
	} else {
		phy = phy_lookup(dev, string);
	}
	if (IS_ERR(phy))
		return phy;

	if (!try_module_get(phy->ops->owner))
		return ERR_PTR(-EPROBE_DEFER);

	get_device(&phy->dev);

	return phy;
}
EXPORT_SYMBOL_GPL(phy_get);

/**
 * phy_optional_get() - lookup and obtain a reference to an optional phy.
 * @dev: device that requests this phy
 * @string: the phy name as given in the dt data or the name of the controller
 * port for non-dt case
 *
 * Returns the phy driver, after getting a refcount to it; or
 * NULL if there is no such phy. The caller is responsible for
 * calling phy_put() to release that count.
 */
struct phy *phy_optional_get(struct device *dev, const char *string)
{
	struct phy *phy = phy_get(dev, string);

	if (PTR_ERR(phy) == -ENODEV)
		phy = NULL;

	return phy;
}
EXPORT_SYMBOL_GPL(phy_optional_get);

/**
 * devm_phy_get() - lookup and obtain a reference to a phy.
 * @dev: device that requests this phy
 * @string: the phy name as given in the dt data or phy device name
 * for non-dt case
 *
 * Gets the phy using phy_get(), and associates a device with it using
 * devres. On driver detach, release function is invoked on the devres data,
 * then, devres data is freed.
 */
struct phy *devm_phy_get(struct device *dev, const char *string)
{
	struct phy **ptr, *phy;

	ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	phy = phy_get(dev, string);
	if (!IS_ERR(phy)) {
		*ptr = phy;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return phy;
}
EXPORT_SYMBOL_GPL(devm_phy_get);

/**
 * devm_phy_optional_get() - lookup and obtain a reference to an optional phy.
 * @dev: device that requests this phy
 * @string: the phy name as given in the dt data or phy device name
 * for non-dt case
 *
 * Gets the phy using phy_get(), and associates a device with it using
 * devres. On driver detach, release function is invoked on the devres
 * data, then, devres data is freed. This differs from devm_phy_get() in
 * that if the phy does not exist, it is not considered an error and
 * -ENODEV will not be returned. Instead the NULL phy is returned,
 * which can be passed to all other phy consumer calls.
 */
struct phy *devm_phy_optional_get(struct device *dev, const char *string)
{
	struct phy *phy = devm_phy_get(dev, string);

	if (PTR_ERR(phy) == -ENODEV)
		phy = NULL;

	return phy;
}
EXPORT_SYMBOL_GPL(devm_phy_optional_get);

/**
 * devm_of_phy_get() - lookup and obtain a reference to a phy.
 * @dev: device that requests this phy
 * @np: node containing the phy
 * @con_id: name of the phy from device's point of view
 *
 * Gets the phy using of_phy_get(), and associates a device with it using
 * devres. On driver detach, release function is invoked on the devres data,
 * then, devres data is freed.
 */
struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
	const char *con_id)
{
	struct phy **ptr, *phy;

	ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	phy = of_phy_get(np, con_id);
	if (!IS_ERR(phy)) {
		*ptr = phy;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return phy;
}
EXPORT_SYMBOL_GPL(devm_of_phy_get);

/**
 * phy_create() - create a new phy
 * @dev: device that is creating the new phy
 * @ops: function pointers for performing phy operations
 * @init_data: contains the list of PHY consumers or NULL
 *
 * Called to create a phy using phy framework.
 */
struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
	struct phy_init_data *init_data)
{
	int ret;
	int id;
	struct phy *phy;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	phy = kzalloc(sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return ERR_PTR(-ENOMEM);

	id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		dev_err(dev, "unable to get id\n");
		ret = id;
		goto free_phy;
	}

	device_initialize(&phy->dev);
	mutex_init(&phy->mutex);

	phy->dev.class = phy_class;
	phy->dev.parent = dev;
	phy->dev.of_node = dev->of_node;
	phy->id = id;
	phy->ops = ops;
	phy->init_data = init_data;

	ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id);
	if (ret)
		goto put_dev;

	ret = device_add(&phy->dev);
	if (ret)
		goto put_dev;

	if (pm_runtime_enabled(dev)) {
		pm_runtime_enable(&phy->dev);
		pm_runtime_no_callbacks(&phy->dev);
	}

	return phy;

put_dev:
	put_device(&phy->dev);
	ida_remove(&phy_ida, phy->id);
free_phy:
	kfree(phy);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(phy_create);

/**
 * devm_phy_create() - create a new phy
 * @dev: device that is creating the new phy
 * @ops: function pointers for performing phy operations
 * @init_data: contains the list of PHY consumers or NULL
 *
 * Creates a new PHY device adding it to the PHY class.
 * While at that, it also associates the device with the phy using devres.
 * On driver detach, release function is invoked on the devres data,
 * then, devres data is freed.
 */
struct phy *devm_phy_create(struct device *dev, const struct phy_ops *ops,
	struct phy_init_data *init_data)
{
	struct phy **ptr, *phy;

	ptr = devres_alloc(devm_phy_consume, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	phy = phy_create(dev, ops, init_data);
	if (!IS_ERR(phy)) {
		*ptr = phy;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return phy;
}
EXPORT_SYMBOL_GPL(devm_phy_create);
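
/*
 * A minimal sketch of the init_data argument used on non-dt platforms (the
 * "dwc3.0" device name and "usb" port are hypothetical). phy_lookup() matches
 * a consumer by its device name and the port string passed to phy_get():
 *
 *	static struct phy_consumer consumers[] = {
 *		{ .dev_name = "dwc3.0", .port = "usb" },
 *	};
 *
 *	static struct phy_init_data init_data = {
 *		.num_consumers	= ARRAY_SIZE(consumers),
 *		.consumers	= consumers,
 *	};
 */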

/**
 * phy_destroy() - destroy the phy
 * @phy: the phy to be destroyed
 *
 * Called to destroy the phy.
 */
void phy_destroy(struct phy *phy)
{
	pm_runtime_disable(&phy->dev);
	device_unregister(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_destroy);

/**
 * devm_phy_destroy() - destroy the PHY
 * @dev: device that wants to release this phy
 * @phy: the phy returned by devm_phy_get()
 *
 * destroys the devres associated with this phy and invokes phy_destroy
 * to destroy the phy.
 */
void devm_phy_destroy(struct device *dev, struct phy *phy)
{
	int r;

	r = devres_destroy(dev, devm_phy_consume, devm_phy_match, phy);
	dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
}
EXPORT_SYMBOL_GPL(devm_phy_destroy);

/**
 * __of_phy_provider_register() - create/register phy provider with the framework
 * @dev: struct device of the phy provider
 * @owner: the module owner containing of_xlate
 * @of_xlate: function pointer to obtain phy instance from phy provider
 *
 * Creates struct phy_provider from dev and of_xlate function pointer.
 * This is used in the case of dt boot for finding the phy instance from
 * phy provider.
 */
struct phy_provider *__of_phy_provider_register(struct device *dev,
	struct module *owner, struct phy * (*of_xlate)(struct device *dev,
	struct of_phandle_args *args))
{
	struct phy_provider *phy_provider;

	phy_provider = kzalloc(sizeof(*phy_provider), GFP_KERNEL);
	if (!phy_provider)
		return ERR_PTR(-ENOMEM);

	phy_provider->dev = dev;
	phy_provider->owner = owner;
	phy_provider->of_xlate = of_xlate;

	mutex_lock(&phy_provider_mutex);
	list_add_tail(&phy_provider->list, &phy_provider_list);
	mutex_unlock(&phy_provider_mutex);

	return phy_provider;
}
EXPORT_SYMBOL_GPL(__of_phy_provider_register);

/**
 * __devm_of_phy_provider_register() - create/register phy provider with the
 * framework
 * @dev: struct device of the phy provider
 * @owner: the module owner containing of_xlate
 * @of_xlate: function pointer to obtain phy instance from phy provider
 *
 * Creates struct phy_provider from dev and of_xlate function pointer.
 * This is used in the case of dt boot for finding the phy instance from
 * phy provider. While at that, it also associates the device with the
 * phy provider using devres. On driver detach, release function is invoked
 * on the devres data, then, devres data is freed.
 */
struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
	struct module *owner, struct phy * (*of_xlate)(struct device *dev,
	struct of_phandle_args *args))
{
	struct phy_provider **ptr, *phy_provider;

	ptr = devres_alloc(devm_phy_provider_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	phy_provider = __of_phy_provider_register(dev, owner, of_xlate);
	if (!IS_ERR(phy_provider)) {
		*ptr = phy_provider;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return phy_provider;
}
EXPORT_SYMBOL_GPL(__devm_of_phy_provider_register);
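
/*
 * Illustrative provider-side usage (a sketch; the ops structure, its
 * callbacks and the probe function are hypothetical). Drivers normally use
 * the devm_of_phy_provider_register() wrapper, which passes THIS_MODULE to
 * the __devm variant above:
 *
 *	static const struct phy_ops my_phy_ops = {
 *		.init		= my_phy_init,
 *		.power_on	= my_phy_power_on,
 *		.power_off	= my_phy_power_off,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int my_phy_probe(struct platform_device *pdev)
 *	{
 *		struct phy *phy;
 *		struct phy_provider *provider;
 *
 *		phy = devm_phy_create(&pdev->dev, &my_phy_ops, NULL);
 *		if (IS_ERR(phy))
 *			return PTR_ERR(phy);
 *
 *		provider = devm_of_phy_provider_register(&pdev->dev,
 *							 of_phy_simple_xlate);
 *		return PTR_ERR_OR_ZERO(provider);
 *	}
 */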

/**
 * of_phy_provider_unregister() - unregister phy provider from the framework
 * @phy_provider: phy provider returned by of_phy_provider_register()
 *
 * Removes the phy_provider created using of_phy_provider_register().
 */
void of_phy_provider_unregister(struct phy_provider *phy_provider)
{
	if (IS_ERR(phy_provider))
		return;

	mutex_lock(&phy_provider_mutex);
	list_del(&phy_provider->list);
	kfree(phy_provider);
	mutex_unlock(&phy_provider_mutex);
}
EXPORT_SYMBOL_GPL(of_phy_provider_unregister);

/**
 * devm_of_phy_provider_unregister() - remove phy provider from the framework
 * @dev: struct device of the phy provider
 * @phy_provider: phy provider returned by of_phy_provider_register()
 *
 * destroys the devres associated with this phy provider and invokes
 * of_phy_provider_unregister to unregister the phy provider.
 */
void devm_of_phy_provider_unregister(struct device *dev,
	struct phy_provider *phy_provider)
{
	int r;

	r = devres_destroy(dev, devm_phy_provider_release, devm_phy_match,
		phy_provider);
	dev_WARN_ONCE(dev, r, "couldn't find PHY provider device resource\n");
}
EXPORT_SYMBOL_GPL(devm_of_phy_provider_unregister);

/**
 * phy_release() - release the phy
 * @dev: the dev member within phy
 *
 * When the last reference to the device is removed, it is called
 * from the embedded kobject as release method.
 */
static void phy_release(struct device *dev)
{
	struct phy *phy;

	phy = to_phy(dev);
	dev_vdbg(dev, "releasing '%s'\n", dev_name(dev));
	ida_remove(&phy_ida, phy->id);
	kfree(phy);
}

static int __init phy_core_init(void)
{
	phy_class = class_create(THIS_MODULE, "phy");
	if (IS_ERR(phy_class)) {
		pr_err("failed to create phy class --> %ld\n",
			PTR_ERR(phy_class));
		return PTR_ERR(phy_class);
	}

	phy_class->dev_release = phy_release;

	return 0;
}
module_init(phy_core_init);

static void __exit phy_core_exit(void)
{
	class_destroy(phy_class);
}
module_exit(phy_core_exit);

MODULE_DESCRIPTION("Generic PHY Framework");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");