/*
 * phy-core.c  --  Generic Phy framework.
 *
 * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
 *
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/idr.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>

static struct class *phy_class;
static DEFINE_MUTEX(phy_provider_mutex);
static LIST_HEAD(phy_provider_list);
static LIST_HEAD(phys);
static DEFINE_IDA(phy_ida);

static void devm_phy_release(struct device *dev, void *res)
{
	struct phy *phy = *(struct phy **)res;

	phy_put(phy);
}

static void devm_phy_provider_release(struct device *dev, void *res)
{
	struct phy_provider *phy_provider = *(struct phy_provider **)res;

	of_phy_provider_unregister(phy_provider);
}

static void devm_phy_consume(struct device *dev, void *res)
{
	struct phy *phy = *(struct phy **)res;

	phy_destroy(phy);
}

static int devm_phy_match(struct device *dev, void *res, void *match_data)
{
	struct phy **phy = res;

	return *phy == match_data;
}

/**
 * phy_create_lookup() - allocate and register PHY/device association
 * @phy: the phy of the association
 * @con_id: connection ID string on device
 * @dev_id: the device of the association
 *
 * Creates and registers phy_lookup entry.
 */
int phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id)
{
	struct phy_lookup *pl;

	if (!phy || !dev_id || !con_id)
		return -EINVAL;

	pl = kzalloc(sizeof(*pl), GFP_KERNEL);
	if (!pl)
		return -ENOMEM;

	pl->dev_id = dev_id;
	pl->con_id = con_id;
	pl->phy = phy;

	mutex_lock(&phy_provider_mutex);
	list_add_tail(&pl->node, &phys);
	mutex_unlock(&phy_provider_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(phy_create_lookup);

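/*
 * Example (illustrative sketch, not taken from an in-tree driver): on a
 * non-DT platform a lookup can tie a PHY to a consumer device.  The "usb"
 * connection ID and the "dwc3.0" device name below are purely hypothetical
 * and only need to match dev_name() of the consumer and the con_id it
 * passes to phy_get():
 *
 *	phy_create_lookup(phy, "usb", "dwc3.0");
 *	...
 *	phy_remove_lookup(phy, "usb", "dwc3.0");
 *
 * The consumer whose dev_name() is "dwc3.0" can then obtain the PHY with
 * phy_get(dev, "usb").
 */
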
/**
 * phy_remove_lookup() - find and remove PHY/device association
 * @phy: the phy of the association
 * @con_id: connection ID string on device
 * @dev_id: the device of the association
 *
 * Finds and unregisters phy_lookup entry that was created with
 * phy_create_lookup().
 */
void phy_remove_lookup(struct phy *phy, const char *con_id, const char *dev_id)
{
	struct phy_lookup *pl;

	if (!phy || !dev_id || !con_id)
		return;

	mutex_lock(&phy_provider_mutex);
	list_for_each_entry(pl, &phys, node)
		if (pl->phy == phy && !strcmp(pl->dev_id, dev_id) &&
		    !strcmp(pl->con_id, con_id)) {
			list_del(&pl->node);
			kfree(pl);
			break;
		}
	mutex_unlock(&phy_provider_mutex);
}
EXPORT_SYMBOL_GPL(phy_remove_lookup);

static struct phy *phy_find(struct device *dev, const char *con_id)
{
	const char *dev_id = dev_name(dev);
	struct phy_lookup *p, *pl = NULL;

	mutex_lock(&phy_provider_mutex);
	list_for_each_entry(p, &phys, node)
		if (!strcmp(p->dev_id, dev_id) && !strcmp(p->con_id, con_id)) {
			pl = p;
			break;
		}
	mutex_unlock(&phy_provider_mutex);

	return pl ? pl->phy : ERR_PTR(-ENODEV);
}

static struct phy_provider *of_phy_provider_lookup(struct device_node *node)
{
	struct phy_provider *phy_provider;
	struct device_node *child;

	list_for_each_entry(phy_provider, &phy_provider_list, list) {
		if (phy_provider->dev->of_node == node)
			return phy_provider;

		for_each_child_of_node(phy_provider->dev->of_node, child)
			if (child == node)
				return phy_provider;
	}

	return ERR_PTR(-EPROBE_DEFER);
}

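/*
 * Runtime PM helpers for PHY consumers and providers.  They operate on the
 * PHY's own struct device and are no-ops (the get/put variants return
 * -ENOTSUPP) when runtime PM is not enabled for that device, which is the
 * case when the PHY provider itself had runtime PM disabled at phy_create()
 * time.
 */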
int phy_pm_runtime_get(struct phy *phy)
{
	int ret;

	if (!pm_runtime_enabled(&phy->dev))
		return -ENOTSUPP;

	ret = pm_runtime_get(&phy->dev);
	if (ret < 0 && ret != -EINPROGRESS)
		pm_runtime_put_noidle(&phy->dev);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_get);

int phy_pm_runtime_get_sync(struct phy *phy)
{
	int ret;

	if (!pm_runtime_enabled(&phy->dev))
		return -ENOTSUPP;

	ret = pm_runtime_get_sync(&phy->dev);
	if (ret < 0)
		pm_runtime_put_sync(&phy->dev);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_get_sync);

int phy_pm_runtime_put(struct phy *phy)
{
	if (!pm_runtime_enabled(&phy->dev))
		return -ENOTSUPP;

	return pm_runtime_put(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_put);

int phy_pm_runtime_put_sync(struct phy *phy)
{
	if (!pm_runtime_enabled(&phy->dev))
		return -ENOTSUPP;

	return pm_runtime_put_sync(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_put_sync);

void phy_pm_runtime_allow(struct phy *phy)
{
	if (!pm_runtime_enabled(&phy->dev))
		return;

	pm_runtime_allow(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_allow);

void phy_pm_runtime_forbid(struct phy *phy)
{
	if (!pm_runtime_enabled(&phy->dev))
		return;

	pm_runtime_forbid(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_forbid);

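/**
 * phy_init() - phy internal initialization before phy operation
 * @phy: the phy returned by phy_get()
 *
 * Refcounted: the provider's ->init() callback is only invoked on the first
 * call; subsequent callers just bump init_count.  Must be balanced by a call
 * to phy_exit().
 *
 * Returns: %0 if successful, a negative error code otherwise
 */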
int phy_init(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		return ret;
	ret = 0; /* Override possible ret == -ENOTSUPP */

	mutex_lock(&phy->mutex);
	if (phy->init_count == 0 && phy->ops->init) {
		ret = phy->ops->init(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy init failed --> %d\n", ret);
			goto out;
		}
	}
	++phy->init_count;

out:
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put(phy);
	return ret;
}
EXPORT_SYMBOL_GPL(phy_init);

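/**
 * phy_exit() - phy internal un-initialization
 * @phy: the phy returned by phy_get()
 *
 * Counterpart of phy_init(): the provider's ->exit() callback is only
 * invoked when the last phy_init() reference is dropped.
 *
 * Returns: %0 if successful, a negative error code otherwise
 */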
int phy_exit(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		return ret;
	ret = 0; /* Override possible ret == -ENOTSUPP */

	mutex_lock(&phy->mutex);
	if (phy->init_count == 1 && phy->ops->exit) {
		ret = phy->ops->exit(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy exit failed --> %d\n", ret);
			goto out;
		}
	}
	--phy->init_count;

out:
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put(phy);
	return ret;
}
EXPORT_SYMBOL_GPL(phy_exit);

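/**
 * phy_power_on() - enable the phy for operation
 * @phy: the phy returned by phy_get()
 *
 * Enables the phy-supply regulator (if any), takes a runtime PM reference
 * on the phy, and invokes the provider's ->power_on() callback on the first
 * call only.  Must be balanced by a call to phy_power_off().
 *
 * Returns: %0 if successful, a negative error code otherwise
 */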
int phy_power_on(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	if (phy->pwr) {
		ret = regulator_enable(phy->pwr);
		if (ret)
			return ret;
	}

	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		return ret;
	ret = 0; /* Override possible ret == -ENOTSUPP */

	mutex_lock(&phy->mutex);
	if (phy->power_count == 0 && phy->ops->power_on) {
		ret = phy->ops->power_on(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
			goto out;
		}
	}
	++phy->power_count;
	mutex_unlock(&phy->mutex);
	return 0;

out:
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put_sync(phy);
	if (phy->pwr)
		regulator_disable(phy->pwr);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_power_on);

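/**
 * phy_power_off() - disable the phy
 * @phy: the phy returned by phy_get()
 *
 * Counterpart of phy_power_on().  The provider's ->power_off() callback is
 * only invoked when the last phy_power_on() reference is dropped; the
 * runtime PM reference and the phy-supply regulator enable taken in
 * phy_power_on() are released on every call.
 *
 * Returns: %0 if successful, a negative error code otherwise
 */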
int phy_power_off(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	mutex_lock(&phy->mutex);
	if (phy->power_count == 1 && phy->ops->power_off) {
		ret = phy->ops->power_off(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy poweroff failed --> %d\n", ret);
			mutex_unlock(&phy->mutex);
			return ret;
		}
	}
	--phy->power_count;
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put(phy);

	if (phy->pwr)
		regulator_disable(phy->pwr);

	return 0;
}
EXPORT_SYMBOL_GPL(phy_power_off);

/**
 * _of_phy_get() - lookup and obtain a reference to a phy by phandle
 * @np: device_node for which to get the phy
 * @index: the index of the phy
 *
 * Returns the phy associated with the given phandle value, after getting
 * a refcount to it; -ENODEV if there is no such phy; or -EPROBE_DEFER if
 * there is a phandle to the phy, but the device is not yet loaded. This
 * function uses the of_xlate callback provided when the phy_provider was
 * registered to find the phy instance.
 */
static struct phy *_of_phy_get(struct device_node *np, int index)
{
	int ret;
	struct phy_provider *phy_provider;
	struct phy *phy = NULL;
	struct of_phandle_args args;

	ret = of_parse_phandle_with_args(np, "phys", "#phy-cells",
		index, &args);
	if (ret)
		return ERR_PTR(-ENODEV);

	mutex_lock(&phy_provider_mutex);
	phy_provider = of_phy_provider_lookup(args.np);
	if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) {
		phy = ERR_PTR(-EPROBE_DEFER);
		goto err0;
	}

	phy = phy_provider->of_xlate(phy_provider->dev, &args);
	module_put(phy_provider->owner);

err0:
	mutex_unlock(&phy_provider_mutex);
	of_node_put(args.np);

	return phy;
}

/**
 * of_phy_get() - lookup and obtain a reference to a phy using a device_node.
 * @np: device_node for which to get the phy
 * @con_id: name of the phy from device's point of view
 *
 * Returns the phy driver, after getting a refcount to it; or
 * -ENODEV if there is no such phy. The caller is responsible for
 * calling phy_put() to release that count.
 */
struct phy *of_phy_get(struct device_node *np, const char *con_id)
{
	struct phy *phy = NULL;
	int index = 0;

	if (con_id)
		index = of_property_match_string(np, "phy-names", con_id);

	phy = _of_phy_get(np, index);
	if (IS_ERR(phy))
		return phy;

	if (!try_module_get(phy->ops->owner))
		return ERR_PTR(-EPROBE_DEFER);

	get_device(&phy->dev);

	return phy;
}
EXPORT_SYMBOL_GPL(of_phy_get);

/**
 * phy_put() - release the PHY
 * @phy: the phy returned by phy_get()
 *
 * Releases a refcount the caller received from phy_get().
 */
void phy_put(struct phy *phy)
{
	if (!phy || IS_ERR(phy))
		return;

	module_put(phy->ops->owner);
	put_device(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_put);

/**
 * devm_phy_put() - release the PHY
 * @dev: device that wants to release this phy
 * @phy: the phy returned by devm_phy_get()
 *
 * destroys the devres associated with this phy and invokes phy_put
 * to release the phy.
 */
void devm_phy_put(struct device *dev, struct phy *phy)
{
	int r;

	if (!phy)
		return;

	r = devres_destroy(dev, devm_phy_release, devm_phy_match, phy);
	dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
}
EXPORT_SYMBOL_GPL(devm_phy_put);

/**
 * of_phy_simple_xlate() - returns the phy instance from phy provider
 * @dev: the PHY provider device
 * @args: of_phandle_args (not used here)
 *
 * Intended to be used by phy provider for the common case where #phy-cells is
 * 0. For other cases where #phy-cells is greater than 0, the phy provider
 * should provide a custom of_xlate function that reads the *args* and returns
 * the appropriate phy.
 */
struct phy *of_phy_simple_xlate(struct device *dev, struct of_phandle_args
	*args)
{
	struct phy *phy;
	struct class_dev_iter iter;

	class_dev_iter_init(&iter, phy_class, NULL, NULL);
	while ((dev = class_dev_iter_next(&iter))) {
		phy = to_phy(dev);
		if (args->np != phy->dev.of_node)
			continue;

		class_dev_iter_exit(&iter);
		return phy;
	}

	class_dev_iter_exit(&iter);
	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(of_phy_simple_xlate);

/**
 * phy_get() - lookup and obtain a reference to a phy.
 * @dev: device that requests this phy
 * @string: the phy name as given in the dt data or the name of the controller
 * port for non-dt case
 *
 * Returns the phy driver, after getting a refcount to it; or
 * -ENODEV if there is no such phy.  The caller is responsible for
 * calling phy_put() to release that count.
 */
struct phy *phy_get(struct device *dev, const char *string)
{
	int index = 0;
	struct phy *phy;

	if (string == NULL) {
		dev_WARN(dev, "missing string\n");
		return ERR_PTR(-EINVAL);
	}

	if (dev->of_node) {
		index = of_property_match_string(dev->of_node, "phy-names",
			string);
		phy = _of_phy_get(dev->of_node, index);
	} else {
		phy = phy_find(dev, string);
	}
	if (IS_ERR(phy))
		return phy;

	if (!try_module_get(phy->ops->owner))
		return ERR_PTR(-EPROBE_DEFER);

	get_device(&phy->dev);

	return phy;
}
EXPORT_SYMBOL_GPL(phy_get);

/**
 * phy_optional_get() - lookup and obtain a reference to an optional phy.
 * @dev: device that requests this phy
 * @string: the phy name as given in the dt data or the name of the controller
 * port for non-dt case
 *
 * Returns the phy driver, after getting a refcount to it; or
 * NULL if there is no such phy.  The caller is responsible for
 * calling phy_put() to release that count.
 */
struct phy *phy_optional_get(struct device *dev, const char *string)
{
	struct phy *phy = phy_get(dev, string);

	if (PTR_ERR(phy) == -ENODEV)
		phy = NULL;

	return phy;
}
EXPORT_SYMBOL_GPL(phy_optional_get);

/**
 * devm_phy_get() - lookup and obtain a reference to a phy.
 * @dev: device that requests this phy
 * @string: the phy name as given in the dt data or phy device name
 * for non-dt case
 *
 * Gets the phy using phy_get() and associates a device with it using
 * devres. On driver detach, the release function is invoked on the devres
 * data and the devres data is freed.
 */
struct phy *devm_phy_get(struct device *dev, const char *string)
{
	struct phy **ptr, *phy;

	ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	phy = phy_get(dev, string);
	if (!IS_ERR(phy)) {
		*ptr = phy;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return phy;
}
EXPORT_SYMBOL_GPL(devm_phy_get);

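/*
 * Example (illustrative sketch, not taken from an in-tree driver): a
 * consumer probe() using the managed API.  The "usb2-phy" name and the
 * foo_probe() function are hypothetical; the name would have to match a
 * "phy-names" entry in DT or a lookup registered with phy_create_lookup().
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct phy *phy;
 *		int ret;
 *
 *		phy = devm_phy_get(&pdev->dev, "usb2-phy");
 *		if (IS_ERR(phy))
 *			return PTR_ERR(phy);
 *
 *		ret = phy_init(phy);
 *		if (ret)
 *			return ret;
 *
 *		ret = phy_power_on(phy);
 *		if (ret) {
 *			phy_exit(phy);
 *			return ret;
 *		}
 *
 *		return 0;
 *	}
 */
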
/**
 * devm_phy_optional_get() - lookup and obtain a reference to an optional phy.
 * @dev: device that requests this phy
 * @string: the phy name as given in the dt data or phy device name
 * for non-dt case
 *
 * Gets the phy using phy_get() and associates a device with it using
 * devres. On driver detach, the release function is invoked on the devres
 * data and the devres data is freed. This differs from devm_phy_get() in
 * that if the phy does not exist, it is not considered an error and
 * -ENODEV will not be returned. Instead the NULL phy is returned,
 * which can be passed to all other phy consumer calls.
 */
struct phy *devm_phy_optional_get(struct device *dev, const char *string)
{
	struct phy *phy = devm_phy_get(dev, string);

	if (PTR_ERR(phy) == -ENODEV)
		phy = NULL;

	return phy;
}
EXPORT_SYMBOL_GPL(devm_phy_optional_get);

/**
 * devm_of_phy_get() - lookup and obtain a reference to a phy.
 * @dev: device that requests this phy
 * @np: node containing the phy
 * @con_id: name of the phy from device's point of view
 *
 * Gets the phy using of_phy_get() and associates a device with it using
 * devres. On driver detach, the release function is invoked on the devres
 * data and the devres data is freed.
 */
struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
			    const char *con_id)
{
	struct phy **ptr, *phy;

	ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	phy = of_phy_get(np, con_id);
	if (!IS_ERR(phy)) {
		*ptr = phy;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return phy;
}
EXPORT_SYMBOL_GPL(devm_of_phy_get);

/**
 * phy_create() - create a new phy
 * @dev: device that is creating the new phy
 * @node: device node of the phy
 * @ops: function pointers for performing phy operations
 *
 * Called to create a phy using the phy framework.
 */
struct phy *phy_create(struct device *dev, struct device_node *node,
		       const struct phy_ops *ops)
{
	int ret;
	int id;
	struct phy *phy;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	phy = kzalloc(sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return ERR_PTR(-ENOMEM);

	id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		dev_err(dev, "unable to get id\n");
		ret = id;
		goto free_phy;
	}

	/* phy-supply */
	phy->pwr = regulator_get_optional(dev, "phy");
	if (IS_ERR(phy->pwr)) {
		if (PTR_ERR(phy->pwr) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto free_ida;
		}
		phy->pwr = NULL;
	}

	device_initialize(&phy->dev);
	mutex_init(&phy->mutex);

	phy->dev.class = phy_class;
	phy->dev.parent = dev;
	phy->dev.of_node = node ?: dev->of_node;
	phy->id = id;
	phy->ops = ops;

	ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id);
	if (ret)
		goto put_dev;

	ret = device_add(&phy->dev);
	if (ret)
		goto put_dev;

	if (pm_runtime_enabled(dev)) {
		pm_runtime_enable(&phy->dev);
		pm_runtime_no_callbacks(&phy->dev);
	}

	return phy;

put_dev:
	put_device(&phy->dev);  /* calls phy_release() which frees resources */
	return ERR_PTR(ret);

free_ida:
	ida_simple_remove(&phy_ida, phy->id);

free_phy:
	kfree(phy);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(phy_create);

/**
 * devm_phy_create() - create a new phy
 * @dev: device that is creating the new phy
 * @node: device node of the phy
 * @ops: function pointers for performing phy operations
 *
 * Creates a new PHY device and adds it to the PHY class. It also
 * associates the device with the phy using devres. On driver detach,
 * the release function is invoked on the devres data and the devres
 * data is freed.
 */
struct phy *devm_phy_create(struct device *dev, struct device_node *node,
			    const struct phy_ops *ops)
{
	struct phy **ptr, *phy;

	ptr = devres_alloc(devm_phy_consume, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	phy = phy_create(dev, node, ops);
	if (!IS_ERR(phy)) {
		*ptr = phy;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return phy;
}
EXPORT_SYMBOL_GPL(devm_phy_create);

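/*
 * Example (illustrative sketch, not taken from an in-tree driver): a
 * provider probe() creating a PHY with the managed API.  The foo_phy_ops
 * structure, the foo_* callbacks and the priv pointer are hypothetical;
 * phy_set_drvdata() is the helper declared in <linux/phy/phy.h>.
 *
 *	static const struct phy_ops foo_phy_ops = {
 *		.init		= foo_phy_init,
 *		.power_on	= foo_phy_power_on,
 *		.power_off	= foo_phy_power_off,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	phy = devm_phy_create(dev, NULL, &foo_phy_ops);
 *	if (IS_ERR(phy))
 *		return PTR_ERR(phy);
 *	phy_set_drvdata(phy, priv);
 */
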
/**
 * phy_destroy() - destroy the phy
 * @phy: the phy to be destroyed
 *
 * Called to destroy the phy.
 */
void phy_destroy(struct phy *phy)
{
	pm_runtime_disable(&phy->dev);
	device_unregister(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_destroy);

/**
 * devm_phy_destroy() - destroy the PHY
 * @dev: device that wants to release this phy
 * @phy: the phy returned by devm_phy_get()
 *
 * destroys the devres associated with this phy and invokes phy_destroy
 * to destroy the phy.
 */
void devm_phy_destroy(struct device *dev, struct phy *phy)
{
	int r;

	r = devres_destroy(dev, devm_phy_consume, devm_phy_match, phy);
	dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
}
EXPORT_SYMBOL_GPL(devm_phy_destroy);

/**
 * __of_phy_provider_register() - create/register phy provider with the framework
 * @dev: struct device of the phy provider
 * @owner: the module owner containing of_xlate
 * @of_xlate: function pointer to obtain phy instance from phy provider
 *
 * Creates a struct phy_provider from dev and the of_xlate function pointer.
 * This is used in the DT boot case to find the phy instance from the
 * phy provider.
 */
struct phy_provider *__of_phy_provider_register(struct device *dev,
	struct module *owner, struct phy * (*of_xlate)(struct device *dev,
	struct of_phandle_args *args))
{
	struct phy_provider *phy_provider;

	phy_provider = kzalloc(sizeof(*phy_provider), GFP_KERNEL);
	if (!phy_provider)
		return ERR_PTR(-ENOMEM);

	phy_provider->dev = dev;
	phy_provider->owner = owner;
	phy_provider->of_xlate = of_xlate;

	mutex_lock(&phy_provider_mutex);
	list_add_tail(&phy_provider->list, &phy_provider_list);
	mutex_unlock(&phy_provider_mutex);

	return phy_provider;
}
EXPORT_SYMBOL_GPL(__of_phy_provider_register);

/**
 * __devm_of_phy_provider_register() - create/register phy provider with the
 * framework
 * @dev: struct device of the phy provider
 * @owner: the module owner containing of_xlate
 * @of_xlate: function pointer to obtain phy instance from phy provider
 *
 * Creates a struct phy_provider from dev and the of_xlate function pointer.
 * This is used in the DT boot case to find the phy instance from the
 * phy provider. It also associates the device with the phy provider using
 * devres. On driver detach, the release function is invoked on the devres
 * data and the devres data is freed.
 */
struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
	struct module *owner, struct phy * (*of_xlate)(struct device *dev,
	struct of_phandle_args *args))
{
	struct phy_provider **ptr, *phy_provider;

	ptr = devres_alloc(devm_phy_provider_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	phy_provider = __of_phy_provider_register(dev, owner, of_xlate);
	if (!IS_ERR(phy_provider)) {
		*ptr = phy_provider;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return phy_provider;
}
EXPORT_SYMBOL_GPL(__devm_of_phy_provider_register);

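/*
 * Example (illustrative sketch, not taken from an in-tree driver):
 * registering the provider once its PHYs have been created.  With
 * #phy-cells = <0> in the device tree the generic of_phy_simple_xlate()
 * translator can be used; providers with more cells supply their own
 * of_xlate callback.  devm_of_phy_provider_register() is the wrapper macro
 * declared in <linux/phy/phy.h> around __devm_of_phy_provider_register().
 *
 *	phy_provider = devm_of_phy_provider_register(dev,
 *						      of_phy_simple_xlate);
 *	if (IS_ERR(phy_provider))
 *		return PTR_ERR(phy_provider);
 */
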
/**
 * of_phy_provider_unregister() - unregister phy provider from the framework
 * @phy_provider: phy provider returned by of_phy_provider_register()
 *
 * Removes the phy_provider created using of_phy_provider_register().
 */
void of_phy_provider_unregister(struct phy_provider *phy_provider)
{
	if (IS_ERR(phy_provider))
		return;

	mutex_lock(&phy_provider_mutex);
	list_del(&phy_provider->list);
	kfree(phy_provider);
	mutex_unlock(&phy_provider_mutex);
}
EXPORT_SYMBOL_GPL(of_phy_provider_unregister);

/**
 * devm_of_phy_provider_unregister() - remove phy provider from the framework
 * @dev: struct device of the phy provider
 * @phy_provider: phy provider returned by of_phy_provider_register()
 *
 * destroys the devres associated with this phy provider and invokes
 * of_phy_provider_unregister to unregister the phy provider.
 */
void devm_of_phy_provider_unregister(struct device *dev,
	struct phy_provider *phy_provider)
{
	int r;

	r = devres_destroy(dev, devm_phy_provider_release, devm_phy_match,
		phy_provider);
	dev_WARN_ONCE(dev, r, "couldn't find PHY provider device resource\n");
}
EXPORT_SYMBOL_GPL(devm_of_phy_provider_unregister);

/**
 * phy_release() - release the phy
 * @dev: the dev member within phy
 *
 * Registered as the phy class dev_release callback; the driver core calls
 * it when the last reference to the phy device is dropped.
 */
static void phy_release(struct device *dev)
{
	struct phy *phy;

	phy = to_phy(dev);
	dev_vdbg(dev, "releasing '%s'\n", dev_name(dev));
	regulator_put(phy->pwr);
	ida_simple_remove(&phy_ida, phy->id);
	kfree(phy);
}

static int __init phy_core_init(void)
{
	phy_class = class_create(THIS_MODULE, "phy");
	if (IS_ERR(phy_class)) {
		pr_err("failed to create phy class --> %ld\n",
			PTR_ERR(phy_class));
		return PTR_ERR(phy_class);
	}

	phy_class->dev_release = phy_release;

	return 0;
}
module_init(phy_core_init);

static void __exit phy_core_exit(void)
{
	class_destroy(phy_class);
}
module_exit(phy_core_exit);

MODULE_DESCRIPTION("Generic PHY Framework");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");