xref: /openbmc/linux/drivers/reset/core.c (revision 5086ea4b)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Reset Controller framework
 *
 * Copyright 2013 Philipp Zabel, Pengutronix
 */
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/reset.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>

static DEFINE_MUTEX(reset_list_mutex);
static LIST_HEAD(reset_controller_list);

static DEFINE_MUTEX(reset_lookup_mutex);
static LIST_HEAD(reset_lookup_list);

/**
 * struct reset_control - a reset control
 * @rcdev: a pointer to the reset controller device
 *         this reset control belongs to
 * @list: list entry for the rcdev's reset controller list
 * @id: ID of the reset line in the reset
 *      controller device
 * @refcnt: Number of gets of this reset_control
 * @acquired: Only one reset_control may be acquired for a given rcdev and id.
 * @shared: Is this a shared (1), or an exclusive (0) reset_control?
 * @array: Is this an array of reset controls (1)?
 * @deassert_count: Number of times this reset line has been deasserted
 * @triggered_count: Number of times this reset line has been reset. Currently
 *                   only used for shared resets, which means that the value
 *                   will be either 0 or 1.
 */
struct reset_control {
	struct reset_controller_dev *rcdev;
	struct list_head list;
	unsigned int id;
	struct kref refcnt;
	bool acquired;
	bool shared;
	bool array;
	atomic_t deassert_count;
	atomic_t triggered_count;
};

/**
 * struct reset_control_array - an array of reset controls
 * @base: reset control for compatibility with reset control API functions
 * @num_rstcs: number of reset controls
 * @rstc: array of reset controls
 */
struct reset_control_array {
	struct reset_control base;
	unsigned int num_rstcs;
	struct reset_control *rstc[];
};

static const char *rcdev_name(struct reset_controller_dev *rcdev)
{
	if (rcdev->dev)
		return dev_name(rcdev->dev);

	if (rcdev->of_node)
		return rcdev->of_node->full_name;

	return NULL;
}

/**
 * of_reset_simple_xlate - translate reset_spec to the reset line number
 * @rcdev: a pointer to the reset controller device
 * @reset_spec: reset line specifier as found in the device tree
 *
 * This static translation function is used by default if of_xlate in
 * :c:type:`reset_controller_dev` is not set. It is useful for all reset
 * controllers with 1:1 mapping, where reset lines can be indexed by number
 * without gaps.
 */
static int of_reset_simple_xlate(struct reset_controller_dev *rcdev,
			  const struct of_phandle_args *reset_spec)
{
	if (reset_spec->args[0] >= rcdev->nr_resets)
		return -EINVAL;

	return reset_spec->args[0];
}
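
/*
 * Illustrative example (not part of the original file): with the default
 * translation above and a provider that sets rcdev->nr_resets = 8, a
 * consumer node referencing a hypothetical provider label "rst" with
 *
 *	#reset-cells = <1>;
 *	resets = <&rst 5>;
 *
 * resolves to reset line 5; an index of 8 or higher is rejected with
 * -EINVAL. Providers with sparse or multi-cell numbering supply their own
 * rcdev->of_xlate instead.
 */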

/**
 * reset_controller_register - register a reset controller device
 * @rcdev: a pointer to the initialized reset controller device
 */
int reset_controller_register(struct reset_controller_dev *rcdev)
{
	if (!rcdev->of_xlate) {
		rcdev->of_reset_n_cells = 1;
		rcdev->of_xlate = of_reset_simple_xlate;
	}

	INIT_LIST_HEAD(&rcdev->reset_control_head);

	mutex_lock(&reset_list_mutex);
	list_add(&rcdev->list, &reset_controller_list);
	mutex_unlock(&reset_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(reset_controller_register);
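
/*
 * Hedged sketch of a provider registering itself (not part of the original
 * file; "foo", "foo_reset_ops" and FOO_NR_RESETS are hypothetical names):
 *
 *	foo->rcdev.owner = THIS_MODULE;
 *	foo->rcdev.ops = &foo_reset_ops;
 *	foo->rcdev.of_node = pdev->dev.of_node;
 *	foo->rcdev.nr_resets = FOO_NR_RESETS;
 *
 *	ret = reset_controller_register(&foo->rcdev);
 */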

/**
 * reset_controller_unregister - unregister a reset controller device
 * @rcdev: a pointer to the reset controller device
 */
void reset_controller_unregister(struct reset_controller_dev *rcdev)
{
	mutex_lock(&reset_list_mutex);
	list_del(&rcdev->list);
	mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(reset_controller_unregister);

static void devm_reset_controller_release(struct device *dev, void *res)
{
	reset_controller_unregister(*(struct reset_controller_dev **)res);
}

/**
 * devm_reset_controller_register - resource managed reset_controller_register()
 * @dev: device that is registering this reset controller
 * @rcdev: a pointer to the initialized reset controller device
 *
 * Managed reset_controller_register(). For reset controllers registered by
 * this function, reset_controller_unregister() is automatically called on
 * driver detach. See reset_controller_register() for more information.
 */
int devm_reset_controller_register(struct device *dev,
				   struct reset_controller_dev *rcdev)
{
	struct reset_controller_dev **rcdevp;
	int ret;

	rcdevp = devres_alloc(devm_reset_controller_release, sizeof(*rcdevp),
			      GFP_KERNEL);
	if (!rcdevp)
		return -ENOMEM;

	ret = reset_controller_register(rcdev);
	if (ret) {
		devres_free(rcdevp);
		return ret;
	}

	*rcdevp = rcdev;
	devres_add(dev, rcdevp);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_reset_controller_register);
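
/*
 * With the managed variant a platform driver's probe can register and forget
 * (illustrative sketch only, reusing the hypothetical "foo" from above):
 *
 *	return devm_reset_controller_register(&pdev->dev, &foo->rcdev);
 */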

/**
 * reset_controller_add_lookup - register a set of lookup entries
 * @lookup: array of reset lookup entries
 * @num_entries: number of entries in the lookup array
 */
void reset_controller_add_lookup(struct reset_control_lookup *lookup,
				 unsigned int num_entries)
{
	struct reset_control_lookup *entry;
	unsigned int i;

	mutex_lock(&reset_lookup_mutex);
	for (i = 0; i < num_entries; i++) {
		entry = &lookup[i];

		if (!entry->dev_id || !entry->provider) {
			pr_warn("%s(): reset lookup entry badly specified, skipping\n",
				__func__);
			continue;
		}

		list_add_tail(&entry->list, &reset_lookup_list);
	}
	mutex_unlock(&reset_lookup_mutex);
}
EXPORT_SYMBOL_GPL(reset_controller_add_lookup);
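
/*
 * Example lookup table for a non-DT platform (hedged sketch, not part of
 * the original file; the provider and consumer device names are made up):
 *
 *	static struct reset_control_lookup foo_reset_lookup[] = {
 *		RESET_LOOKUP("foo-reset.0", 0, "foo-consumer.0", "bus"),
 *	};
 *
 *	reset_controller_add_lookup(foo_reset_lookup,
 *				    ARRAY_SIZE(foo_reset_lookup));
 */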

static inline struct reset_control_array *
rstc_to_array(struct reset_control *rstc) {
	return container_of(rstc, struct reset_control_array, base);
}

static int reset_control_array_reset(struct reset_control_array *resets)
{
	int ret, i;

	for (i = 0; i < resets->num_rstcs; i++) {
		ret = reset_control_reset(resets->rstc[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int reset_control_array_rearm(struct reset_control_array *resets)
{
	struct reset_control *rstc;
	int i;

	for (i = 0; i < resets->num_rstcs; i++) {
		rstc = resets->rstc[i];

		if (!rstc)
			continue;

		if (WARN_ON(IS_ERR(rstc)))
			return -EINVAL;

		if (rstc->shared) {
			if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
				return -EINVAL;
		} else {
			if (!rstc->acquired)
				return -EPERM;
		}
	}

	for (i = 0; i < resets->num_rstcs; i++) {
		rstc = resets->rstc[i];

		if (rstc && rstc->shared)
			WARN_ON(atomic_dec_return(&rstc->triggered_count) < 0);
	}

	return 0;
}

static int reset_control_array_assert(struct reset_control_array *resets)
{
	int ret, i;

	for (i = 0; i < resets->num_rstcs; i++) {
		ret = reset_control_assert(resets->rstc[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	while (i--)
		reset_control_deassert(resets->rstc[i]);
	return ret;
}

static int reset_control_array_deassert(struct reset_control_array *resets)
{
	int ret, i;

	for (i = 0; i < resets->num_rstcs; i++) {
		ret = reset_control_deassert(resets->rstc[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	while (i--)
		reset_control_assert(resets->rstc[i]);
	return ret;
}

static int reset_control_array_acquire(struct reset_control_array *resets)
{
	unsigned int i;
	int err;

	for (i = 0; i < resets->num_rstcs; i++) {
		err = reset_control_acquire(resets->rstc[i]);
		if (err < 0)
			goto release;
	}

	return 0;

release:
	while (i--)
		reset_control_release(resets->rstc[i]);

	return err;
}

static void reset_control_array_release(struct reset_control_array *resets)
{
	unsigned int i;

	for (i = 0; i < resets->num_rstcs; i++)
		reset_control_release(resets->rstc[i]);
}

static inline bool reset_control_is_array(struct reset_control *rstc)
{
	return rstc->array;
}

/**
 * reset_control_reset - reset the controlled device
 * @rstc: reset controller
 *
 * On a shared reset line the actual reset pulse is only triggered once for the
 * lifetime of the reset_control instance: for all but the first caller this is
 * a no-op.
 * Consumers must not use reset_control_(de)assert on shared reset lines when
 * reset_control_reset has been used.
 *
 * If rstc is NULL it is an optional reset and the function will just
 * return 0.
 */
int reset_control_reset(struct reset_control *rstc)
{
	int ret;

	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_reset(rstc_to_array(rstc));

	if (!rstc->rcdev->ops->reset)
		return -ENOTSUPP;

	if (rstc->shared) {
		if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
			return -EINVAL;

		if (atomic_inc_return(&rstc->triggered_count) != 1)
			return 0;
	} else {
		if (!rstc->acquired)
			return -EPERM;
	}

	ret = rstc->rcdev->ops->reset(rstc->rcdev, rstc->id);
	if (rstc->shared && ret)
		atomic_dec(&rstc->triggered_count);

	return ret;
}
EXPORT_SYMBOL_GPL(reset_control_reset);
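
/*
 * Typical consumer usage (hedged sketch, not part of the original file;
 * "dev" is assumed to be the consumer's struct device and the error path
 * is abbreviated):
 *
 *	rstc = devm_reset_control_get_exclusive(dev, NULL);
 *	if (IS_ERR(rstc))
 *		return PTR_ERR(rstc);
 *
 *	ret = reset_control_reset(rstc);
 */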

/**
 * reset_control_rearm - allow shared reset line to be re-triggered
 * @rstc: reset controller
 *
 * On a shared reset line the actual reset pulse is only triggered once for the
 * lifetime of the reset_control instance, except if this call is used.
 *
 * Calls to this function must be balanced with calls to reset_control_reset;
 * a warning is emitted if triggered_count ever drops below 0.
 *
 * Consumers must not use reset_control_(de)assert on shared reset lines when
 * reset_control_reset or reset_control_rearm have been used.
 *
 * If rstc is NULL the function will just return 0.
 */
int reset_control_rearm(struct reset_control *rstc)
{
	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_rearm(rstc_to_array(rstc));

	if (rstc->shared) {
		if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
			return -EINVAL;

		WARN_ON(atomic_dec_return(&rstc->triggered_count) < 0);
	} else {
		if (!rstc->acquired)
			return -EPERM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(reset_control_rearm);
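
/*
 * Hedged sketch of balanced use on a shared line (not part of the original
 * file): a reset_control_reset() that the consumer later wants to allow to
 * trigger again must be matched by a reset_control_rearm():
 *
 *	ret = reset_control_reset(rstc);
 *	...
 *	ret = reset_control_rearm(rstc);
 */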

/**
 * reset_control_assert - asserts the reset line
 * @rstc: reset controller
 *
 * Calling this on an exclusive reset controller guarantees that the reset
 * will be asserted. When called on a shared reset controller the line may
 * still be deasserted, as long as other users keep it so.
 *
 * For shared reset controls a driver cannot expect the hw's registers and
 * internal state to be reset, but must be prepared for this to happen.
 * Consumers must not use reset_control_reset on shared reset lines when
 * reset_control_(de)assert has been used.
 *
 * If rstc is NULL it is an optional reset and the function will just
 * return 0.
 */
int reset_control_assert(struct reset_control *rstc)
{
	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_assert(rstc_to_array(rstc));

	if (rstc->shared) {
		if (WARN_ON(atomic_read(&rstc->triggered_count) != 0))
			return -EINVAL;

		if (WARN_ON(atomic_read(&rstc->deassert_count) == 0))
			return -EINVAL;

		if (atomic_dec_return(&rstc->deassert_count) != 0)
			return 0;

		/*
		 * Shared reset controls allow the reset line to be in any state
		 * after this call, so doing nothing is a valid option.
		 */
		if (!rstc->rcdev->ops->assert)
			return 0;
	} else {
		/*
		 * If the reset controller does not implement .assert(), there
		 * is no way to guarantee that the reset line is asserted after
		 * this call.
		 */
		if (!rstc->rcdev->ops->assert)
			return -ENOTSUPP;

		if (!rstc->acquired) {
			WARN(1, "reset %s (ID: %u) is not acquired\n",
			     rcdev_name(rstc->rcdev), rstc->id);
			return -EPERM;
		}
	}

	return rstc->rcdev->ops->assert(rstc->rcdev, rstc->id);
}
EXPORT_SYMBOL_GPL(reset_control_assert);

/**
 * reset_control_deassert - deasserts the reset line
 * @rstc: reset controller
 *
 * After calling this function, the reset is guaranteed to be deasserted.
 * Consumers must not use reset_control_reset on shared reset lines when
 * reset_control_(de)assert has been used.
 *
 * If rstc is NULL it is an optional reset and the function will just
 * return 0.
 */
int reset_control_deassert(struct reset_control *rstc)
{
	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_deassert(rstc_to_array(rstc));

	if (rstc->shared) {
		if (WARN_ON(atomic_read(&rstc->triggered_count) != 0))
			return -EINVAL;

		if (atomic_inc_return(&rstc->deassert_count) != 1)
			return 0;
	} else {
		if (!rstc->acquired) {
			WARN(1, "reset %s (ID: %u) is not acquired\n",
			     rcdev_name(rstc->rcdev), rstc->id);
			return -EPERM;
		}
	}

	/*
	 * If the reset controller does not implement .deassert(), we assume
	 * that it handles self-deasserting reset lines via .reset(). In that
	 * case, the reset lines are deasserted by default. If that is not the
	 * case, the reset controller driver should implement .deassert() and
	 * return -ENOTSUPP.
	 */
	if (!rstc->rcdev->ops->deassert)
		return 0;

	return rstc->rcdev->ops->deassert(rstc->rcdev, rstc->id);
}
EXPORT_SYMBOL_GPL(reset_control_deassert);
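
/*
 * Illustrative assert/deassert sequence on an exclusive control (sketch,
 * not part of the original file; the delay is an arbitrary placeholder):
 *
 *	reset_control_assert(rstc);
 *	usleep_range(10, 20);
 *	reset_control_deassert(rstc);
 */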

/**
 * reset_control_status - returns a negative errno if not supported, a
 * positive value if the reset line is asserted, or zero if the reset
 * line is not asserted or if rstc is NULL (optional reset).
 * @rstc: reset controller
 */
int reset_control_status(struct reset_control *rstc)
{
	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)) || reset_control_is_array(rstc))
		return -EINVAL;

	if (rstc->rcdev->ops->status)
		return rstc->rcdev->ops->status(rstc->rcdev, rstc->id);

	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(reset_control_status);
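
/*
 * Example status check (hedged sketch, not part of the original file;
 * "asserted" is a hypothetical local variable):
 *
 *	ret = reset_control_status(rstc);
 *	if (ret < 0)
 *		return ret;
 *	asserted = ret > 0;
 */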

/**
 * reset_control_acquire() - acquires a reset control for exclusive use
 * @rstc: reset control
 *
 * This is used to explicitly acquire a reset control for exclusive use. Note
 * that exclusive resets are requested as acquired by default. In order for a
 * second consumer to be able to control the reset, the first consumer has to
 * release it first. Typically the easiest way to achieve this is to call
 * reset_control_get_exclusive_released() to obtain an instance of the reset
 * control. Such reset controls are not acquired by default.
 *
 * Consumers implementing shared access to an exclusive reset need to follow
 * a specific protocol in order to work together. Before consumers can change
 * a reset they must acquire exclusive access using reset_control_acquire().
 * After they are done operating the reset, they must release exclusive access
 * with a call to reset_control_release(). A consumer is not granted exclusive
 * access to the reset until the consumer currently holding it has released it.
 *
 * See also: reset_control_release()
 */
int reset_control_acquire(struct reset_control *rstc)
{
	struct reset_control *rc;

	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_acquire(rstc_to_array(rstc));

	mutex_lock(&reset_list_mutex);

	if (rstc->acquired) {
		mutex_unlock(&reset_list_mutex);
		return 0;
	}

	list_for_each_entry(rc, &rstc->rcdev->reset_control_head, list) {
		if (rstc != rc && rstc->id == rc->id) {
			if (rc->acquired) {
				mutex_unlock(&reset_list_mutex);
				return -EBUSY;
			}
		}
	}

	rstc->acquired = true;

	mutex_unlock(&reset_list_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(reset_control_acquire);

/**
 * reset_control_release() - releases exclusive access to a reset control
 * @rstc: reset control
 *
 * Releases exclusive access right to a reset control previously obtained by a
 * call to reset_control_acquire(). Until a consumer calls this function, no
 * other consumers will be granted exclusive access.
 *
 * See also: reset_control_acquire()
 */
void reset_control_release(struct reset_control *rstc)
{
	if (!rstc || WARN_ON(IS_ERR(rstc)))
		return;

	if (reset_control_is_array(rstc))
		reset_control_array_release(rstc_to_array(rstc));
	else
		rstc->acquired = false;
}
EXPORT_SYMBOL_GPL(reset_control_release);
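
/*
 * Sketch of the acquire/release protocol between cooperating consumers
 * (illustrative only, not part of the original file):
 *
 *	rstc = devm_reset_control_get_exclusive_released(dev, NULL);
 *	...
 *	err = reset_control_acquire(rstc);
 *	if (err < 0)
 *		return err;
 *	reset_control_assert(rstc);
 *	...
 *	reset_control_release(rstc);
 */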

static struct reset_control *__reset_control_get_internal(
				struct reset_controller_dev *rcdev,
				unsigned int index, bool shared, bool acquired)
{
	struct reset_control *rstc;

	lockdep_assert_held(&reset_list_mutex);

	list_for_each_entry(rstc, &rcdev->reset_control_head, list) {
		if (rstc->id == index) {
			/*
			 * Allow creating a secondary exclusive reset_control
			 * that is initially not acquired for an already
			 * controlled reset line.
			 */
			if (!rstc->shared && !shared && !acquired)
				break;

			if (WARN_ON(!rstc->shared || !shared))
				return ERR_PTR(-EBUSY);

			kref_get(&rstc->refcnt);
			return rstc;
		}
	}

	rstc = kzalloc(sizeof(*rstc), GFP_KERNEL);
	if (!rstc)
		return ERR_PTR(-ENOMEM);

	try_module_get(rcdev->owner);

	rstc->rcdev = rcdev;
	list_add(&rstc->list, &rcdev->reset_control_head);
	rstc->id = index;
	kref_init(&rstc->refcnt);
	rstc->acquired = acquired;
	rstc->shared = shared;

	return rstc;
}

static void __reset_control_release(struct kref *kref)
{
	struct reset_control *rstc = container_of(kref, struct reset_control,
						  refcnt);

	lockdep_assert_held(&reset_list_mutex);

	module_put(rstc->rcdev->owner);

	list_del(&rstc->list);
	kfree(rstc);
}

static void __reset_control_put_internal(struct reset_control *rstc)
{
	lockdep_assert_held(&reset_list_mutex);

	kref_put(&rstc->refcnt, __reset_control_release);
}

struct reset_control *__of_reset_control_get(struct device_node *node,
				     const char *id, int index, bool shared,
				     bool optional, bool acquired)
{
	struct reset_control *rstc;
	struct reset_controller_dev *r, *rcdev;
	struct of_phandle_args args;
	int rstc_id;
	int ret;

	if (!node)
		return ERR_PTR(-EINVAL);

	if (id) {
		index = of_property_match_string(node,
						 "reset-names", id);
		if (index == -EILSEQ)
			return ERR_PTR(index);
		if (index < 0)
			return optional ? NULL : ERR_PTR(-ENOENT);
	}

	ret = of_parse_phandle_with_args(node, "resets", "#reset-cells",
					 index, &args);
	if (ret == -EINVAL)
		return ERR_PTR(ret);
	if (ret)
		return optional ? NULL : ERR_PTR(ret);

	mutex_lock(&reset_list_mutex);
	rcdev = NULL;
	list_for_each_entry(r, &reset_controller_list, list) {
		if (args.np == r->of_node) {
			rcdev = r;
			break;
		}
	}

	if (!rcdev) {
		rstc = ERR_PTR(-EPROBE_DEFER);
		goto out;
	}

	if (WARN_ON(args.args_count != rcdev->of_reset_n_cells)) {
		rstc = ERR_PTR(-EINVAL);
		goto out;
	}

	rstc_id = rcdev->of_xlate(rcdev, &args);
	if (rstc_id < 0) {
		rstc = ERR_PTR(rstc_id);
		goto out;
	}

	/* reset_list_mutex also protects the rcdev's reset_control list */
	rstc = __reset_control_get_internal(rcdev, rstc_id, shared, acquired);

out:
	mutex_unlock(&reset_list_mutex);
	of_node_put(args.np);

	return rstc;
}
EXPORT_SYMBOL_GPL(__of_reset_control_get);

static struct reset_controller_dev *
__reset_controller_by_name(const char *name)
{
	struct reset_controller_dev *rcdev;

	lockdep_assert_held(&reset_list_mutex);

	list_for_each_entry(rcdev, &reset_controller_list, list) {
		if (!rcdev->dev)
			continue;

		if (!strcmp(name, dev_name(rcdev->dev)))
			return rcdev;
	}

	return NULL;
}

static struct reset_control *
__reset_control_get_from_lookup(struct device *dev, const char *con_id,
				bool shared, bool optional, bool acquired)
{
	const struct reset_control_lookup *lookup;
	struct reset_controller_dev *rcdev;
	const char *dev_id = dev_name(dev);
	struct reset_control *rstc = NULL;

	mutex_lock(&reset_lookup_mutex);

	list_for_each_entry(lookup, &reset_lookup_list, list) {
		if (strcmp(lookup->dev_id, dev_id))
			continue;

		if ((!con_id && !lookup->con_id) ||
		    ((con_id && lookup->con_id) &&
		     !strcmp(con_id, lookup->con_id))) {
			mutex_lock(&reset_list_mutex);
			rcdev = __reset_controller_by_name(lookup->provider);
			if (!rcdev) {
				mutex_unlock(&reset_list_mutex);
				mutex_unlock(&reset_lookup_mutex);
				/* Reset provider may not be ready yet. */
				return ERR_PTR(-EPROBE_DEFER);
			}

			rstc = __reset_control_get_internal(rcdev,
							    lookup->index,
							    shared, acquired);
			mutex_unlock(&reset_list_mutex);
			break;
		}
	}

	mutex_unlock(&reset_lookup_mutex);

	if (!rstc)
		return optional ? NULL : ERR_PTR(-ENOENT);

	return rstc;
}

struct reset_control *__reset_control_get(struct device *dev, const char *id,
					  int index, bool shared, bool optional,
					  bool acquired)
{
	if (WARN_ON(shared && acquired))
		return ERR_PTR(-EINVAL);

	if (dev->of_node)
		return __of_reset_control_get(dev->of_node, id, index, shared,
					      optional, acquired);

	return __reset_control_get_from_lookup(dev, id, shared, optional,
					       acquired);
}
EXPORT_SYMBOL_GPL(__reset_control_get);

static void reset_control_array_put(struct reset_control_array *resets)
{
	int i;

	mutex_lock(&reset_list_mutex);
	for (i = 0; i < resets->num_rstcs; i++)
		__reset_control_put_internal(resets->rstc[i]);
	mutex_unlock(&reset_list_mutex);
	kfree(resets);
}

/**
 * reset_control_put - free the reset controller
 * @rstc: reset controller
 */
void reset_control_put(struct reset_control *rstc)
{
	if (IS_ERR_OR_NULL(rstc))
		return;

	if (reset_control_is_array(rstc)) {
		reset_control_array_put(rstc_to_array(rstc));
		return;
	}

	mutex_lock(&reset_list_mutex);
	__reset_control_put_internal(rstc);
	mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(reset_control_put);

static void devm_reset_control_release(struct device *dev, void *res)
{
	reset_control_put(*(struct reset_control **)res);
}

struct reset_control *__devm_reset_control_get(struct device *dev,
				     const char *id, int index, bool shared,
				     bool optional, bool acquired)
{
	struct reset_control **ptr, *rstc;

	ptr = devres_alloc(devm_reset_control_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	rstc = __reset_control_get(dev, id, index, shared, optional, acquired);
	if (IS_ERR_OR_NULL(rstc)) {
		devres_free(ptr);
		return rstc;
	}

	*ptr = rstc;
	devres_add(dev, ptr);

	return rstc;
}
EXPORT_SYMBOL_GPL(__devm_reset_control_get);

/**
 * __device_reset - find reset controller associated with the device
 *                  and perform reset
 * @dev: device to be reset by the controller
 * @optional: whether it is optional to reset the device
 *
 * Convenience wrapper for __reset_control_get() and reset_control_reset().
 * This is useful for the common case of devices with single, dedicated reset
 * lines.
 */
int __device_reset(struct device *dev, bool optional)
{
	struct reset_control *rstc;
	int ret;

	rstc = __reset_control_get(dev, NULL, 0, 0, optional, true);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	ret = reset_control_reset(rstc);

	reset_control_put(rstc);

	return ret;
}
EXPORT_SYMBOL_GPL(__device_reset);
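
/*
 * The public wrappers device_reset(dev) and device_reset_optional(dev) in
 * <linux/reset.h> expand to this helper. Illustrative use from a probe
 * function (hedged sketch, not part of the original file):
 *
 *	err = device_reset(&pdev->dev);
 *	if (err)
 *		return err;
 */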

/*
 * APIs to manage an array of reset controls.
 */

/**
 * of_reset_control_get_count - Count number of resets available with a device
 *
 * @node: device node that contains 'resets'.
 *
 * Returns a positive reset count on success, or a negative error number on
 * failure or if the count is zero.
 */
static int of_reset_control_get_count(struct device_node *node)
{
	int count;

	if (!node)
		return -EINVAL;

	count = of_count_phandle_with_args(node, "resets", "#reset-cells");
	if (count == 0)
		count = -ENOENT;

	return count;
}

/**
 * of_reset_control_array_get - Get a list of reset controls using
 *				a device node.
 *
 * @np: device node for the device that requests the reset controls array
 * @shared: whether reset controls are shared or not
 * @optional: whether it is optional to get the reset controls
 * @acquired: only one reset control may be acquired for a given controller
 *            and ID
 *
 * Returns a pointer to the allocated reset_control on success or an error
 * pointer on failure.
 */
struct reset_control *
of_reset_control_array_get(struct device_node *np, bool shared, bool optional,
			   bool acquired)
{
	struct reset_control_array *resets;
	struct reset_control *rstc;
	int num, i;

	num = of_reset_control_get_count(np);
	if (num < 0)
		return optional ? NULL : ERR_PTR(num);

	resets = kzalloc(struct_size(resets, rstc, num), GFP_KERNEL);
	if (!resets)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < num; i++) {
		rstc = __of_reset_control_get(np, NULL, i, shared, optional,
					      acquired);
		if (IS_ERR(rstc))
			goto err_rst;
		resets->rstc[i] = rstc;
	}
	resets->num_rstcs = num;
	resets->base.array = true;

	return &resets->base;

err_rst:
	mutex_lock(&reset_list_mutex);
	while (--i >= 0)
		__reset_control_put_internal(resets->rstc[i]);
	mutex_unlock(&reset_list_mutex);

	kfree(resets);

	return rstc;
}
EXPORT_SYMBOL_GPL(of_reset_control_array_get);

/**
 * devm_reset_control_array_get - Resource managed reset control array get
 *
 * @dev: device that requests the list of reset controls
 * @shared: whether reset controls are shared or not
 * @optional: whether it is optional to get the reset controls
 *
 * The reset control array APIs are intended for a list of resets
 * that just have to be asserted or deasserted, without any
 * requirements on the order.
 *
 * Returns a pointer to the allocated reset_control on success or an error
 * pointer on failure.
 */
struct reset_control *
devm_reset_control_array_get(struct device *dev, bool shared, bool optional)
{
	struct reset_control **ptr, *rstc;

	ptr = devres_alloc(devm_reset_control_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	rstc = of_reset_control_array_get(dev->of_node, shared, optional, true);
	if (IS_ERR_OR_NULL(rstc)) {
		devres_free(ptr);
		return rstc;
	}

	*ptr = rstc;
	devres_add(dev, ptr);

	return rstc;
}
EXPORT_SYMBOL_GPL(devm_reset_control_array_get);
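
/*
 * Typical consumer usage via the wrappers in <linux/reset.h> (hedged
 * sketch, not part of the original file):
 *
 *	rstcs = devm_reset_control_array_get_exclusive(dev);
 *	if (IS_ERR(rstcs))
 *		return PTR_ERR(rstcs);
 *
 *	reset_control_assert(rstcs);
 *	...
 *	reset_control_deassert(rstcs);
 */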

static int reset_control_get_count_from_lookup(struct device *dev)
{
	const struct reset_control_lookup *lookup;
	const char *dev_id;
	int count = 0;

	if (!dev)
		return -EINVAL;

	dev_id = dev_name(dev);
	mutex_lock(&reset_lookup_mutex);

	list_for_each_entry(lookup, &reset_lookup_list, list) {
		if (!strcmp(lookup->dev_id, dev_id))
			count++;
	}

	mutex_unlock(&reset_lookup_mutex);

	if (count == 0)
		count = -ENOENT;

	return count;
}

/**
 * reset_control_get_count - Count number of resets available with a device
 *
 * @dev: device for which to return the number of resets
 *
 * Returns a positive reset count on success, or a negative error number on
 * failure or if the count is zero.
 */
int reset_control_get_count(struct device *dev)
{
	if (dev->of_node)
		return of_reset_control_get_count(dev->of_node);

	return reset_control_get_count_from_lookup(dev);
}
EXPORT_SYMBOL_GPL(reset_control_get_count);
1059